From cb4b5f7c9fb80e23da87637956bb742e8b0068ef Mon Sep 17 00:00:00 2001
From: Herumb Shandilya
Date: Wed, 14 Feb 2024 14:58:24 +0530
Subject: [PATCH 001/243] Initial Signatures and class abstract

---
 dspy/datasets/synthesizer.py | 105 +++++++++++++++++++++++++++++++++++
 1 file changed, 105 insertions(+)
 create mode 100644 dspy/datasets/synthesizer.py

diff --git a/dspy/datasets/synthesizer.py b/dspy/datasets/synthesizer.py
new file mode 100644
index 0000000000..65e3be5a9f
--- /dev/null
+++ b/dspy/datasets/synthesizer.py
@@ -0,0 +1,105 @@
+import dspy
+from typing import List
+
+class ExplainTask(dspy.Signature):
+    """Imagine you're a detective in a game where your mission is to unlock the mystery of a hidden treasure. The treasure map, in this case, is the task description. Your first step is to study the map carefully to understand where the treasure is hidden and how to get there. This means figuring out what the task is all about—like decoding clues. Once you've got a good grasp, your job is to explain your plan to your team in a way that's super easy to understand, as if you're telling a friend how to find the treasure without using the map. You won't be using the clues directly in your explanation but rather your understanding of them to guide your team clearly and simply to the treasure."""
+
+    @staticmethod
+    def format_examples(examples: List[dspy.Example]):
+        if isinstance(examples, str):
+            return examples
+
+        formatted_example = ""
+
+        for example in examples:
+            input_keys = example.inputs().keys()
+            label_keys = example.labels().keys()
+
+            formatted_example += "Input:\n"
+            for key in input_keys:
+                formatted_example += f"{key}: {example[key]}\n"
+
+            formatted_example += "Output:\n"
+            for key in label_keys:
+                formatted_example += f"{key}: {example[key]}\n"
+
+        return formatted_example
+
+    examples = dspy.InputField(
+        prefix="Few Shot Examples:-",
+        desc="List of examples to analyze and explain the task.",
+        format=format_examples,
+    )
+    explanation = dspy.OutputField(
+        prefix="Explanation:",
+        desc="Explanation of the task.",
+    )
+
+class GenerateFieldDescription(dspy.Signature):
+    """I'll be providing you with the name of the field and the task description. Your task is to generate a description for the field. The description should be such that it is easy to understand and gives a clear idea of what the field is about."""
+
+    task_description = dspy.InputField(
+        prefix="Task Description:",
+        desc="Description of the task the field is an input to.",
+    )
+    field_name = dspy.InputField(
+        prefix="Field Name:",
+        desc="Name of the field to generate synthetic data for.",
+    )
+    field_description = dspy.OutputField(
+        prefix="Field Description:",
+        desc="Description of the field.",
+    )
+
+class GenerateInputFieldsData(dspy.Signature):
+    """You are an expert data generator with 30 years of experience in generating synthetic data. We want you to put these skills to work. I'll be providing you with some input fields that are columns of the CSV file and the explanation of the task these fields would be an input to.
Your task is to generate synthetic data for these fields."""
+    pass
+
+class GenerateOutputFieldsData(dspy.Signature):
+    pass
+
+class Synthesizer:
+    def __init__(self):
+        self.explain_task = ExplainTask()
+        self.generate_field_description = GenerateFieldDescription()
+
+        self.generate_input_data = GenerateInputFieldsData()
+        self.generate_output_data = GenerateOutputFieldsData()
+
+    def _prepare_synthetic_data_signature(self, signature: dspy.Signature):
+        signature
+
+    def generate(self, examples: List[dspy.Example], num_data: int) -> List[dspy.Example]:
+        input_keys = examples[0].keys()
+
+        task_description = self.explain_task(examples=examples)
+        self.generate_output_data.__doc__ = task_description
+
+        self._prepare_synthetic_data_signature()
+
+        data = []
+        for _ in range(num_data):
+            synthetic_data = {field: self.generate_synthetic_data() for field in fields}
+            data.append(synthetic_data)
+
+
+    def export(self):
+        pass
+
+    def _to_csv(self):
+        pass
+
+    def _to_jsonl(self):
+        pass
+
+    def _to_pickle(self):
+        pass
+
+    def _to_sql(self):
+        pass
+
+    def _to_parquet(self):
+        pass
+
+    def _to_arrow(self):
+        pass
\ No newline at end of file

From bafb475c7e6a9ca2586a62b35c8e4d6adaf2b570 Mon Sep 17 00:00:00 2001
From: ragul-kachiappan
Date: Tue, 27 Feb 2024 16:13:12 +0530
Subject: [PATCH 002/243] fix: default collection embedding function attribute
 in ChromadbRM constructor

---
 dspy/retrieve/chromadb_rm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dspy/retrieve/chromadb_rm.py b/dspy/retrieve/chromadb_rm.py
index 1338d48d52..b55fec8fff 100644
--- a/dspy/retrieve/chromadb_rm.py
+++ b/dspy/retrieve/chromadb_rm.py
@@ -74,7 +74,7 @@ def __init__(
         k: int = 7,
     ):
         self._init_chromadb(collection_name, persist_directory)
-        self.ef = embedding_function or self._chromadb_collection.embedding_function
+        self.ef = embedding_function or self._chromadb_collection._embedding_function
 
         super().__init__(k=k)

From 7e7ed09215896acc5fb45fab7d7eadd2be527dae Mon Sep 17 00:00:00 2001
From: klopsahlong
Date: Tue, 27 Feb 2024 20:15:52 -0800
Subject: [PATCH 003/243] adding in bayesian optimizer notebook, plus a few
 minor changes to bayesian optimizer

---
 dspy/teleprompt/signature_opt_bayesian.py   |   66 +-
 examples/qa/hotpot/hotpotqa_optimized.ipynb | 1915 +++++++++++++++++++
 2 files changed, 1953 insertions(+), 28 deletions(-)
 create mode 100644 examples/qa/hotpot/hotpotqa_optimized.ipynb

diff --git a/dspy/teleprompt/signature_opt_bayesian.py b/dspy/teleprompt/signature_opt_bayesian.py
index 68d7aacf0e..bca0d299df 100644
--- a/dspy/teleprompt/signature_opt_bayesian.py
+++ b/dspy/teleprompt/signature_opt_bayesian.py
@@ -65,12 +65,12 @@ class BasicGenerateInstructionWithExamples(dspy.Signature):
     proposed_prefix_for_output_field = dspy.OutputField(desc="The string at the end of the prompt, which will help the model start solving the task")
 
 class BasicGenerateInstructionWithExamplesAndDataObservations(dspy.Signature):
-    ("""You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English.
Specifically, I will give you some ``observations`` I have made about the dataset and task, along with some ``examples`` of the expected inputs and outputs. I will also provide you with the current ``basic instruction`` that is being used for this task. Your task is to propose a new improved instruction and prefix for the output field that will lead a good language model to perform the task well. Don't be afraid to be creative.""") - basic_instruction = dspy.InputField(desc="The initial instructions before optimization") observations = dspy.InputField(desc="Observations about the dataset and task") examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task") + basic_instruction = dspy.InputField(desc="The initial instructions before optimization") proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") proposed_prefix_for_output_field = dspy.OutputField(desc="The string at the end of the prompt, which will help the model start solving the task") @@ -112,12 +112,8 @@ def __init__(self, prompt_model=None, task_model=None, teacher_settings={}, n=10 def _print_full_program(self, program): for i,predictor in enumerate(program.predictors()): if self.verbose: print(f"Predictor {i}") - if (hasattr(predictor, 'extended_signature')): - if self.verbose: print(f"i: {predictor.extended_signature.instructions}") - if self.verbose: print(f"p: {predictor.extended_signature.fields[-1].name}") - else: - if self.verbose: print(f"i: {predictor.extended_signature1.instructions}") - if self.verbose: print(f"p: {predictor.extended_signature1.fields[-1].name}") + if self.verbose: print(f"i: {self._get_signature(predictor).instructions}") + if self.verbose: print(f"p: {self._get_signature(predictor).fields[-1].name}") if self.verbose: print("\n") def _print_model_history(self, model, n=1): @@ -162,6 +158,12 @@ def _create_example_string(self, fields, example): # Joining all the field strings return '\n'.join(output) + + def _get_signature(self, predictor): + if (hasattr(predictor, 'extended_signature')): + return predictor.extended_signature + elif (hasattr(predictor, 'signature')): + return predictor.signature def _generate_first_N_candidates(self, module, N, view_data, view_examples, demo_candidates, devset): candidates = {} @@ -186,7 +188,7 @@ def _generate_first_N_candidates(self, module, N, view_data, view_examples, demo if example["augmented"]: if example_set_i not in example_set: example_set[example_set_i] = [] - fields_to_use = predictor.signature.fields + fields_to_use = self._get_signature(predictor).fields input_variable_names = [field.input_variable for field in fields_to_use] example_with_only_signature_fields = {key: value for key, value in example.items() if key in input_variable_names} example_string = self._create_example_string(fields_to_use, example_with_only_signature_fields) @@ -200,12 +202,8 @@ def _generate_first_N_candidates(self, module, N, view_data, view_examples, demo for predictor in module.predictors(): basic_instruction = None basic_prefix = None - if (hasattr(predictor, 'extended_signature')): - basic_instruction = predictor.extended_signature.instructions - basic_prefix = predictor.extended_signature.fields[-1].name - else: - basic_instruction = predictor.extended_signature1.instructions - basic_prefix = predictor.extended_signature1.fields[-1].name + basic_instruction = self._get_signature(predictor).instructions + basic_prefix = self._get_signature(predictor).fields[-1].name with 
dspy.settings.context(lm=self.prompt_model):
                 # Data & Examples
                 if view_data and view_examples:
@@ -251,6 +249,14 @@ def compile(self, student, *, devset, optuna_trials_num, max_bootstrapped_demos,
         # Set up program and evaluation function
         module = student.deepcopy()
         evaluate = Evaluate(devset=devset, metric=self.metric, **eval_kwargs)
+        
+        # In the case where the bootstrapped and labeled demos are set to 0, we'll still bootstrap examples to use in our meta prompt
+        if max_bootstrapped_demos==0 and max_labeled_demos==0: #TODO: address case when max_bootstrapped alone is 0
+            max_bootstrapped_demos_for_candidate_gen = 1
+            max_labeled_demos_for_candidate_gen = 1 #TODO: this might only need to be 0
+        else:
+            max_bootstrapped_demos_for_candidate_gen = max_bootstrapped_demos
+            max_labeled_demos_for_candidate_gen = max_labeled_demos
 
         # Generate N few shot example sets
         demo_candidates = {}
@@ -267,7 +273,7 @@ def compile(self, student, *, devset, optuna_trials_num, max_bootstrapped_demos,
             rng = random.Random(i)
             shuffled_devset = devset[:]  # Create a copy of devset
            rng.shuffle(shuffled_devset)  # Shuffle the copy
-            tp = BootstrapFewShot(metric = self.metric, max_bootstrapped_demos=max_bootstrapped_demos, max_labeled_demos=max_labeled_demos, teacher_settings=self.teacher_settings)
+            tp = BootstrapFewShot(metric = self.metric, max_bootstrapped_demos=max_bootstrapped_demos_for_candidate_gen, max_labeled_demos=max_labeled_demos_for_candidate_gen, teacher_settings=self.teacher_settings)
             candidate_program = tp.compile(student=module.deepcopy(), trainset=shuffled_devset)
 
             # Store the candidate demos
@@ -275,10 +281,14 @@ def compile(self, student, *, devset, optuna_trials_num, max_bootstrapped_demos,
                 if id(module_p) not in demo_candidates.keys():
                     demo_candidates[id(module_p)] = []
                 demo_candidates[id(module_p)].append(candidate_p.demos)
-
+        
         # Generate N candidate prompts
         instruction_candidates, _ = self._generate_first_N_candidates(module, self.n, view_data, view_examples, demo_candidates, devset)
 
+        # Reset demo_candidates to None for our optimization if the user asked for no fewshot examples
+        if max_bootstrapped_demos==0 and max_labeled_demos==0:
+            demo_candidates = None
+
         # Initialize variables to store the best program and its score
         best_score = float('-inf')
         best_program = None
@@ -293,20 +303,20 @@ def objective(trial):
             candidate_program = baseline_program.deepcopy()
 
             # Suggest the instruction to use for our predictor
-            if self.verbose: print(f"Starting trial num: {trial_num}")
+            print(f"Starting trial #{trial_num}")
             trial_logs[trial_num] = {}
 
             for p_old, p_new in zip(baseline_program.predictors(), candidate_program.predictors()):
                 # Get instruction candidates for our given predictor
                 p_instruction_candidates = instruction_candidates[id(p_old)]
-                p_demo_candidates = demo_candidates[id(p_old)]
+                if demo_candidates: p_demo_candidates = demo_candidates[id(p_old)]
 
                 # Suggest the index of the instruction candidate to use in our trial
                 instruction_idx = trial.suggest_categorical(f"{id(p_old)}_predictor_instruction",range(len(p_instruction_candidates)))
-                demos_idx = trial.suggest_categorical(f"{id(p_old)}_predictor_demos",range(len(p_demo_candidates)))
+                if demo_candidates: demos_idx = trial.suggest_categorical(f"{id(p_old)}_predictor_demos",range(len(p_demo_candidates)))
                 trial_logs[trial_num][f"{id(p_old)}_predictor_instruction"] = instruction_idx
-                trial_logs[trial_num][f"{id(p_old)}_predictor_demos"] = demos_idx
+                if demo_candidates: trial_logs[trial_num][f"{id(p_old)}_predictor_demos"] = demos_idx
 
                 # Get the selected instruction candidate
                selected_candidate = p_instruction_candidates[instruction_idx]
 
@@ -314,17 +324,17 @@ def objective(trial):
                 selected_prefix = selected_candidate.proposed_prefix_for_output_field.strip('"').strip()
 
                 # Use this candidate in our program
-                p_new.extended_signature.instructions = selected_instruction
-                p_new.extended_signature.fields[-1] = p_new.extended_signature.fields[-1]._replace(name=selected_prefix)
+                self._get_signature(p_new).instructions = selected_instruction
+                self._get_signature(p_new).fields[-1] = self._get_signature(p_new).fields[-1]._replace(name=selected_prefix)
 
                 # Get the selected demos
-                selected_demos = p_demo_candidates[demos_idx]
+                if demo_candidates: selected_demos = p_demo_candidates[demos_idx]
 
                 # Use these demos in our program
-                p_new.demos = selected_demos
+                if demo_candidates: p_new.demos = selected_demos
 
             if self.verbose: print("Evaling the following program:")
-            self._print_full_program(candidate_program)
+            if self.verbose: self._print_full_program(candidate_program)
             trial_logs[trial_num]["program"] = candidate_program
 
             # Evaluate with the new prompts
@@ -347,14 +357,14 @@ def objective(trial):
 
                 # Handle pruning based on the intermediate value.
                 if trial.should_prune():
-                    if self.verbose: print(f"Optuna decided to prune!")
+                    print("Trial pruned.")
                     trial_logs[trial_num]["score"] = curr_weighted_avg_score
                     trial_logs[trial_num]["pruned"] = True
                     trial_num += 1
                     raise optuna.TrialPruned()
 
             if self.verbose: print(f"Fully evaled score: {curr_weighted_avg_score}")
-            self._print_model_history(self.task_model, n=1)
+            if self.verbose: self._print_model_history(self.task_model, n=1)
             score = curr_weighted_avg_score
 
             trial_logs[trial_num]["score"] = curr_weighted_avg_score
diff --git a/examples/qa/hotpot/hotpotqa_optimized.ipynb b/examples/qa/hotpot/hotpotqa_optimized.ipynb
new file mode 100644
index 0000000000..cf002054e1
--- /dev/null
+++ b/examples/qa/hotpot/hotpotqa_optimized.ipynb
@@ -0,0 +1,1915 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# __DSPy Prompt Optimizers__: Optimizing instructions & fewshot examples for LM programs"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In this notebook, we'll use DSPy's prompt optimizers to tune the instructions and few-shot examples of a multihop retrieval program for HotPotQA, comparing its performance before and after optimization."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 0] Setup"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "First, we'll __load in the cached requests__ for this task, so that we don't actually need to call any LMs for this notebook."
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "ename": "SyntaxError", + "evalue": "unterminated string literal (detected at line 11) (1838760429.py, line 11)", + "output_type": "error", + "traceback": [ + "\u001b[0;36m Cell \u001b[0;32mIn[1], line 11\u001b[0;36m\u001b[0m\n\u001b[0;31m os.environ[\"DSP_CACHEDIR\"] = f\"repo_clone_path/sub_dir\u001b[0m\n\u001b[0m ^\u001b[0m\n\u001b[0;31mSyntaxError\u001b[0m\u001b[0;31m:\u001b[0m unterminated string literal (detected at line 11)\n" + ] + } + ], + "source": [ + "!rm -rf DSPy_optimizer_cache\n", + "!git clone https://huggingface.co/kopsahlong/DSPy_optimizer_cache\n", + "%cd DSPy_optimizer_cache/\n", + "# !git checkout master\n", + "%cd ..\n", + "import os\n", + "repo_clone_path = 'DSPy_optimizer_cache/cache' #TODO: update this cache to just contain the runs we need!!\n", + "\n", + "# Set up the cache for this notebook\n", + "os.environ[\"DSP_NOTEBOOK_CACHEDIR\"] = repo_clone_path\n", + "os.environ[\"DSP_CACHEDIR\"] = f\"{repo_clone_path}/cachedir\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/lfs/0/kristaoo/home\n" + ] + } + ], + "source": [ + "!echo $HOME" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will also specify the __prompt LM model__ (in this case GPT 3.5), the __task LM model__ (Llama 13B) and the retrieval model we'll be using for our task (a HotPotQA multihop retrieval task)." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/lfs/0/kristaoo/miniconda3/envs/dspy_test/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + } + ], + "source": [ + "import os \n", + "import dspy\n", + "import openai\n", + "import os\n", + "\n", + "### NOTE: if you'd like to run this code without a cache, you can remove these lines to configure your OPEN AI key ###\n", + "# os.environ['OPENAI_API_KEY'] = \"TODO: ADD YOUR OPEN AI KEY HERE\"\n", + "# openai.api_key = os.environ.get('OPENAI_API_KEY')\n", + "# openai.api_base = \"https://api.openai.com/v1\"\n", + "\n", + "prompt_model_name = \"gpt-3.5-turbo-1106\"\n", + "task_model_name = \"meta-llama/Llama-2-13b-chat-hf\"\n", + "colbert_v2_endpoint = \"http://20.102.90.50:2017/wiki17_abstracts\"\n", + "\n", + "ports = [7140, 7141, 7142, 7143] #TODO: REMOVE THIS\n", + "\n", + "prompt_model = dspy.OpenAI(model=prompt_model_name, max_tokens=150)\n", + "task_model = dspy.HFClientTGI(model=task_model_name, port=[7140, 7141, 7142, 7143], max_tokens=150)\n", + "\n", + "colbertv2 = dspy.ColBERTv2(url=colbert_v2_endpoint)\n", + "\n", + "dspy.settings.configure(rm=colbertv2, lm=task_model)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1] Define Task" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here, we'll define the program that we'd like to run, which is a multihop [...] (we can say that it was loosely inspired by a certain paper). We additionally load in the data, and define how we'd like to evaluate this task." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/lfs/0/kristaoo/miniconda3/envs/dspy_test/lib/python3.10/site-packages/datasets/table.py:1421: FutureWarning: promote has been superseded by mode='default'.\n", + " table = cls._concat_blocks(blocks, axis=0)\n" + ] + } + ], + "source": [ + "from dspy.evaluate import Evaluate\n", + "import re \n", + "from dspy.datasets import HotPotQA\n", + "\n", + "class ReturnRankedDocuments(dspy.Signature):\n", + " \"\"\"Given a question we are trying to answer and a list of passages, return a comma separated list of the numbers associated with each passage. These numbers should be ordered by helpfulness in answering the question, with most helpful passage number first, and the least helpful last.\"\"\"\n", + " question = dspy.InputField(desc=\"The question we're trying to answer.\")\n", + " context = dspy.InputField(desc=\"List of potentially related passages.\")\n", + " ranking = dspy.OutputField(desc=\"A comma separated list of numbers corresponding to passage indices, ranked in descending order by their helpfulness in answering our question.\")\n", + "\n", + "class RankingMultiHop(dspy.Module):\n", + " def __init__(self, hops, num_passages_to_retrieve, max_passages_in_context):\n", + " super().__init__()\n", + " self.hops = hops\n", + " self.num_passages_to_retrieve = num_passages_to_retrieve\n", + " self.max_passages_in_context = max_passages_in_context\n", + " self.retrieve = dspy.Retrieve(k = self.num_passages_to_retrieve)\n", + " self.generate_query = dspy.ChainOfThought(\"context ,question->search_query\")\n", + " self.generate_answer = dspy.ChainOfThought(\"context ,question->answer\")\n", + " self.generate_ranking = dspy.ChainOfThought(ReturnRankedDocuments)\n", + " \n", + " def forward (self,question) :\n", + " context = []\n", + " full_context = []\n", + " top_context = []\n", + " max_passage_num = self.max_passages_in_context\n", + " for hop in range(self.hops):\n", + " # Get a new query\n", + " query = self.generate_query(context = context, question = question).search_query\n", + " # Get new passages\n", + " context = self.retrieve(query).passages\n", + " # Add these new passages to the previous top context \n", + " full_context = top_context + context\n", + " # Get the most important indices, ranked\n", + " most_important_indices = self.generate_ranking(question=question, context=full_context).ranking\n", + " indices = [int(num) for num in re.findall(r'\\d+', most_important_indices)]\n", + "\n", + " if len(indices) < max_passage_num:\n", + " indices = range(1,max_passage_num+1)\n", + "\n", + " valid_indices = [index-1 for index in indices if index-1 < len(context)]\n", + " top_indices = sorted(valid_indices, key=lambda x: x)[:max_passage_num+1]\n", + " most_important_context_list = [context[idx] for idx in top_indices]\n", + " # Save the top context\n", + " top_context = most_important_context_list\n", + "\n", + " return dspy.Prediction(context=context, answer=self.generate_answer(context = top_context , question = question).answer)\n", + "\n", + "program = RankingMultiHop(hops=4, num_passages_to_retrieve=5, max_passages_in_context=5)\n", + "\n", + "# Load and configure the datasets.\n", + "TRAIN_SIZE = 500\n", + "EVAL_SIZE = 500\n", + "\n", + "hotpot_dataset = HotPotQA(train_seed=1, eval_seed=2023, test_size=0)\n", + "trainset = [x.with_inputs('question') for x in hotpot_dataset.train][:TRAIN_SIZE]\n", + "devset = 
[x.with_inputs('question') for x in hotpot_dataset.dev][:EVAL_SIZE]\n", + "\n", + "# Set up metrics\n", + "NUM_THREADS = 10\n", + "\n", + "metric = dspy.evaluate.answer_exact_match\n", + "\n", + "# kwargs = dict(num_threads=NUM_THREADS, display_progress=True, display_table=None)\n", + "kwargs = dict(num_threads=NUM_THREADS, display_progress=True)\n", + "evaluate = Evaluate(devset=devset, metric=metric, **kwargs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2] Baseline Evaluation\n", + "Now, we'll quickly evaluate our baseline program so that we can see how the performance using the Prompt Optimizer compares. We should see performance of about __16%__ on our trainset, and __21.4%__ on our devset." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + " 0%| | 0/500 [00:00 Date: Thu, 29 Feb 2024 03:34:36 +0530 Subject: [PATCH 004/243] Update API References --- api/assertions.md | 261 ++++++++++++++++++ api/hosting_language_models_locally/MLC.md | 48 ---- .../_category_.json | 8 - api/intro.md | 4 +- api/language_model_clients/Anyscale.md | 31 +++ api/language_model_clients/AzureOpenAI.md | 56 ++++ api/language_model_clients/Cohere.md | 34 +++ api/language_model_clients/Databricks.md | 43 +++ api/language_model_clients/HFClientVLLM.md | 23 ++ api/language_model_clients/OpenAI.md | 54 ++++ api/language_model_clients/TGI.md | 34 +++ api/language_model_clients/Together.md | 32 +++ api/language_model_clients/_category_.json | 8 + .../HFModel.md | 8 +- api/local_language_model_clients/MLC.md | 41 +++ .../Ollama.md | 6 +- .../TGI.md | 42 +-- .../_category_.json | 8 + .../vLLM.md | 2 +- api/modules/ChainOfThought.md | 2 + api/modules/_category_.json | 2 +- api/optimizers/BootstrapFewShot.md | 63 +++++ .../BootstrapFewShotWithRandomSearch.md | 59 ++++ api/optimizers/BootstrapFinetune.md | 56 ++++ api/optimizers/Ensemble.md | 47 ++++ api/optimizers/LabeledFewShot.md | 58 ++++ api/optimizers/_category_.json | 8 + .../AzureCognitiveSearch.md | 34 +++ api/retrieval_model_clients/ChromadbRM.md | 65 +++++ api/retrieval_model_clients/ColBERTv2.md | 51 ++++ api/retrieval_model_clients/FaissRM.md | 62 +++++ api/retrieval_model_clients/_category_.json | 8 + docs/building-blocks/1-language_models.md | 10 +- .../local_models/HFClientTGI.mdx | 2 +- .../local_models/HFClientVLLM.mdx | 2 +- 35 files changed, 1180 insertions(+), 92 deletions(-) create mode 100644 api/assertions.md delete mode 100644 api/hosting_language_models_locally/MLC.md delete mode 100644 api/hosting_language_models_locally/_category_.json create mode 100644 api/language_model_clients/Anyscale.md create mode 100644 api/language_model_clients/AzureOpenAI.md create mode 100644 api/language_model_clients/Cohere.md create mode 100644 api/language_model_clients/Databricks.md create mode 100644 api/language_model_clients/HFClientVLLM.md create mode 100644 api/language_model_clients/OpenAI.md create mode 100644 api/language_model_clients/TGI.md create mode 100644 api/language_model_clients/Together.md create mode 100644 api/language_model_clients/_category_.json rename api/{hosting_language_models_locally => local_language_model_clients}/HFModel.md (54%) create mode 100644 api/local_language_model_clients/MLC.md rename api/{hosting_language_models_locally => local_language_model_clients}/Ollama.md (89%) rename api/{hosting_language_models_locally => local_language_model_clients}/TGI.md (51%) create mode 100644 
api/local_language_model_clients/_category_.json rename api/{hosting_language_models_locally => local_language_model_clients}/vLLM.md (97%) create mode 100644 api/optimizers/BootstrapFewShot.md create mode 100644 api/optimizers/BootstrapFewShotWithRandomSearch.md create mode 100644 api/optimizers/BootstrapFinetune.md create mode 100644 api/optimizers/Ensemble.md create mode 100644 api/optimizers/LabeledFewShot.md create mode 100644 api/optimizers/_category_.json create mode 100644 api/retrieval_model_clients/AzureCognitiveSearch.md create mode 100644 api/retrieval_model_clients/ChromadbRM.md create mode 100644 api/retrieval_model_clients/ColBERTv2.md create mode 100644 api/retrieval_model_clients/FaissRM.md create mode 100644 api/retrieval_model_clients/_category_.json diff --git a/api/assertions.md b/api/assertions.md new file mode 100644 index 0000000000..07dfa55f4c --- /dev/null +++ b/api/assertions.md @@ -0,0 +1,261 @@ +--- +sidebar_position: 7 +--- + +# DSPy Assertions + +Language models (LMs) have transformed how we interact with machine learning, offering vast capabilities in natural language understanding and generation. However, ensuring these models adhere to domain-specific constraints remains a challenge. Despite the growth of techniques like fine-tuning or “prompt engineering”, these approaches are extremely tedious and rely on heavy, manual hand-waving to guide the LMs in adhering to specific constraints. Even DSPy's modularity of programming prompting pipelines lacks mechanisms to effectively and automatically enforce these constraints. + +To address this, we introduce DSPy Assertions, a feature within the DSPy framework designed to automate the enforcement of computational constraints on LMs. DSPy Assertions empower developers to guide LMs towards desired outcomes with minimal manual intervention, enhancing the reliability, predictability, and correctness of LM outputs. + +## dspy.Assert and dspy.Suggest API + +We introduce two primary constructs within DSPy Assertions: + +- **`dspy.Assert`**: + - **Parameters**: + - `constraint (bool)`: Outcome of Python-defined boolean validation check. + - `msg (Optional[str])`: User-defined error message providing feedback or correction guidance. + - `backtrack (Optional[module])`: Specifies target module for retry attempts upon constraint failure. The default backtracking module is the last module before the assertion. + - **Behavior**: Initiates retry upon failure, dynamically adjusting the pipeline's execution. If failures persist, it halts execution and raises a `dspy.AssertionError`. + +- **`dspy.Suggest`**: + - **Parameters**: Similar to `dspy.Assert`. + - **Behavior**: Encourages self-refinement through retries without enforcing hard stops. Logs failures after maximum backtracking attempts and continues execution. + +- **dspy.Assert vs. Python Assertions**: Unlike conventional Python `assert` statements that terminate the program upon failure, `dspy.Assert` conducts a sophisticated retry mechanism, allowing the pipeline to adjust. 
+
+Specifically, when a constraint is not met:
+
+- Backtracking Mechanism: An under-the-hood backtracking process is initiated, offering the model a chance to self-refine and proceed, which is done through:
+- Dynamic Signature Modification: internally modifying your DSPy program’s Signature by adding the following fields:
+    - Past Output: your model's past output that did not pass the validation_fn
+    - Instruction: your user-defined feedback message on what went wrong and what possibly to fix
+
+If the error continues past the `max_backtracking_attempts`, then `dspy.Assert` will halt the pipeline execution, alerting you with a `dspy.AssertionError`. This ensures your program doesn't continue executing with “bad” LM behavior and immediately highlights sample failure outputs for user assessment.
+
+- **dspy.Suggest vs. dspy.Assert**: `dspy.Suggest` on the other hand offers a softer approach. It maintains the same retry backtracking as `dspy.Assert` but instead serves as a gentle nudger. If the model outputs cannot pass the constraints after the `max_backtracking_attempts`, `dspy.Suggest` will log the persistent failure and continue execution of the program on the rest of the data. This ensures the LM pipeline works in a "best-effort" manner without halting execution.
+
+- **`dspy.Suggest`** statements are best utilized as "helpers" during the evaluation phase, offering guidance and potential corrections without halting the pipeline.
+- **`dspy.Assert`** statements are recommended during the development stage as "checkers" to ensure the LM behaves as expected, providing a robust mechanism for identifying and addressing errors early in the development cycle.
+
+
+## Use Case: Including Assertions in DSPy Programs
+
+We start with an example of a multi-hop QA SimplifiedBaleen pipeline as defined in the intro walkthrough.
+
+```python
+class SimplifiedBaleen(dspy.Module):
+    def __init__(self, passages_per_hop=2, max_hops=2):
+        super().__init__()
+
+        self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
+        self.retrieve = dspy.Retrieve(k=passages_per_hop)
+        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)
+        self.max_hops = max_hops
+
+    def forward(self, question):
+        context = []
+        prev_queries = [question]
+
+        for hop in range(self.max_hops):
+            query = self.generate_query[hop](context=context, question=question).query
+            prev_queries.append(query)
+            passages = self.retrieve(query).passages
+            context = deduplicate(context + passages)
+
+        pred = self.generate_answer(context=context, question=question)
+        pred = dspy.Prediction(context=context, answer=pred.answer)
+        return pred
+
+baleen = SimplifiedBaleen()
+
+baleen(question = "Which award did Gary Zukav's first book receive?")
+```
+
+To include DSPy Assertions, we simply define our validation functions and declare our assertions following the respective model generation.
+
+For this use case, suppose we want to impose the following constraints:
+    1. Length - each query should be less than 100 characters
+    2. Uniqueness - each generated query should differ from previously-generated queries.
+
+We can define these validation checks as boolean functions:
+
+```python
+#simplistic boolean check for query length
+len(query) <= 100
+
+#Python function for validating distinct queries
+def validate_query_distinction_local(previous_queries, query):
+    """check if query is distinct from previous queries"""
+    if previous_queries == []:
+        return True
+    if dspy.evaluate.answer_exact_match_str(query, previous_queries, frac=0.8):
+        return False
+    return True
+```
+
+We can declare these validation checks through `dspy.Suggest` statements (as we want to test the program in a best-effort demonstration). We want to keep these after the query generation `query = self.generate_query[hop](context=context, question=question).query`.
+
+```python
+dspy.Suggest(
+    len(query) <= 100,
+    "Query should be short and less than 100 characters",
+)
+
+dspy.Suggest(
+    validate_query_distinction_local(prev_queries, query),
+    "Query should be distinct from: "
+    + "; ".join(f"{i+1}) {q}" for i, q in enumerate(prev_queries)),
+)
+```
+
+It is recommended to define a program with assertions separately from your original program if you are doing comparative evaluation for the effect of assertions. If not, feel free to add Assertions directly to your original program!
+
+Let's take a look at how the SimplifiedBaleen program will look with Assertions included:
+
+```python
+class SimplifiedBaleenAssertions(dspy.Module):
+    def __init__(self, passages_per_hop=2, max_hops=2):
+        super().__init__()
+        self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
+        self.retrieve = dspy.Retrieve(k=passages_per_hop)
+        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)
+        self.max_hops = max_hops
+
+    def forward(self, question):
+        context = []
+        prev_queries = [question]
+
+        for hop in range(self.max_hops):
+            query = self.generate_query[hop](context=context, question=question).query
+
+            dspy.Suggest(
+                len(query) <= 100,
+                "Query should be short and less than 100 characters",
+            )
+
+            dspy.Suggest(
+                validate_query_distinction_local(prev_queries, query),
+                "Query should be distinct from: "
+                + "; ".join(f"{i+1}) {q}" for i, q in enumerate(prev_queries)),
+            )
+
+            prev_queries.append(query)
+            passages = self.retrieve(query).passages
+            context = deduplicate(context + passages)
+
+        if all_queries_distinct(prev_queries):
+            self.passed_suggestions += 1
+
+        pred = self.generate_answer(context=context, question=question)
+        pred = dspy.Prediction(context=context, answer=pred.answer)
+        return pred
+```
+
+Now calling programs with DSPy Assertions requires one last step, and that is transforming the program to wrap it with internal assertions backtracking and Retry logic.
+ +```python +from dspy.primitives.assertions import assert_transform_module, backtrack_handler + +baleen_with_assertions = assert_transform_module(SimplifiedBaleenAssertions(), backtrack_handler) + +# backtrack_handler is parameterized over a few settings for the backtracking mechanism +# To change the number of max retry attempts, you can do +baleen_with_assertions_retry_once = assert_transform_module(SimplifiedBaleenAssertions(), + functools.partial(backtrack_handler, max_backtracks=1)) +``` + +Alternatively, you can also directly call `activate_assertions` on the program with `dspy.Assert/Suggest` statements using the default backtracking mechanism (`max_backtracks=2`): + +```python +baleen_with_assertions = SimplifiedBaleenAssertions().activate_assertions() +``` + +Now let's take a look at the internal LM backtracking by inspecting the history of the LM query generations. Here we see that when a query fails to pass the validation check of being less than 100 characters, its internal `GenerateSearchQuery` signature is dynamically modified during the backtracking+Retry process to include the past query and the corresponding user-defined instruction: `"Query should be short and less than 100 characters"`. + + +``` +Write a simple search query that will help answer a complex question. + +--- + +Follow the following format. + +Context: may contain relevant facts + +Question: ${question} + +Reasoning: Let's think step by step in order to ${produce the query}. We ... + +Query: ${query} + +--- + +Context: +[1] «Kerry Condon | Kerry Condon (born 4 January 1983) is [...]» +[2] «Corona Riccardo | Corona Riccardo (c. 1878October 15, 1917) was [...]» + +Question: Who acted in the shot film The Shore and is also the youngest actress ever to play Ophelia in a Royal Shakespeare Company production of "Hamlet." ? + +Reasoning: Let's think step by step in order to find the answer to this question. First, we need to identify the actress who played Ophelia in a Royal Shakespeare Company production of "Hamlet." Then, we need to find out if this actress also acted in the short film "The Shore." + +Query: "actress who played Ophelia in Royal Shakespeare Company production of Hamlet" + "actress in short film The Shore" + + + +Write a simple search query that will help answer a complex question. + +--- + +Follow the following format. + +Context: may contain relevant facts + +Question: ${question} + +Past Query: past output with errors + +Instructions: Some instructions you must satisfy + +Query: ${query} + +--- + +Context: +[1] «Kerry Condon | Kerry Condon (born 4 January 1983) is an Irish television and film actress, best known for her role as Octavia of the Julii in the HBO/BBC series "Rome," as Stacey Ehrmantraut in AMC's "Better Call Saul" and as the voice of F.R.I.D.A.Y. in various films in the Marvel Cinematic Universe. She is also the youngest actress ever to play Ophelia in a Royal Shakespeare Company production of "Hamlet."» +[2] «Corona Riccardo | Corona Riccardo (c. 1878October 15, 1917) was an Italian born American actress who had a brief Broadway stage career before leaving to become a wife and mother. Born in Naples she came to acting in 1894 playing a Mexican girl in a play at the Empire Theatre. Wilson Barrett engaged her for a role in his play "The Sign of the Cross" which he took on tour of the United States. Riccardo played the role of Ancaria and later played Berenice in the same play. Robert B. 
Mantell in 1898 who struck by her beauty also cast her in two Shakespeare plays, "Romeo and Juliet" and "Othello". Author Lewis Strang writing in 1899 said Riccardo was the most promising actress in America at the time. Towards the end of 1898 Mantell chose her for another Shakespeare part, Ophelia im Hamlet. Afterwards she was due to join Augustin Daly's Theatre Company but Daly died in 1899. In 1899 she gained her biggest fame by playing Iras in the first stage production of Ben-Hur.» + +Question: Who acted in the shot film The Shore and is also the youngest actress ever to play Ophelia in a Royal Shakespeare Company production of "Hamlet." ? + +Past Query: "actress who played Ophelia in Royal Shakespeare Company production of Hamlet" + "actress in short film The Shore" + +Instructions: Query should be short and less than 100 characters + +Query: "actress Ophelia RSC Hamlet" + "actress The Shore" + +``` + + +## Assertion-Driven Optimizations + +DSPy Assertions work with optimizations that DSPy offers, particularly with `BootstrapFewShotWithRandomSearch`, including the following settings: + +- Compilation with Assertions + This includes assertion-driven example bootstrapping and counterexample bootstrapping during compilation. The teacher model for bootstrapping few-shot demonstrations can make use of DSPy Assertions to offer robust bootstrapped examples for the student model to learn from during inference. In this setting, the student model does not perform assertion aware optimizations (backtracking and retry) during inference. +- Compilation + Inference with Assertions + -This includes assertion-driven optimizations in both compilation and inference. Now the teacher model offers assertion-driven examples but the student can further optimize with assertions of its own during inference time. +```python +teleprompter = BootstrapFewShotWithRandomSearch( + metric=validate_context_and_answer_and_hops, + max_bootstrapped_demos=max_bootstrapped_demos, + num_candidate_programs=6, +) + +#Compilation with Assertions +compiled_with_assertions_baleen = teleprompter.compile(student = baleen, teacher = baleen_with_assertions, trainset = trainset, valset = devset) + +#Compilation + Inference with Assertions +compiled_baleen_with_assertions = teleprompter.compile(student=baleen_with_assertions, teacher = baleen_with_assertions, trainset=trainset, valset=devset) + +``` \ No newline at end of file diff --git a/api/hosting_language_models_locally/MLC.md b/api/hosting_language_models_locally/MLC.md deleted file mode 100644 index 87bf65d0d0..0000000000 --- a/api/hosting_language_models_locally/MLC.md +++ /dev/null @@ -1,48 +0,0 @@ -## Setting up an MLC language model - -### Prerequisites - -Install the required packages using the following commands: - -```shell -pip install --no-deps --pre --force-reinstall mlc-ai-nightly-cu118 mlc-chat-nightly-cu118 -f https://mlc.ai/wheels -pip install transformers -git lfs install -``` - -Adjust the pip wheels according to your OS/platform by referring to the provided commands in [MLC packages](https://mlc.ai/package/). - - -### Running MLC Llama-2 models - -1. Create a directory for prebuilt models: - -```shell -mkdir -p dist/prebuilt -``` - -2. Clone the necessary libraries from the repository: - -```shell -git clone https://github.com/mlc-ai/binary-mlc-llm-libs.git dist/prebuilt/lib -cd dist/prebuilt -``` - -3. 
Choose a Llama-2 model from [MLC LLMs](https://huggingface.co/mlc-ai) and clone the model repository:
-
-```shell
-git clone https://huggingface.co/mlc-ai/mlc-chat-Llama-2-7b-chat-hf-q4f16_1
-```
-
-### Sending requests to the server
-
-Initialize the `ChatModuleClient` within your program with the desired parameters. Here's an example call:
-
-```python
-model = 'dist/prebuilt/mlc-chat-Llama-2-7b-chat-hf-q4f16_1'
-model_path = 'dist/prebuilt/lib/Llama-2-7b-chat-hf-q4f16_1-cuda.so'
-
-llama = dspy.ChatModuleClient(model=model, model_path=model_path)
-```
-
-Please refer to the [official MLC repository](https://github.com/mlc-ai/mlc-llm) for more detailed [docs](https://mlc.ai/mlc-llm/docs/get_started/try_out.html).
diff --git a/api/hosting_language_models_locally/_category_.json b/api/hosting_language_models_locally/_category_.json
deleted file mode 100644
index f729af67a9..0000000000
--- a/api/hosting_language_models_locally/_category_.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-    "label": "Hosting Local Language Models in DSPy",
-    "position": 3,
-    "link": {
-        "type": "generated-index",
-        "description": "Hosting Local Language Models in DSPy"
-    }
-}
\ No newline at end of file
diff --git a/api/intro.md b/api/intro.md
index 165f76c2d2..7859c7d0e5 100644
--- a/api/intro.md
+++ b/api/intro.md
@@ -2,4 +2,6 @@
 sidebar_position: 1
 ---
 
-# API References
\ No newline at end of file
+# API References
+
+Welcome to the API References for DSPy! This is where you'll find clear, reference-style documentation for the components DSPy provides, such as modules, optimizers, and model clients. Everything is organized so you can quickly find what you need, and each page shows you how to set a component up and use it in your own projects.
\ No newline at end of file
diff --git a/api/language_model_clients/Anyscale.md b/api/language_model_clients/Anyscale.md
new file mode 100644
index 0000000000..8fc5241fd2
--- /dev/null
+++ b/api/language_model_clients/Anyscale.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 6
+---
+
+# dspy.Anyscale
+
+### Usage
+
+```python
+lm = dspy.Anyscale(model="mistralai/Mistral-7B-Instruct-v0.1")
+```
+
+### Constructor
+
+The constructor initializes the base class `LM` and verifies the `api_key` for using the Anyscale API.
+We expect the following environment variables to be set:
+- `ANYSCALE_API_KEY`: API key for Anyscale.
+- `ANYSCALE_API_BASE`: API base URL for Anyscale.
+
+
+```python
+class Anyscale(HFModel):
+    def __init__(self, model, **kwargs):
+```
+
+**Parameters:**
+- `model` (_str_): models hosted on Anyscale.
+
+### Methods
+
+Refer to [`dspy.OpenAI`](#openai) documentation.
diff --git a/api/language_model_clients/AzureOpenAI.md b/api/language_model_clients/AzureOpenAI.md
new file mode 100644
index 0000000000..1f173eb5e5
--- /dev/null
+++ b/api/language_model_clients/AzureOpenAI.md
@@ -0,0 +1,56 @@
+---
+sidebar_position: 2
+---
+
+# dspy.AzureOpenAI
+
+### Usage
+
+```python
+lm = dspy.AzureOpenAI(api_base='...', api_version='2023-12-01-preview', model='gpt-3.5-turbo')
+```
+
+### Constructor
+
+The constructor initializes the base class `LM` and verifies the provided arguments like the `api_provider`, `api_key`, and `api_base` to set up OpenAI request retrieval through Azure.
The `kwargs` attribute is initialized with default values for relevant text generation parameters needed for communicating with the GPT API, such as `temperature`, `max_tokens`, `top_p`, `frequency_penalty`, `presence_penalty`, and `n`.
+
+```python
+class AzureOpenAI(LM):
+    def __init__(
+        self,
+        api_base: str,
+        api_version: str,
+        model: str = "gpt-3.5-turbo-instruct",
+        api_key: Optional[str] = None,
+        model_type: Literal["chat", "text"] = None,
+        **kwargs,
+    ):
+```
+
+
+
+**Parameters:**
+- `api_base` (str): Azure Base URL.
+- `api_version` (str): Version identifier for Azure OpenAI API.
+- `api_key` (_Optional[str]_, _optional_): API provider authentication token. Retrieves from `AZURE_OPENAI_KEY` environment variable if None.
+- `model_type` (_Literal["chat", "text"]_): Specified model type to use, defaults to 'chat'.
+- `**kwargs`: Additional language model arguments to pass to the API provider.
+
+### Methods
+
+#### `__call__(self, prompt: str, only_completed: bool = True, return_sorted: bool = False, **kwargs) -> List[Dict[str, Any]]`
+
+Retrieves completions from Azure OpenAI Endpoints by calling `request`.
+
+Internally, the method handles the specifics of preparing the request prompt and corresponding payload to obtain the response.
+
+After generation, the completions are post-processed based on the `model_type` parameter. If the parameter is set to 'chat', the generated content will be found in `choice["message"]["content"]`. Otherwise, the generated text will be `choice["text"]`.
+
+**Parameters:**
+- `prompt` (_str_): Prompt to send to Azure OpenAI.
+- `only_completed` (_bool_, _optional_): Flag to return only completed responses and ignore completion due to length. Defaults to True.
+- `return_sorted` (_bool_, _optional_): Flag to sort the completion choices using the returned averaged log-probabilities. Defaults to False.
+- `**kwargs`: Additional keyword arguments for completion request.
+
+**Returns:**
+- `List[Dict[str, Any]]`: List of completion choices.
\ No newline at end of file
diff --git a/api/language_model_clients/Cohere.md b/api/language_model_clients/Cohere.md
new file mode 100644
index 0000000000..1133d8a107
--- /dev/null
+++ b/api/language_model_clients/Cohere.md
@@ -0,0 +1,34 @@
+---
+sidebar_position: 3
+---
+
+# dsp.Cohere
+
+### Usage
+
+```python
+lm = dsp.Cohere(model='command-nightly')
+```
+
+### Constructor
+
+The constructor initializes the base class `LM` and verifies the `api_key` to set up Cohere request retrieval.
+
+```python
+class Cohere(LM):
+    def __init__(
+        self,
+        model: str = "command-nightly",
+        api_key: Optional[str] = None,
+        stop_sequences: List[str] = [],
+    ):
+```
+
+**Parameters:**
+- `model` (_str_): Cohere pretrained models. Defaults to `command-nightly`.
+- `api_key` (_Optional[str]_, _optional_): API authentication token from Cohere. Defaults to None.
+- `stop_sequences` (_List[str]_, _optional_): List of stopping tokens to end generation.
+
+### Methods
+
+Refer to [`dspy.OpenAI`](#openai) documentation.
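+
+Under the hood, the client is invoked like any other DSPy LM. As a minimal sketch of sending a request once the client is configured (the prompt is illustrative, and a valid Cohere API key is assumed to be set):
+
+```python
+lm = dsp.Cohere(model='command-nightly')
+dspy.settings.configure(lm=lm)
+
+# Calling the client directly returns a list of completion strings.
+completions = lm("Write a one-sentence definition of prompt engineering.")
+print(completions[0])
+```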
diff --git a/api/language_model_clients/Databricks.md b/api/language_model_clients/Databricks.md
new file mode 100644
index 0000000000..f6b2c660ff
--- /dev/null
+++ b/api/language_model_clients/Databricks.md
@@ -0,0 +1,43 @@
+---
+sidebar_position: 8
+---
+
+# dspy.Databricks
+
+### Usage
+
+```python
+lm = dspy.Databricks(model="databricks-mpt-30b-instruct")
+```
+
+### Constructor
+
+The constructor inherits from the `GPT3` class and verifies the Databricks authentication credentials for using the Databricks Model Serving API through the OpenAI SDK.
+We expect the following environment variables to be set:
+- `openai.api_key`: Databricks API key.
+- `openai.base_url`: Databricks Model Endpoint URL.
+
+The `kwargs` attribute is initialized with default values for relevant text generation parameters needed for communicating with the Databricks OpenAI SDK, such as `temperature`, `max_tokens`, `top_p`, and `n`. However, it removes the `frequency_penalty` and `presence_penalty` arguments as these are not currently supported by the Databricks API.
+
+```python
+class Databricks(GPT3):
+    def __init__(
+        self,
+        model: str,
+        api_key: Optional[str] = None,
+        api_base: Optional[str] = None,
+        model_type: Literal["chat", "text"] = None,
+        **kwargs,
+    ):
+```
+
+**Parameters:**
+- `model` (_str_): models hosted on Databricks.
+- `stop` (_List[str]_, _optional_): List of stopping tokens to end generation.
+- `api_key` (_Optional[str]_): Databricks API key. Defaults to None.
+- `api_base` (_Optional[str]_): Databricks Model Endpoint URL. Defaults to None.
+- `model_type` (_Literal["chat", "text", "embeddings"]_): Specified model type to use.
+- `**kwargs`: Additional language model arguments to pass to the API provider.
+
+### Methods
+
+Refer to [`dspy.OpenAI`](#openai) documentation.
\ No newline at end of file
diff --git a/api/language_model_clients/HFClientVLLM.md b/api/language_model_clients/HFClientVLLM.md
new file mode 100644
index 0000000000..2108e701df
--- /dev/null
+++ b/api/language_model_clients/HFClientVLLM.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 5
+---
+
+# dspy.HFClientVLLM
+
+### Usage
+
+```python
+lm = dspy.HFClientVLLM(model="meta-llama/Llama-2-7b-hf", port=8080, url="http://localhost")
+```
+
+### Prerequisites
+
+Refer to the [vLLM Server](https://github.com/stanfordnlp/dspy/blob/local_models_docs/docs/using_local_models.md#vllm-server) section of the `Using Local Models` documentation.
+
+### Constructor
+
+Refer to [`dspy.HFClientTGI`](#tgi) documentation, replacing `HFClientTGI` with `HFClientVLLM`.
+
+### Methods
+
+Refer to [`dspy.OpenAI`](#openai) documentation.
\ No newline at end of file
diff --git a/api/language_model_clients/OpenAI.md b/api/language_model_clients/OpenAI.md
new file mode 100644
index 0000000000..414a3afdc9
--- /dev/null
+++ b/api/language_model_clients/OpenAI.md
@@ -0,0 +1,54 @@
+---
+sidebar_position: 1
+---
+
+# dspy.OpenAI
+
+### Usage
+
+```python
+lm = dspy.OpenAI(model='gpt-3.5-turbo')
+```
+
+### Constructor
+
+The constructor initializes the base class `LM` and verifies the provided arguments like the `api_provider`, `api_key`, and `api_base` to set up OpenAI request retrieval. The `kwargs` attribute is initialized with default values for relevant text generation parameters needed for communicating with the GPT API, such as `temperature`, `max_tokens`, `top_p`, `frequency_penalty`, `presence_penalty`, and `n`.
+
+```python
+class OpenAI(LM):
+    def __init__(
+        self,
+        model: str = "text-davinci-002",
+        api_key: Optional[str] = None,
+        api_provider: Literal["openai"] = "openai",
+        model_type: Literal["chat", "text"] = None,
+        **kwargs,
+    ):
+```
+
+
+
+**Parameters:**
+- `api_key` (_Optional[str]_, _optional_): API provider authentication token. Defaults to None.
+- `api_provider` (_Literal["openai"]_, _optional_): API provider to use. Defaults to "openai".
+- `model_type` (_Literal["chat", "text"]_): Specified model type to use.
+- `**kwargs`: Additional language model arguments to pass to the API provider.
+
+### Methods
+
+#### `__call__(self, prompt: str, only_completed: bool = True, return_sorted: bool = False, **kwargs) -> List[Dict[str, Any]]`
+
+Retrieves completions from OpenAI by calling `request`.
+
+Internally, the method handles the specifics of preparing the request prompt and corresponding payload to obtain the response.
+
+After generation, the completions are post-processed based on the `model_type` parameter. If the parameter is set to 'chat', the generated content will be found in `choice["message"]["content"]`. Otherwise, the generated text will be `choice["text"]`.
+
+**Parameters:**
+- `prompt` (_str_): Prompt to send to OpenAI.
+- `only_completed` (_bool_, _optional_): Flag to return only completed responses and ignore completion due to length. Defaults to True.
+- `return_sorted` (_bool_, _optional_): Flag to sort the completion choices using the returned averaged log-probabilities. Defaults to False.
+- `**kwargs`: Additional keyword arguments for completion request.
+
+**Returns:**
+- `List[Dict[str, Any]]`: List of completion choices.
\ No newline at end of file
diff --git a/api/language_model_clients/TGI.md b/api/language_model_clients/TGI.md
new file mode 100644
index 0000000000..0a2bf4dbfb
--- /dev/null
+++ b/api/language_model_clients/TGI.md
@@ -0,0 +1,34 @@
+---
+sidebar_position: 4
+---
+
+# dspy.HFClientTGI
+
+### Usage
+
+```python
+lm = dspy.HFClientTGI(model="meta-llama/Llama-2-7b-hf", port=8080, url="http://localhost")
+```
+
+### Prerequisites
+
+Refer to the [Text Generation-Inference Server](https://github.com/stanfordnlp/dspy/blob/local_models_docs/docs/using_local_models.md#text-generation-inference-server) section of the `Using Local Models` documentation.
+
+### Constructor
+
+The constructor initializes the `HFModel` base class and configures the client for communicating with the TGI server. It requires a `model` instance, communication `port` for the server, and the `url` for the server to host generate requests. Additional configuration can be provided via keyword arguments in `**kwargs`.
+
+```python
+class HFClientTGI(HFModel):
+    def __init__(self, model, port, url="http://future-hgx-1", **kwargs):
+```
+
+**Parameters:**
+- `model` (_HFModel_): Instance of Hugging Face model connected to the TGI server.
+- `port` (_int_): Port for TGI server.
+- `url` (_str_): Base URL where the TGI server is hosted.
+- `**kwargs`: Additional keyword arguments to configure the client.
+
+### Methods
+
+Refer to [`dspy.OpenAI`](#openai) documentation.
\ No newline at end of file diff --git a/api/language_model_clients/Together.md b/api/language_model_clients/Together.md new file mode 100644 index 0000000000..c24232d1ba --- /dev/null +++ b/api/language_model_clients/Together.md @@ -0,0 +1,32 @@ +--- +sidebar_position: 7 +--- + +# dspy.Together + +### Usage + +```python +lm = dspy.Together(model="mistralai/Mistral-7B-v0.1") +``` + +### Constructor + +The constructor initializes the base class `LM` and verifies the `api_key` for using Together API. +We expect the following environment variables to be set: +- `TOGETHER_API_KEY`: API key for Together. +- `TOGETHER_API_BASE`: API base URL for Together. + + +```python +class Together(HFModel): + def __init__(self, model, **kwargs): +``` + +**Parameters:** +- `model` (_str_): models hosted on Together. +- `stop` (_List[str]_, _optional_): List of stopping tokens to end generation. + +### Methods + +Refer to [`dspy.OpenAI`](#openai) documentation. \ No newline at end of file diff --git a/api/language_model_clients/_category_.json b/api/language_model_clients/_category_.json new file mode 100644 index 0000000000..3f6129baea --- /dev/null +++ b/api/language_model_clients/_category_.json @@ -0,0 +1,8 @@ +{ + "label": "Language Model API Clients", + "position": 4, + "link": { + "type": "generated-index", + "description": "This documentation provides an overview of the DSPy Language Model Clients." + } +} \ No newline at end of file diff --git a/api/hosting_language_models_locally/HFModel.md b/api/local_language_model_clients/HFModel.md similarity index 54% rename from api/hosting_language_models_locally/HFModel.md rename to api/local_language_model_clients/HFModel.md index 52fdf624b2..162238ae59 100644 --- a/api/hosting_language_models_locally/HFModel.md +++ b/api/local_language_model_clients/HFModel.md @@ -1,5 +1,7 @@ +# dspy.HFModel + Initialize `HFModel` within your program with the desired model to load in. Here's an example call: - ```python - llama = dspy.HFModel(model = 'meta-llama/Llama-2-7b-hf') - ``` \ No newline at end of file +```python +llama = dspy.HFModel(model = 'meta-llama/Llama-2-7b-hf') +``` \ No newline at end of file diff --git a/api/local_language_model_clients/MLC.md b/api/local_language_model_clients/MLC.md new file mode 100644 index 0000000000..6a36f374bc --- /dev/null +++ b/api/local_language_model_clients/MLC.md @@ -0,0 +1,41 @@ +# dspy.ChatModuleClient + +## Prerequisites + +1. Install the required packages using the following commands: + + ```shell + pip install --no-deps --pre --force-reinstall mlc-ai-nightly-cu118 mlc-chat-nightly-cu118 -f https://mlc.ai/wheels + pip install transformers + git lfs install + ``` + + Adjust the pip wheels according to your OS/platform by referring to the provided commands in [MLC packages](https://mlc.ai/package/). + +## Running MLC Llama-2 models + +1. Create a directory for prebuilt models: + + ```shell + mkdir -p dist/prebuilt + ``` + +2. Clone the necessary libraries from the repository: + + ```shell + git clone https://github.com/mlc-ai/binary-mlc-llm-libs.git dist/prebuilt/lib + cd dist/prebuilt + ``` + +3. Choose a Llama-2 model from [MLC LLMs](https://huggingface.co/mlc-ai) and clone the model repository: + + ```shell + git clone https://huggingface.co/mlc-ai/mlc-chat-Llama-2-7b-chat-hf-q4f16_1 + ``` + +4. Initialize the `ChatModuleClient` within your program with the desired parameters. 
Here's an example call: + + ```python + llama = dspy.ChatModuleClient(model='dist/prebuilt/mlc-chat-Llama-2-7b-chat-hf-q4f16_1', model_path='dist/prebuilt/lib/Llama-2-7b-chat-hf-q4f16_1-cuda.so') + ``` +Please refer to the [official MLC repository](https://github.com/mlc-ai/mlc-llm) for more detailed information and [documentation](https://mlc.ai/mlc-llm/docs/get_started/try_out.html). diff --git a/api/hosting_language_models_locally/Ollama.md b/api/local_language_model_clients/Ollama.md similarity index 89% rename from api/hosting_language_models_locally/Ollama.md rename to api/local_language_model_clients/Ollama.md index 516489c9d4..0f1198ad24 100644 --- a/api/hosting_language_models_locally/Ollama.md +++ b/api/local_language_model_clients/Ollama.md @@ -1,6 +1,8 @@ -## Running LLMs through Ollama +# dspy.OllamaLocal -#### Adapted from documentation provided by https://github.com/insop +:::note +Adapted from documentation provided by https://github.com/insop +::: Ollama is a good software tool that allows you to run LLMs locally, such as Mistral, Llama2, and Phi. The following are the instructions to install and run Ollama. diff --git a/api/hosting_language_models_locally/TGI.md b/api/local_language_model_clients/TGI.md similarity index 51% rename from api/hosting_language_models_locally/TGI.md rename to api/local_language_model_clients/TGI.md index 3ecc6aff61..c67a09beba 100644 --- a/api/hosting_language_models_locally/TGI.md +++ b/api/local_language_model_clients/TGI.md @@ -1,42 +1,42 @@ -## Launching a Text Generation Inference (TGI) Server +# dspy.HFClientTGI -### Prerequisites +## Prerequisites - Docker must be installed on your system. If you don't have Docker installed, you can get it from [here](https://docs.docker.com/get-docker/). -### Setting up the Text-Generation-Inference Server +## Setting up the Text-Generation-Inference Server 1. Clone the Text-Generation-Inference repository from GitHub by executing the following command: -```bash -git clone https://github.com/huggingface/text-generation-inference.git -``` + ``` + git clone https://github.com/huggingface/text-generation-inference.git + ``` 2. Change into the cloned repository directory: -```bash -cd text-generation-inference -``` + ``` + cd text-generation-inference + ``` 3. Execute the Docker command under the "Get Started" section to run the server: -```bash -model=mosaicml/mpt-30b # set to the specific Hugging Face model ID you wish to use. -num_shard=1 # set to the number of shards you wish to use. -volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run + ``` + model=meta-llama/Llama-2-7b-hf # set to the specific Hugging Face model ID you wish to use. + num_shard=2 # set to the number of shards you wish to use. + volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run -docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id $model --num-shard $num_shard -``` + docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:0.9 --model-id $model --num-shard $num_shard + ``` -This command will start the server and make it accessible at `http://localhost:8080`. + This command will start the server and make it accessible at `http://localhost:8080`. 
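+
+    As a quick sanity check (a sketch; the request shape follows the TGI quickstart and may vary across versions), you can query the server from Python:
+
+    ```python
+    import requests
+
+    # Assumes the server launched above is listening on localhost:8080.
+    resp = requests.post(
+        "http://localhost:8080/generate",
+        json={"inputs": "What is Deep Learning?", "parameters": {"max_new_tokens": 20}},
+    )
+    print(resp.json())
+    ```
+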
-If you want to connect to private HuggingFace models such as [Meta Llama 2 models](https://huggingface.co/meta-llama), make sure to use version 9.3 (or higher) of the docker image (ghcr.io/huggingface/text-generation-inference:0.9.3) and pass in your huggingface token as an environment variable. +If you want to connect to [Meta Llama 2 models](https://huggingface.co/meta-llama), make sure to use version 9.3 (or higher) of the docker image (ghcr.io/huggingface/text-generation-inference:0.9.3) and pass in your huggingface token as an environment variable. -```bash -docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data -e HUGGING_FACE_HUB_TOKEN={your_token} ghcr.io/huggingface/text-generation-inference:latest --model-id $model --num-shard $num_shard +``` + docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data -e HUGGING_FACE_HUB_TOKEN={your_token} ghcr.io/huggingface/text-generation-inference:0.9.3 --model-id $model --num-shard $num_shard ``` -### Sending requests to the server +## Sending requests to the server After setting up the text-generation-inference server and ensuring that it displays "Connected" when it's running, you can interact with it using the `HFClientTGI`. @@ -57,4 +57,4 @@ Initialize the `HFClientTGI` within your program with the desired parameters. He - `--max-input-length`: Set the maximum allowed input length for the text. - `--max-total-tokens`: Set the maximum total tokens allowed for text generation. -Please refer to the [official TGI repository](https://github.com/huggingface/text-generation-inference) for detailed docs. +Please refer to the [official Text-Generation-Inference repository](https://github.com/huggingface/text-generation-inference) for more detailed information and documentation. diff --git a/api/local_language_model_clients/_category_.json b/api/local_language_model_clients/_category_.json new file mode 100644 index 0000000000..8965dcf411 --- /dev/null +++ b/api/local_language_model_clients/_category_.json @@ -0,0 +1,8 @@ +{ + "label": "Local Language Model Clients", + "position": 6, + "link": { + "type": "generated-index", + "description": "DSPy supports various methods including `built-in wrappers`, `server integration`, and `external package integration` for model loading. This documentation provides a concise introduction on how to load in models within DSPy extending these capabilities for your specific needs." + } +} \ No newline at end of file diff --git a/api/hosting_language_models_locally/vLLM.md b/api/local_language_model_clients/vLLM.md similarity index 97% rename from api/hosting_language_models_locally/vLLM.md rename to api/local_language_model_clients/vLLM.md index 4ac911f856..bd9befb9f2 100644 --- a/api/hosting_language_models_locally/vLLM.md +++ b/api/local_language_model_clients/vLLM.md @@ -1,4 +1,4 @@ -## Launching a vLLM Server +# dspy.HFClientVLLM ### Setting up the vLLM Server diff --git a/api/modules/ChainOfThought.md b/api/modules/ChainOfThought.md index f17919fa73..ab9bd0e5fa 100644 --- a/api/modules/ChainOfThought.md +++ b/api/modules/ChainOfThought.md @@ -1,5 +1,7 @@ # dspy.ChainOfThought +### Constructor + The constructor initializes the `ChainOfThought` class and sets up its attributes. It inherits from the `Predict` class and adds specific functionality for chain of thought processing. Internally, the class initializes the `activated` attribute to indicate if chain of thought processing has been selected. 
It extends the `signature` to include additional reasoning steps and an updated `rationale_type` when chain of thought processing is activated. diff --git a/api/modules/_category_.json b/api/modules/_category_.json index 3488b42085..5cf44ce64e 100644 --- a/api/modules/_category_.json +++ b/api/modules/_category_.json @@ -1,6 +1,6 @@ { "label": "Modules", - "position": 3, + "position": 1, "link": { "type": "generated-index", "description": "Modules in DSPy" diff --git a/api/optimizers/BootstrapFewShot.md b/api/optimizers/BootstrapFewShot.md new file mode 100644 index 0000000000..14b2deff54 --- /dev/null +++ b/api/optimizers/BootstrapFewShot.md @@ -0,0 +1,63 @@ +--- +sidebar_position: 2 +--- + +# teleprompt.BootstrapFewShot + +### Constructor + +The constructor initializes the `BootstrapFewShot` class and sets up parameters for bootstrapping. + +```python +class BootstrapFewShot(Teleprompter): + def __init__(self, metric=None, teacher_settings={}, max_bootstrapped_demos=4, max_labeled_demos=16, max_rounds=1): + self.metric = metric + self.teacher_settings = teacher_settings + + self.max_bootstrapped_demos = max_bootstrapped_demos + self.max_labeled_demos = max_labeled_demos + self.max_rounds = max_rounds +``` + +**Parameters:** +- `metric` (_callable_, _optional_): Metric function to evaluate examples during bootstrapping. Defaults to `None`. +- `teacher_settings` (_dict_, _optional_): Settings for teacher predictor. Defaults to empty dictionary. +- `max_bootstrapped_demos` (_int_, _optional_): Maximum number of bootstrapped demonstrations per predictor. Defaults to 4. +- `max_labeled_demos` (_int_, _optional_): Maximum number of labeled demonstrations per predictor. Defaults to 16. +- `max_rounds` (_int_, _optional_): Maximum number of bootstrapping rounds. Defaults to 1. + +### Method + +#### `compile(self, student, *, teacher=None, trainset, valset=None)` + +This method compiles the BootstrapFewShot instance by performing bootstrapping to refine the student predictor. + +This process includes preparing the student and teacher predictors, which involves creating predictor copies, verifying the student predictor is uncompiled, and compiling the teacher predictor with labeled demonstrations via LabeledFewShot if the teacher predictor hasn't been compiled. + +The next stage involves preparing predictor mappings by validating that both the student and teacher predictors have the same program structure and the same signatures but are different objects. + +The final stage is performing the bootstrapping iterations. + +**Parameters:** +- `student` (_Teleprompter_): Student predictor to be compiled. +- `teacher` (_Teleprompter_, _optional_): Teacher predictor used for bootstrapping. Defaults to `None`. +- `trainset` (_list_): Training dataset used in bootstrapping. +- `valset` (_list_, _optional_): Validation dataset used in compilation. Defaults to `None`. + +**Returns:** +- The compiled `student` predictor after bootstrapping with refined demonstrations. + +### Example + +```python +#Assume defined trainset +#Assume defined RAG class +... + +#Define teleprompter and include teacher +teacher = dspy.OpenAI(model='gpt-3.5-turbo', api_key = openai.api_key, api_provider = "openai", model_type = "chat") +teleprompter = BootstrapFewShot(teacher_settings=dict({'lm': teacher})) + +# Compile! 
+compiled_rag = teleprompter.compile(student=RAG(), trainset=trainset) +``` diff --git a/api/optimizers/BootstrapFewShotWithRandomSearch.md b/api/optimizers/BootstrapFewShotWithRandomSearch.md new file mode 100644 index 0000000000..dd007b10e2 --- /dev/null +++ b/api/optimizers/BootstrapFewShotWithRandomSearch.md @@ -0,0 +1,59 @@ +--- +sidebar_position: 4 +--- + +# teleprompt.BootstrapFewShotWithRandomSearch + +### Constructor + +The constructor initializes the `BootstrapFewShotWithRandomSearch` class and sets up its attributes. It inherits from the `BootstrapFewShot` class and introduces additional attributes for the random search process. + +```python +class BootstrapFewShotWithRandomSearch(BootstrapFewShot): + def __init__(self, metric, teacher_settings={}, max_bootstrapped_demos=4, max_labeled_demos=16, max_rounds=1, num_candidate_programs=16, num_threads=6): + self.metric = metric + self.teacher_settings = teacher_settings + self.max_rounds = max_rounds + + self.num_threads = num_threads + + self.min_num_samples = 1 + self.max_num_samples = max_bootstrapped_demos + self.num_candidate_sets = num_candidate_programs + self.max_num_traces = 1 + int(max_bootstrapped_demos / 2.0 * self.num_candidate_sets) + + self.max_bootstrapped_demos = self.max_num_traces + self.max_labeled_demos = max_labeled_demos + + print("Going to sample between", self.min_num_samples, "and", self.max_num_samples, "traces per predictor.") + print("Going to sample", self.max_num_traces, "traces in total.") + print("Will attempt to train", self.num_candidate_sets, "candidate sets.") +``` + +**Parameters:** +- `metric` (_callable_, _optional_): Metric function to evaluate examples during bootstrapping. Defaults to `None`. +- `teacher_settings` (_dict_, _optional_): Settings for teacher predictor. Defaults to empty dictionary. +- `max_bootstrapped_demos` (_int_, _optional_): Maximum number of bootstrapped demonstrations per predictor. Defaults to 4. +- `max_labeled_demos` (_int_, _optional_): Maximum number of labeled demonstrations per predictor. Defaults to 16. +- `max_rounds` (_int_, _optional_): Maximum number of bootstrapping rounds. Defaults to 1. +- `num_candidate_programs` (_int_): Number of candidate programs to generate during random search. +- `num_threads` (_int_): Number of threads used for evaluation during random search. + +### Method + +Refer to [teleprompt.BootstrapFewShot](#telepromptbootstrapfewshot) documentation. + +## Example + +```python +#Assume defined trainset +#Assume defined RAG class +... + +#Define teleprompter and include teacher +teacher = dspy.OpenAI(model='gpt-3.5-turbo', api_key = openai.api_key, api_provider = "openai", model_type = "chat") +teleprompter = BootstrapFewShotWithRandomSearch(teacher_settings=dict({'lm': teacher})) + +# Compile! +compiled_rag = teleprompter.compile(student=RAG(), trainset=trainset) +``` \ No newline at end of file diff --git a/api/optimizers/BootstrapFinetune.md b/api/optimizers/BootstrapFinetune.md new file mode 100644 index 0000000000..86540aa460 --- /dev/null +++ b/api/optimizers/BootstrapFinetune.md @@ -0,0 +1,56 @@ +--- +sidebar_position: 5 +--- + +# teleprompt.BootstrapFinetune + +### Constructor + +### `__init__(self, metric=None, teacher_settings={}, multitask=True)` + +The constructor initializes a `BootstrapFinetune` instance and sets up its attributes. It defines the teleprompter as a `BootstrapFewShot` instance for the finetuning compilation. 
+ +```python +class BootstrapFinetune(Teleprompter): + def __init__(self, metric=None, teacher_settings={}, multitask=True): +``` + +**Parameters:** +- `metric` (_callable_, _optional_): Metric function to evaluate examples during bootstrapping. Defaults to `None`. +- `teacher_settings` (_dict_, _optional_): Settings for teacher predictor. Defaults to empty dictionary. +- `multitask` (_bool_, _optional_): Enable multitask fine-tuning. Defaults to `True`. + +### Method + +#### `compile(self, student, *, teacher=None, trainset, valset=None, target='t5-large', bsize=12, accumsteps=1, lr=5e-5, epochs=1, bf16=False)` + +This method first compiles for bootstrapping with the `BootstrapFewShot` teleprompter. It then prepares fine-tuning data by generating prompt-completion pairs for training and performs finetuning. After compilation, the LMs are set to the finetuned models and the method returns a compiled and fine-tuned predictor. + +**Parameters:** +- `student` (_Predict_): Student predictor to be fine-tuned. +- `teacher` (_Predict_, _optional_): Teacher predictor to help with fine-tuning. Defaults to `None`. +- `trainset` (_list_): Training dataset for fine-tuning. +- `valset` (_list_, _optional_): Validation dataset for fine-tuning. Defaults to `None`. +- `target` (_str_, _optional_): Target model for fine-tuning. Defaults to `'t5-large'`. +- `bsize` (_int_, _optional_): Batch size for training. Defaults to `12`. +- `accumsteps` (_int_, _optional_): Gradient accumulation steps. Defaults to `1`. +- `lr` (_float_, _optional_): Learning rate for fine-tuning. Defaults to `5e-5`. +- `epochs` (_int_, _optional_): Number of training epochs. Defaults to `1`. +- `bf16` (_bool_, _optional_): Enable mixed-precision training with BF16. Defaults to `False`. + +**Returns:** +- `compiled2` (_Predict_): A compiled and fine-tuned `Predict` instance. + +### Example + +```python +#Assume defined trainset +#Assume defined RAG class +... + +#Define teleprompter +teleprompter = BootstrapFinetune(teacher_settings=dict({'lm': teacher})) + +# Compile! +compiled_rag = teleprompter.compile(student=RAG(), trainset=trainset, target='google/flan-t5-base') +``` \ No newline at end of file diff --git a/api/optimizers/Ensemble.md b/api/optimizers/Ensemble.md new file mode 100644 index 0000000000..42dbc0dde6 --- /dev/null +++ b/api/optimizers/Ensemble.md @@ -0,0 +1,47 @@ +--- +sidebar_position: 3 +--- + +# teleprompt.Ensemble + +### Constructor + +The constructor initializes the `Ensemble` class and sets up its attributes. This teleprompter is designed to create ensembled versions of multiple programs, reducing various outputs from different programs into a single output. + +```python +class Ensemble(Teleprompter): + def __init__(self, *, reduce_fn=None, size=None, deterministic=False): +``` + +**Parameters:** +- `reduce_fn` (_callable_, _optional_): Function used to reduce multiple outputs from different programs into a single output. A common choice is `dspy.majority`. Defaults to `None`. +- `size` (_int_, _optional_): Number of programs to randomly select for ensembling. If not specified, all programs will be used. Defaults to `None`. +- `deterministic` (_bool_, _optional_): Specifies whether ensemble should operate deterministically. Currently, setting this to `True` will raise an error as this feature is pending implementation. Defaults to `False`. 
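+
+The `reduce_fn` is applied to the list of outputs produced by the sampled programs. `dspy.majority` is the usual choice, but any callable over that list works; a minimal custom reducer might be sketched as:
+
+```python
+def first_output(outputs):
+    # Naively keep the first program's output; replace with real aggregation logic.
+    return outputs[0]
+```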
+ +### Method + +#### `compile(self, programs)` + +This method compiles an ensemble of programs into a single program that when run, can either randomly sample a subset of the given programs to produce outputs or use all of them. The multiple outputs can then be reduced into a single output using the `reduce_fn`. + +**Parameters:** +- `programs` (_list_): List of programs to be ensembled. + +**Returns:** +- `EnsembledProgram` (_Module_): An ensembled version of the input programs. + +### Example + +```python +import dspy +from dspy.teleprompt import Ensemble + +# Assume a list of programs +programs = [program1, program2, program3, ...] + +# Define Ensemble teleprompter +teleprompter = Ensemble(reduce_fn=dspy.majority, size=2) + +# Compile to get the EnsembledProgram +ensembled_program = teleprompter.compile(programs) +``` \ No newline at end of file diff --git a/api/optimizers/LabeledFewShot.md b/api/optimizers/LabeledFewShot.md new file mode 100644 index 0000000000..d58f688aa9 --- /dev/null +++ b/api/optimizers/LabeledFewShot.md @@ -0,0 +1,58 @@ +--- +sidebar_position: 1 +--- + +# teleprompt.LabeledFewShot + +### Constructor + +The constructor initializes the `LabeledFewShot` class and sets up its attributes, particularly defining `k` number of samples to be used by the predictor. + +```python +class LabeledFewShot(Teleprompter): + def __init__(self, k=16): + self.k = k +``` + +**Parameters:** +- `k` (_int_): Number of samples to be used for each predictor. Defaults to 16. + +### Method + +#### `compile(self, student, *, trainset)` + +This method compiles the `LabeledFewShot` instance by configuring the `student` predictor. It assigns subsets of the `trainset` in each student's predictor's `demos` attribute. If the `trainset` is empty, the method returns the original `student`. + +**Parameters:** +- `student` (_Teleprompter_): Student predictor to be compiled. +- `trainset` (_list_): Training dataset for compiling with student predictor. + +**Returns:** +- The compiled `student` predictor with assigned training samples for each predictor or the original `student` if the `trainset` is empty. + +### Example + +```python +import dspy + +#Assume defined trainset +class RAG(dspy.Module): + def __init__(self, num_passages=3): + super().__init__() + + #declare retrieval and predictor modules + self.retrieve = dspy.Retrieve(k=num_passages) + self.generate_answer = dspy.ChainOfThought(GenerateAnswer) + + #flow for answering questions using predictor and retrieval modules + def forward(self, question): + context = self.retrieve(question).passages + prediction = self.generate_answer(context=context, question=question) + return dspy.Prediction(context=context, answer=prediction.answer) + +#Define teleprompter +teleprompter = LabeledFewShot() + +# Compile! +compiled_rag = teleprompter.compile(student=RAG(), trainset=trainset) +``` \ No newline at end of file diff --git a/api/optimizers/_category_.json b/api/optimizers/_category_.json new file mode 100644 index 0000000000..5d7edc9df5 --- /dev/null +++ b/api/optimizers/_category_.json @@ -0,0 +1,8 @@ +{ + "label": "Optimizers", + "position": 2, + "link": { + "type": "generated-index", + "description": "Teleprompters are powerful optimizers (included in DSPy) that can learn to bootstrap and select effective prompts for the modules of any program. (The \"tele-\" in the name means \"at a distance\", i.e., automatic prompting at a distance.)\n\nThis documentation provides an overview of the DSPy Teleprompters." 
+    }
+}
\ No newline at end of file
diff --git a/api/retrieval_model_clients/AzureCognitiveSearch.md b/api/retrieval_model_clients/AzureCognitiveSearch.md
new file mode 100644
index 0000000000..b8c353f31b
--- /dev/null
+++ b/api/retrieval_model_clients/AzureCognitiveSearch.md
@@ -0,0 +1,34 @@
+---
+sidebar_position: 3
+---
+
+# retrieve.AzureCognitiveSearch
+
+### Constructor
+
+The constructor initializes an instance of the `AzureCognitiveSearch` class and sets up parameters for sending queries and retrieving results from the Azure Cognitive Search server.
+
+```python
+class AzureCognitiveSearch:
+    def __init__(
+        self,
+        search_service_name: str,
+        search_api_key: str,
+        search_index_name: str,
+        field_text: str,
+        field_score: str, # required field to map with "score" field in dsp framework
+    ):
+```
+
+**Parameters:**
+- `search_service_name` (_str_): Name of Azure Cognitive Search server.
+- `search_api_key` (_str_): API Authentication token for accessing Azure Cognitive Search server.
+- `search_index_name` (_str_): Name of search index in the Azure Cognitive Search server.
+- `field_text` (_str_): Field name that maps to DSP "content" field.
+- `field_score` (_str_): Field name that maps to DSP "score" field.
+
+### Methods
+
+Refer to [ColBERTv2](/api/retrieval_model_clients/ColBERTv2) documentation. Keep in mind there is no `simplify` flag for AzureCognitiveSearch.
+
+AzureCognitiveSearch supports sending queries and processing the received results, mapping content and scores into the format expected by the DSP framework.
\ No newline at end of file
diff --git a/api/retrieval_model_clients/ChromadbRM.md b/api/retrieval_model_clients/ChromadbRM.md
new file mode 100644
index 0000000000..9b40759a51
--- /dev/null
+++ b/api/retrieval_model_clients/ChromadbRM.md
@@ -0,0 +1,65 @@
+---
+sidebar_position: 2
+---
+
+# retrieve.ChromadbRM
+
+### Constructor
+
+Initialize an instance of the `ChromadbRM` class, with the option to use OpenAI's embeddings or any alternative supported by chromadb, as detailed in the official [chromadb embeddings documentation](https://docs.trychroma.com/embeddings).
+
+```python
+ChromadbRM(
+    collection_name: str,
+    persist_directory: str,
+    embedding_function: Optional[EmbeddingFunction[Embeddable]] = OpenAIEmbeddingFunction(),
+    k: int = 7,
+)
+```
+
+**Parameters:**
+- `collection_name` (_str_): The name of the chromadb collection.
+- `persist_directory` (_str_): Path to the directory where chromadb data is persisted.
+- `embedding_function` (_Optional[EmbeddingFunction[Embeddable]]_, _optional_): The function used for embedding documents and queries. Defaults to `OpenAIEmbeddingFunction()` if not specified.
+- `k` (_int_, _optional_): The number of top passages to retrieve. Defaults to 7.
+
+### Methods
+
+#### `forward(self, query_or_queries: Union[str, List[str]], k: Optional[int] = None) -> dspy.Prediction`
+
+Search the chromadb collection for the top `k` passages matching the given query or queries, using embeddings generated via the specified `embedding_function`.
+
+**Parameters:**
+- `query_or_queries` (_Union[str, List[str]]_): The query or list of queries to search for.
+- `k` (_Optional[int]_, _optional_): The number of results to retrieve. If not specified, defaults to the value set during initialization.
+
+**Returns:**
+- `dspy.Prediction`: Contains the retrieved passages, each represented as a `dotdict` with a `long_text` attribute.
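+
+Beyond calling the retriever directly, you can also register it as the default RM and query it through `dspy.Retrieve` (a sketch; assumes an already-populated collection with the name below):
+
+```python
+import dspy
+from dspy.retrieve import ChromadbRM
+
+retriever_model = ChromadbRM('your_collection_name', '/path/to/your/db')
+dspy.settings.configure(rm=retriever_model)
+
+# dspy.Retrieve now routes queries to the configured ChromadbRM instance.
+retrieve = dspy.Retrieve(k=3)
+topK_passages = retrieve("Explore the significance of quantum computing").passages
+```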
+
+### Quickstart with OpenAI Embeddings
+
+ChromadbRM offers the flexibility to use a variety of embedding functions, as outlined in the [chromadb embeddings documentation](https://docs.trychroma.com/embeddings). While different options are available, this example demonstrates how to utilize OpenAI embeddings specifically.
+
+```python
+from dspy.retrieve import ChromadbRM
+import os
+import openai
+from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction
+
+embedding_function = OpenAIEmbeddingFunction(
+    api_key=os.environ.get('OPENAI_API_KEY'),
+    model_name="text-embedding-ada-002"
+)
+
+retriever_model = ChromadbRM(
+    'your_collection_name',
+    '/path/to/your/db',
+    embedding_function=embedding_function,
+    k=5
+)
+
+results = retriever_model("Explore the significance of quantum computing", k=5)
+
+for result in results:
+    print("Document:", result.long_text, "\n")
+```
\ No newline at end of file
diff --git a/api/retrieval_model_clients/ColBERTv2.md b/api/retrieval_model_clients/ColBERTv2.md
new file mode 100644
index 0000000000..2dd31bef8c
--- /dev/null
+++ b/api/retrieval_model_clients/ColBERTv2.md
@@ -0,0 +1,51 @@
+---
+sidebar_position: 1
+---
+
+# dspy.ColBERTv2
+
+### Constructor
+
+The constructor initializes the `ColBERTv2` class instance and sets up the request parameters for interacting with the ColBERTv2 server.
+
+```python
+class ColBERTv2:
+    def __init__(
+        self,
+        url: str = "http://0.0.0.0",
+        port: Optional[Union[str, int]] = None,
+        post_requests: bool = False,
+    ):
+```
+
+**Parameters:**
+- `url` (_str_): URL for ColBERTv2 server.
+- `port` (_Union[str, int]_, _Optional_): Port endpoint for ColBERTv2 server. Defaults to `None`.
+- `post_requests` (_bool_, _Optional_): Flag for using HTTP POST requests. Defaults to `False`.
+
+### Methods
+
+#### `__call__(self, query: str, k: int = 10, simplify: bool = False) -> Union[list[str], list[dotdict]]`
+
+Enables making queries to the ColBERTv2 server for retrieval. Internally, the method handles the specifics of preparing the request and corresponding payload to obtain the response. The function handles the retrieval of the top-k passages based on the provided query.
+
+**Parameters:**
+- `query` (_str_): Query string used for retrieval.
+- `k` (_int_, _optional_): Number of passages to retrieve. Defaults to 10.
+- `simplify` (_bool_, _optional_): Flag for simplifying output to a list of strings. Defaults to False.
+
+**Returns:**
+- `Union[list[str], list[dotdict]]`: Depending on the `simplify` flag, either a list of strings representing the passage content (`True`) or a list of `dotdict` instances containing passage details (`False`).
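+
+For example, with `simplify=True` the same client returns plain strings (assuming a reachable ColBERTv2 endpoint such as the one used below):
+
+```python
+colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
+
+# simplify=True yields list[str] instead of list[dotdict].
+passages = colbertv2('When was the first FIFA World Cup held?', k=3, simplify=True)
+```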
+ +### Quickstart + +```python +import dspy + +colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') + +retrieval_response = colbertv2_wiki17_abstracts('When was the first FIFA World Cup held?', k=5) + +for result in retrieval_response: + print("Text:", result['text'], "\n") +``` diff --git a/api/retrieval_model_clients/FaissRM.md b/api/retrieval_model_clients/FaissRM.md new file mode 100644 index 0000000000..2ef9dadc1f --- /dev/null +++ b/api/retrieval_model_clients/FaissRM.md @@ -0,0 +1,62 @@ +--- +sidebar_position: 4 +--- + +# retrieve.FaissRM + +### Constructor + +Initialize an instance of FaissRM by providing it with a vectorizer and a list of strings + +```python +FaissRM( + document_chunks: List[str], + vectorizer: dsp.modules.sentence_vectorizer.BaseSentenceVectorizer, + k: int = 3 +) +``` + +**Parameters:** +- `document_chunks` (_List[str]_): a list of strings that comprises the corpus to search. You cannot add/insert/upsert to this list after creating this FaissRM object. +- `vectorizer` (_dsp.modules.sentence_vectorizer.BaseSentenceVectorizer_, _optional_): If not provided, a dsp.modules.sentence_vectorizer.SentenceTransformersVectorizer object is created and used. +- `k` (_int_, _optional_): The number of top passages to retrieve. Defaults to 3. + +### Methods + +#### `forward(self, query_or_queries: Union[str, List[str]]) -> dspy.Prediction` + +Search the FaissRM vector database for the top `k` passages matching the given query or queries, using embeddings generated via the vectorizer specified at FaissRM construction time + +**Parameters:** +- `query_or_queries` (_Union[str, List[str]]_): The query or list of queries to search for. + +**Returns:** +- `dspy.Prediction`: Contains the retrieved passages, each represented as a `dotdict` with a `long_text` attribute and an `index` attribute. The `index` attribute is the index in the document_chunks array provided to this FaissRM object at construction time. + +### Quickstart with the default vectorizer + +The **FaissRM** module provides a retriever that uses an in-memory Faiss vector database. This module does not include a vectorizer; instead it supports any subclass of **dsp.modules.sentence_vectorizer.BaseSentenceVectorizer**. If a vectorizer is not provided, an instance of **dsp.modules.sentence_vectorizer.SentenceTransformersVectorizer** is created and used by **FaissRM**. 
Note that the default embedding model for **SentenceTransformersVectorizer** is **all-MiniLM-L6-v2**.
+
+
+```python
+import dspy
+from dspy.retrieve import FaissRM
+
+document_chunks = [
+    "The Super Bowl this year was played between the San Francisco 49ers and the Kansas City Chiefs",
+    "Popcorn is often served in a bowl",
+    "The Rice Bowl is a Chinese Restaurant located in the city of Tucson, Arizona",
+    "Mars is the fourth planet in the Solar System",
+    "An aquarium is a place where children can learn about marine life",
+    "The capital of the United States is Washington, D.C.",
+    "Rock and Roll musicians are honored by being inducted in the Rock and Roll Hall of Fame",
+    "Music albums were published on Long Play Records in the 70s and 80s",
+    "Sichuan cuisine is a spicy cuisine from central China",
+    "The interest rates for mortgages are considered to be very high in 2024",
+]
+
+frm = FaissRM(document_chunks)
+turbo = dspy.OpenAI(model="gpt-3.5-turbo")
+dspy.settings.configure(lm=turbo, rm=frm)
+print(frm(["I am in the mood for Chinese food"]))
+```
\ No newline at end of file
diff --git a/api/retrieval_model_clients/_category_.json b/api/retrieval_model_clients/_category_.json
new file mode 100644
index 0000000000..0c3ec89a3d
--- /dev/null
+++ b/api/retrieval_model_clients/_category_.json
@@ -0,0 +1,8 @@
+{
+    "label": "Retrieval Model Clients",
+    "position": 3,
+    "link": {
+        "type": "generated-index",
+        "description": "This documentation provides an overview of the DSPy Retrieval Model Clients."
+    }
+}
\ No newline at end of file
diff --git a/docs/building-blocks/1-language_models.md b/docs/building-blocks/1-language_models.md
index 1457f6cac0..ee41e6d665 100644
--- a/docs/building-blocks/1-language_models.md
+++ b/docs/building-blocks/1-language_models.md
@@ -141,31 +141,31 @@ lm = dspy.{provider_listed_below}(model="your model", model_request_kwargs="...")
 
 You need to host these models on your own GPU(s). Below, we include pointers for how to do that.
 
-1. `dspy.HFClientTGI`: for HuggingFace models through the Text Generation Inference (TGI) system. [Tutorial: How do I install and launch the TGI server?](/api/hosting_language_models_locally/TGI)
+1. `dspy.HFClientTGI`: for HuggingFace models through the Text Generation Inference (TGI) system. [Tutorial: How do I install and launch the TGI server?](/api/local_language_model_clients/TGI)
 
 ```python
 tgi_llama2 = dspy.HFClientTGI(model="meta-llama/Llama-2-7b-hf", port=8080, url="http://localhost")
 ```
 
-2. `dspy.HFClientVLLM`: for HuggingFace models through vLLM. [Tutorial: How do I install and launch the vLLM server?](/api/hosting_language_models_locally/vLLM)
+2. `dspy.HFClientVLLM`: for HuggingFace models through vLLM. [Tutorial: How do I install and launch the vLLM server?](/api/local_language_model_clients/vLLM)
 
 ```python
 vllm_llama2 = dspy.HFClientVLLM(model="meta-llama/Llama-2-7b-hf", port=8080, url="http://localhost")
 ```
 
-3. `dspy.HFModel` (experimental) [Tutorial: How do I initialize models using HFModel](/api/hosting_language_models_locally/HFModel)
+3. `dspy.HFModel` (experimental) [Tutorial: How do I initialize models using HFModel](/api/local_language_model_clients/HFModel)
 
 ```python
 llama = dspy.HFModel(model = 'meta-llama/Llama-2-7b-hf')
 ```
 
-4. `dspy.Ollama` (experimental) for open source models through [Ollama](https://ollama.com). [Tutorial: How do I install and use Ollama on a local computer?](/api/hosting_language_models_locally/Ollama)\n",
+4.
`dspy.Ollama` (experimental) for open source models through [Ollama](https://ollama.com). [Tutorial: How do I install and use Ollama on a local computer?](/api/local_language_model_clients/Ollama)\n", ```python mistral_ollama = dspy.OllamaLocal(model='mistral') ``` -5. `dspy.ChatModuleClient` (experimental): [How do I install and use MLC?](/api/hosting_language_models_locally/MLC) +5. `dspy.ChatModuleClient` (experimental): [How do I install and use MLC?](/api/local_language_model_clients/MLC) ```python model = 'dist/prebuilt/mlc-chat-Llama-2-7b-chat-hf-q4f16_1' diff --git a/docs/deep-dive/language_model_clients/local_models/HFClientTGI.mdx b/docs/deep-dive/language_model_clients/local_models/HFClientTGI.mdx index 0a0f4531b5..2550ed3c5f 100644 --- a/docs/deep-dive/language_model_clients/local_models/HFClientTGI.mdx +++ b/docs/deep-dive/language_model_clients/local_models/HFClientTGI.mdx @@ -4,7 +4,7 @@ import AuthorDetails from '@site/src/components/AuthorDetails'; ### Prerequisites - Launching TGI Server locally -Refer to the [Text Generation-Inference Server API](/api/hosting_language_models_locally/TGI) for setting up the TGI server locally. +Refer to the [Text Generation-Inference Server API](/api/local_language_model_clients/TGI) for setting up the TGI server locally. ```bash #Example TGI Server Launch diff --git a/docs/deep-dive/language_model_clients/local_models/HFClientVLLM.mdx b/docs/deep-dive/language_model_clients/local_models/HFClientVLLM.mdx index 66eb63f1dd..059c2a9be5 100644 --- a/docs/deep-dive/language_model_clients/local_models/HFClientVLLM.mdx +++ b/docs/deep-dive/language_model_clients/local_models/HFClientVLLM.mdx @@ -4,7 +4,7 @@ import AuthorDetails from '@site/src/components/AuthorDetails'; ### Prerequisites - Launching vLLM Server locally -Refer to the [vLLM Server API](/api/hosting_language_models_locally/vLLM) for setting up the vLLM server locally. +Refer to the [vLLM Server API](/api/local_language_model_clients/vLLM) for setting up the vLLM server locally. 
```bash #Example vLLM Server Launch From 252e9245903c57764a88e49fcb609625d5388f96 Mon Sep 17 00:00:00 2001 From: Arnav Singhvi Date: Wed, 28 Feb 2024 23:07:49 -0800 Subject: [PATCH 005/243] migration to final docs/ hosting on site --- README.md | 2 +- docs-page/README.md | 41 -- .../language_model_clients/HFClientVLLM.md | 23 - .../local_language_model_clients/HFModel.md | 7 - .../api/local_language_model_clients/MLC.md | 41 -- .../local_language_model_clients/Ollama.md | 45 -- .../api/local_language_model_clients/TGI.md | 60 --- .../_category_.json | 8 - .../api/local_language_model_clients/vLLM.md | 31 -- .../local_models/HFClientTGI.mdx | 90 ---- .../local_models/HFClientVLLM.mdx | 82 ---- .../local_models/_category_.json | 8 - {docs-page => docs}/.gitignore | 0 docs/DSPy-preprint.pdf | Bin 460814 -> 0 bytes docs/README.md | 23 + {docs-page => docs}/api/assertions.md | 0 {docs-page => docs}/api/intro.md | 0 .../api/language_model_clients/Anyscale.md | 2 +- .../api/language_model_clients/AzureOpenAI.md | 0 .../api/language_model_clients/Cohere.md | 2 +- .../api/language_model_clients/Databricks.md | 2 +- .../language_model_clients/HFClientVLLM.md | 23 + .../api/language_model_clients/OpenAI.md | 0 .../api/language_model_clients/TGI.md | 4 +- .../api/language_model_clients/Together.md | 2 +- .../language_model_clients/_category_.json | 0 .../api/modules/ChainOfThought.md | 0 .../api/modules/ChainOfThoughtWithHint.md | 0 .../api/modules/MultiChainComparison.md | 0 {docs-page => docs}/api/modules/Predict.md | 0 .../api/modules/ProgramOfThought.md | 0 {docs-page => docs}/api/modules/ReAct.md | 0 {docs-page => docs}/api/modules/Retrieve.md | 0 .../api/modules/_category_.json | 0 .../api/optimizers/BootstrapFewShot.md | 0 .../BootstrapFewShotWithRandomSearch.md | 2 +- .../api/optimizers/BootstrapFinetune.md | 0 .../api/optimizers/Ensemble.md | 0 .../api/optimizers/LabeledFewShot.md | 0 .../api/optimizers/_category_.json | 0 .../AzureCognitiveSearch.md | 0 .../api/retrieval_model_clients/ChromadbRM.md | 0 .../api/retrieval_model_clients/ColBERTv2.md | 0 .../api/retrieval_model_clients/FaissRM.md | 0 .../retrieval_model_clients/_category_.json | 0 docs/assertions.md | 258 ----------- {docs-page => docs}/babel.config.js | 0 docs/custom.css | 169 ------- .../docs/building-blocks/1-language_models.md | 12 +- .../docs/building-blocks/2-signatures.md | 2 +- .../docs/building-blocks/3-modules.md | 2 +- .../docs/building-blocks/4-data.md | 0 .../docs/building-blocks/5-metrics.md | 0 .../docs/building-blocks/6-optimizers.md | 2 +- .../docs/building-blocks/7-assertions.md | 0 .../docs/building-blocks/_category_.json | 0 .../docs/building-blocks/solving_your_task.md | 20 +- {docs-page => docs}/docs/cheatsheet.md | 0 .../docs/deep-dive/_category_.json | 0 .../deep-dive/data-handling/_category_.json | 0 .../data-handling/built-in-datasets.mdx | 0 .../docs/deep-dive/data-handling/examples.mdx | 0 .../data-handling/img/data-loading.png | Bin .../data-handling/loading-custom-data.mdx | 0 .../language_model_clients/_category_.json | 0 .../custom-lm-client.mdx | 0 .../remote_models/Anyscale.mdx | 0 .../remote_models/Cohere.mdx | 0 .../remote_models/OpenAI.mdx | 0 .../remote_models/Together.mdx | 0 .../remote_models/_category_.json | 0 .../docs/deep-dive/modules/_category_.json | 0 .../docs/deep-dive/modules/assertions.mdx | 0 .../modules/chain-of-thought-with-hint.mdx | 0 .../docs/deep-dive/modules/guide.mdx | 16 +- .../deep-dive/modules/program-of-thought.mdx | 0 .../docs/deep-dive/modules/react.mdx | 0 
.../docs/deep-dive/modules/retrieve.mdx | 0 .../retrieval_models_clients/Azure.mdx | 0 .../retrieval_models_clients/ChromadbRM.mdx | 0 .../retrieval_models_clients/ColBERTv2.mdx | 0 .../retrieval_models_clients/_category_.json | 0 .../custom-rm-client.mdx | 0 .../img/io_rm_module.png | Bin .../docs/deep-dive/signature/_category_.json | 0 .../signature/executing-signatures.mdx | 0 .../img/class_based_prompt_creation.png | Bin .../signature/img/dspy_signatures.png | Bin .../signature/img/prompt_creation.png | Bin .../signature/understanding-signatures.mdx | 0 .../deep-dive/teleprompter/_category_.json | 0 .../teleprompter/bootstrap-fewshot.mdx | 2 +- .../teleprompter/img/signature_optimizer.png | Bin .../img/signature_optimizer_process.png | Bin .../img/signature_optimizer_process_v2.png | Bin .../img/signature_optimizer_process_v3.png | Bin .../img/signature_optimizer_process_v4.png | Bin .../teleprompter/signature-optimizer.mdx | 0 {docs-page => docs}/docs/faqs.md | 10 +- {docs-page => docs}/docs/intro.md | 0 .../docs/quick-start/_category_.json | 0 .../docs/quick-start/installation.mdx | 0 .../docs/quick-start/minimal-example.mdx | 0 .../docs/tutorials/_category_.json | 0 .../docs/tutorials/other_tutorial.md | 4 +- {docs-page => docs}/docs/tutorials/rag.md | 8 +- .../docs/tutorials/simplified-baleen.md | 2 +- docs/docs_requirements.txt | 5 - {docs-page => docs}/docusaurus.config.ts | 0 docs/guides/README.md | 3 - docs/guides/assertions.ipynb | 77 ---- .../language_model_details/launching_mlc.md | 48 -- .../launching_ollama.md | 41 -- .../language_model_details/launching_tgi.md | 60 --- .../language_model_details/launching_vllm.md | 31 -- docs/guides/language_models.ipynb | 257 ----------- docs/guides/modules.ipynb | 287 ------------ docs/guides/optimizers.ipynb | 168 ------- docs/guides/signatures.ipynb | 334 -------------- docs/index.md | 58 --- docs/language_models_client.md | 313 ------------- docs/modules.md | 431 ------------------ {docs-page => docs}/package-lock.json | 0 {docs-page => docs}/package.json | 0 docs/repo/contributing.md | 71 --- docs/repo/documentation.md | 82 ---- docs/repo/getting_started.md | 42 -- docs/retrieval_models_client.md | 217 --------- {docs-page => docs}/sidebars.ts | 0 .../src/components/AuthorDetails/index.tsx | 0 .../AuthorDetails/styles.module.css | 0 .../src/components/HomepageFeatures/index.tsx | 0 .../HomepageFeatures/styles.module.css | 0 {docs-page => docs}/src/css/custom.css | 0 .../src/pages/index.module.css | 0 {docs-page => docs}/src/pages/index.tsx | 0 .../src/pages/markdown-page.md | 0 {docs-page => docs}/static/.nojekyll | 0 {docs-page => docs}/static/img/dspy_logo.png | Bin {docs-page => docs}/static/img/logo.png | Bin {docs-page => docs}/static/img/modular.png | Bin {docs-page => docs}/static/img/optimize.png | Bin .../static/img/undraw_docusaurus_mountain.svg | 0 .../static/img/undraw_docusaurus_react.svg | 0 .../static/img/undraw_docusaurus_tree.svg | 0 .../static/img/universal_compatibility.png | Bin docs/teleprompters.md | 283 ------------ {docs-page => docs}/tsconfig.json | 0 docs/using_local_models.md | 198 -------- .../longformqa/longformqa_assertions.ipynb | 1 + examples/quiz/quiz_assertions.ipynb | 4 +- examples/tweets/tweets_assertions.ipynb | 4 +- 152 files changed, 99 insertions(+), 3921 deletions(-) delete mode 100644 docs-page/README.md delete mode 100644 docs-page/api/language_model_clients/HFClientVLLM.md delete mode 100644 docs-page/api/local_language_model_clients/HFModel.md delete mode 100644 
docs-page/api/local_language_model_clients/MLC.md delete mode 100644 docs-page/api/local_language_model_clients/Ollama.md delete mode 100644 docs-page/api/local_language_model_clients/TGI.md delete mode 100644 docs-page/api/local_language_model_clients/_category_.json delete mode 100644 docs-page/api/local_language_model_clients/vLLM.md delete mode 100644 docs-page/docs/deep-dive/language_model_clients/local_models/HFClientTGI.mdx delete mode 100644 docs-page/docs/deep-dive/language_model_clients/local_models/HFClientVLLM.mdx delete mode 100644 docs-page/docs/deep-dive/language_model_clients/local_models/_category_.json rename {docs-page => docs}/.gitignore (100%) delete mode 100644 docs/DSPy-preprint.pdf create mode 100644 docs/README.md rename {docs-page => docs}/api/assertions.md (100%) rename {docs-page => docs}/api/intro.md (100%) rename {docs-page => docs}/api/language_model_clients/Anyscale.md (84%) rename {docs-page => docs}/api/language_model_clients/AzureOpenAI.md (100%) rename {docs-page => docs}/api/language_model_clients/Cohere.md (87%) rename {docs-page => docs}/api/language_model_clients/Databricks.md (93%) create mode 100644 docs/api/language_model_clients/HFClientVLLM.md rename {docs-page => docs}/api/language_model_clients/OpenAI.md (100%) rename {docs-page => docs}/api/language_model_clients/TGI.md (75%) rename {docs-page => docs}/api/language_model_clients/Together.md (85%) rename {docs-page => docs}/api/language_model_clients/_category_.json (100%) rename {docs-page => docs}/api/modules/ChainOfThought.md (100%) rename {docs-page => docs}/api/modules/ChainOfThoughtWithHint.md (100%) rename {docs-page => docs}/api/modules/MultiChainComparison.md (100%) rename {docs-page => docs}/api/modules/Predict.md (100%) rename {docs-page => docs}/api/modules/ProgramOfThought.md (100%) rename {docs-page => docs}/api/modules/ReAct.md (100%) rename {docs-page => docs}/api/modules/Retrieve.md (100%) rename {docs-page => docs}/api/modules/_category_.json (100%) rename {docs-page => docs}/api/optimizers/BootstrapFewShot.md (100%) rename {docs-page => docs}/api/optimizers/BootstrapFewShotWithRandomSearch.md (95%) rename {docs-page => docs}/api/optimizers/BootstrapFinetune.md (100%) rename {docs-page => docs}/api/optimizers/Ensemble.md (100%) rename {docs-page => docs}/api/optimizers/LabeledFewShot.md (100%) rename {docs-page => docs}/api/optimizers/_category_.json (100%) rename {docs-page => docs}/api/retrieval_model_clients/AzureCognitiveSearch.md (100%) rename {docs-page => docs}/api/retrieval_model_clients/ChromadbRM.md (100%) rename {docs-page => docs}/api/retrieval_model_clients/ColBERTv2.md (100%) rename {docs-page => docs}/api/retrieval_model_clients/FaissRM.md (100%) rename {docs-page => docs}/api/retrieval_model_clients/_category_.json (100%) delete mode 100644 docs/assertions.md rename {docs-page => docs}/babel.config.js (100%) delete mode 100644 docs/custom.css rename {docs-page => docs}/docs/building-blocks/1-language_models.md (90%) rename {docs-page => docs}/docs/building-blocks/2-signatures.md (98%) rename {docs-page => docs}/docs/building-blocks/3-modules.md (97%) rename {docs-page => docs}/docs/building-blocks/4-data.md (100%) rename {docs-page => docs}/docs/building-blocks/5-metrics.md (100%) rename {docs-page => docs}/docs/building-blocks/6-optimizers.md (95%) rename {docs-page => docs}/docs/building-blocks/7-assertions.md (100%) rename {docs-page => docs}/docs/building-blocks/_category_.json (100%) rename {docs-page => docs}/docs/building-blocks/solving_your_task.md 
(82%) rename {docs-page => docs}/docs/cheatsheet.md (100%) rename {docs-page => docs}/docs/deep-dive/_category_.json (100%) rename {docs-page => docs}/docs/deep-dive/data-handling/_category_.json (100%) rename {docs-page => docs}/docs/deep-dive/data-handling/built-in-datasets.mdx (100%) rename {docs-page => docs}/docs/deep-dive/data-handling/examples.mdx (100%) rename {docs-page => docs}/docs/deep-dive/data-handling/img/data-loading.png (100%) rename {docs-page => docs}/docs/deep-dive/data-handling/loading-custom-data.mdx (100%) rename {docs-page => docs}/docs/deep-dive/language_model_clients/_category_.json (100%) rename {docs-page => docs}/docs/deep-dive/language_model_clients/custom-lm-client.mdx (100%) rename {docs-page => docs}/docs/deep-dive/language_model_clients/remote_models/Anyscale.mdx (100%) rename {docs-page => docs}/docs/deep-dive/language_model_clients/remote_models/Cohere.mdx (100%) rename {docs-page => docs}/docs/deep-dive/language_model_clients/remote_models/OpenAI.mdx (100%) rename {docs-page => docs}/docs/deep-dive/language_model_clients/remote_models/Together.mdx (100%) rename {docs-page => docs}/docs/deep-dive/language_model_clients/remote_models/_category_.json (100%) rename {docs-page => docs}/docs/deep-dive/modules/_category_.json (100%) rename {docs-page => docs}/docs/deep-dive/modules/assertions.mdx (100%) rename {docs-page => docs}/docs/deep-dive/modules/chain-of-thought-with-hint.mdx (100%) rename {docs-page => docs}/docs/deep-dive/modules/guide.mdx (86%) rename {docs-page => docs}/docs/deep-dive/modules/program-of-thought.mdx (100%) rename {docs-page => docs}/docs/deep-dive/modules/react.mdx (100%) rename {docs-page => docs}/docs/deep-dive/modules/retrieve.mdx (100%) rename {docs-page => docs}/docs/deep-dive/retrieval_models_clients/Azure.mdx (100%) rename {docs-page => docs}/docs/deep-dive/retrieval_models_clients/ChromadbRM.mdx (100%) rename {docs-page => docs}/docs/deep-dive/retrieval_models_clients/ColBERTv2.mdx (100%) rename {docs-page => docs}/docs/deep-dive/retrieval_models_clients/_category_.json (100%) rename {docs-page => docs}/docs/deep-dive/retrieval_models_clients/custom-rm-client.mdx (100%) rename {docs-page => docs}/docs/deep-dive/retrieval_models_clients/img/io_rm_module.png (100%) rename {docs-page => docs}/docs/deep-dive/signature/_category_.json (100%) rename {docs-page => docs}/docs/deep-dive/signature/executing-signatures.mdx (100%) rename {docs-page => docs}/docs/deep-dive/signature/img/class_based_prompt_creation.png (100%) rename {docs-page => docs}/docs/deep-dive/signature/img/dspy_signatures.png (100%) rename {docs-page => docs}/docs/deep-dive/signature/img/prompt_creation.png (100%) rename {docs-page => docs}/docs/deep-dive/signature/understanding-signatures.mdx (100%) rename {docs-page => docs}/docs/deep-dive/teleprompter/_category_.json (100%) rename {docs-page => docs}/docs/deep-dive/teleprompter/bootstrap-fewshot.mdx (96%) rename {docs-page => docs}/docs/deep-dive/teleprompter/img/signature_optimizer.png (100%) rename {docs-page => docs}/docs/deep-dive/teleprompter/img/signature_optimizer_process.png (100%) rename {docs-page => docs}/docs/deep-dive/teleprompter/img/signature_optimizer_process_v2.png (100%) rename {docs-page => docs}/docs/deep-dive/teleprompter/img/signature_optimizer_process_v3.png (100%) rename {docs-page => docs}/docs/deep-dive/teleprompter/img/signature_optimizer_process_v4.png (100%) rename {docs-page => docs}/docs/deep-dive/teleprompter/signature-optimizer.mdx (100%) rename {docs-page => docs}/docs/faqs.md 
(87%) rename {docs-page => docs}/docs/intro.md (100%) rename {docs-page => docs}/docs/quick-start/_category_.json (100%) rename {docs-page => docs}/docs/quick-start/installation.mdx (100%) rename {docs-page => docs}/docs/quick-start/minimal-example.mdx (100%) rename {docs-page => docs}/docs/tutorials/_category_.json (100%) rename {docs-page => docs}/docs/tutorials/other_tutorial.md (89%) rename {docs-page => docs}/docs/tutorials/rag.md (92%) rename {docs-page => docs}/docs/tutorials/simplified-baleen.md (97%) delete mode 100644 docs/docs_requirements.txt rename {docs-page => docs}/docusaurus.config.ts (100%) delete mode 100644 docs/guides/README.md delete mode 100644 docs/guides/assertions.ipynb delete mode 100644 docs/guides/language_model_details/launching_mlc.md delete mode 100644 docs/guides/language_model_details/launching_ollama.md delete mode 100644 docs/guides/language_model_details/launching_tgi.md delete mode 100644 docs/guides/language_model_details/launching_vllm.md delete mode 100644 docs/guides/language_models.ipynb delete mode 100644 docs/guides/modules.ipynb delete mode 100644 docs/guides/optimizers.ipynb delete mode 100644 docs/guides/signatures.ipynb delete mode 100644 docs/index.md delete mode 100644 docs/language_models_client.md delete mode 100644 docs/modules.md rename {docs-page => docs}/package-lock.json (100%) rename {docs-page => docs}/package.json (100%) delete mode 100644 docs/repo/contributing.md delete mode 100644 docs/repo/documentation.md delete mode 100644 docs/repo/getting_started.md delete mode 100644 docs/retrieval_models_client.md rename {docs-page => docs}/sidebars.ts (100%) rename {docs-page => docs}/src/components/AuthorDetails/index.tsx (100%) rename {docs-page => docs}/src/components/AuthorDetails/styles.module.css (100%) rename {docs-page => docs}/src/components/HomepageFeatures/index.tsx (100%) rename {docs-page => docs}/src/components/HomepageFeatures/styles.module.css (100%) rename {docs-page => docs}/src/css/custom.css (100%) rename {docs-page => docs}/src/pages/index.module.css (100%) rename {docs-page => docs}/src/pages/index.tsx (100%) rename {docs-page => docs}/src/pages/markdown-page.md (100%) rename {docs-page => docs}/static/.nojekyll (100%) rename {docs-page => docs}/static/img/dspy_logo.png (100%) rename {docs-page => docs}/static/img/logo.png (100%) rename {docs-page => docs}/static/img/modular.png (100%) rename {docs-page => docs}/static/img/optimize.png (100%) rename {docs-page => docs}/static/img/undraw_docusaurus_mountain.svg (100%) rename {docs-page => docs}/static/img/undraw_docusaurus_react.svg (100%) rename {docs-page => docs}/static/img/undraw_docusaurus_tree.svg (100%) rename {docs-page => docs}/static/img/universal_compatibility.png (100%) delete mode 100644 docs/teleprompters.md rename {docs-page => docs}/tsconfig.json (100%) delete mode 100644 docs/using_local_models.md diff --git a/README.md b/README.md index ff58822898..48133a88ac 100644 --- a/README.md +++ b/README.md @@ -114,7 +114,7 @@ If you're new to DSPy, it's probably best to go in sequential order. You will pr 4. **[Optimizers (formerly Teleprompters)](https://dspy-docs.vercel.app/docs/building-blocks/optimizers)** -6. **[DSPy Assertions](docs/assertions.md)** +6. 
**[DSPy Assertions](https://dspy-docs.vercel.app/docs/building-blocks/assertions)** ### C) Examples diff --git a/docs-page/README.md b/docs-page/README.md deleted file mode 100644 index 524a593616..0000000000 --- a/docs-page/README.md +++ /dev/null @@ -1,41 +0,0 @@ -# DSPy Documentation - -This website is built using [Docusaurus](https://docusaurus.io/), a modern static website generator. - -### Installation - -``` -$ yarn -``` - -### Local Development - -``` -$ yarn start -``` - -This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server. - -### Build - -``` -$ yarn build -``` - -This command generates static content into the `build` directory and can be served using any static contents hosting service. - -### Deployment - -Using SSH: - -``` -$ USE_SSH=true yarn deploy -``` - -Not using SSH: - -``` -$ GIT_USER= yarn deploy -``` - -If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch. diff --git a/docs-page/api/language_model_clients/HFClientVLLM.md b/docs-page/api/language_model_clients/HFClientVLLM.md deleted file mode 100644 index 2108e701df..0000000000 --- a/docs-page/api/language_model_clients/HFClientVLLM.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -sidebar_position: 5 ---- - -# dspy.HFClientVLLM - -### Usage - -```python -lm = dspy.HFClientVLLM(model="meta-llama/Llama-2-7b-hf", port=8080, url="http://localhost") -``` - -### Prerequisites - -Refer to the [vLLM Server](https://github.com/stanfordnlp/dspy/blob/local_models_docs/docs/using_local_models.md#vllm-server) section of the `Using Local Models` documentation. - -### Constructor - -Refer to [`dspy.TGI`](#tgi) documentation. Replace with `HFClientVLLM`. - -### Methods - -Refer to [`dspy.OpenAI`](#openai) documentation. \ No newline at end of file diff --git a/docs-page/api/local_language_model_clients/HFModel.md b/docs-page/api/local_language_model_clients/HFModel.md deleted file mode 100644 index 162238ae59..0000000000 --- a/docs-page/api/local_language_model_clients/HFModel.md +++ /dev/null @@ -1,7 +0,0 @@ -# dspy.HFModel - -Initialize `HFModel` within your program with the desired model to load in. Here's an example call: - -```python -llama = dspy.HFModel(model = 'meta-llama/Llama-2-7b-hf') -``` \ No newline at end of file diff --git a/docs-page/api/local_language_model_clients/MLC.md b/docs-page/api/local_language_model_clients/MLC.md deleted file mode 100644 index 6a36f374bc..0000000000 --- a/docs-page/api/local_language_model_clients/MLC.md +++ /dev/null @@ -1,41 +0,0 @@ -# dspy.ChatModuleClient - -## Prerequisites - -1. Install the required packages using the following commands: - - ```shell - pip install --no-deps --pre --force-reinstall mlc-ai-nightly-cu118 mlc-chat-nightly-cu118 -f https://mlc.ai/wheels - pip install transformers - git lfs install - ``` - - Adjust the pip wheels according to your OS/platform by referring to the provided commands in [MLC packages](https://mlc.ai/package/). - -## Running MLC Llama-2 models - -1. Create a directory for prebuilt models: - - ```shell - mkdir -p dist/prebuilt - ``` - -2. Clone the necessary libraries from the repository: - - ```shell - git clone https://github.com/mlc-ai/binary-mlc-llm-libs.git dist/prebuilt/lib - cd dist/prebuilt - ``` - -3. 
Choose a Llama-2 model from [MLC LLMs](https://huggingface.co/mlc-ai) and clone the model repository: - - ```shell - git clone https://huggingface.co/mlc-ai/mlc-chat-Llama-2-7b-chat-hf-q4f16_1 - ``` - -4. Initialize the `ChatModuleClient` within your program with the desired parameters. Here's an example call: - - ```python - llama = dspy.ChatModuleClient(model='dist/prebuilt/mlc-chat-Llama-2-7b-chat-hf-q4f16_1', model_path='dist/prebuilt/lib/Llama-2-7b-chat-hf-q4f16_1-cuda.so') - ``` -Please refer to the [official MLC repository](https://github.com/mlc-ai/mlc-llm) for more detailed information and [documentation](https://mlc.ai/mlc-llm/docs/get_started/try_out.html). diff --git a/docs-page/api/local_language_model_clients/Ollama.md b/docs-page/api/local_language_model_clients/Ollama.md deleted file mode 100644 index 0f1198ad24..0000000000 --- a/docs-page/api/local_language_model_clients/Ollama.md +++ /dev/null @@ -1,45 +0,0 @@ -# dspy.OllamaLocal - -:::note -Adapted from documentation provided by https://github.com/insop -::: - -Ollama is a good software tool that allows you to run LLMs locally, such as Mistral, Llama2, and Phi. -The following are the instructions to install and run Ollama. - -## Prerequisites - -Install Ollama by following the instructions from this page: - -- https://ollama.ai - -Download model: `ollama pull` - -Download a model by running the `ollama pull` command. You can download Mistral, Llama2, and Phi. - -```bash -# download mistral -ollama pull mistral -``` - -Here is the list of other models you can download: -- https://ollama.ai/library - -## Running Ollama model - -Run model: `ollama run` - -You can test a model by running the model with the `ollama run` command. - -```bash -# run mistral -ollama run mistral -``` - -## Sending requests to the server - -Here is the code to load a model through Ollama: - -```python -lm = dspy.OllamaLocal(model='mistral') -``` \ No newline at end of file diff --git a/docs-page/api/local_language_model_clients/TGI.md b/docs-page/api/local_language_model_clients/TGI.md deleted file mode 100644 index c67a09beba..0000000000 --- a/docs-page/api/local_language_model_clients/TGI.md +++ /dev/null @@ -1,60 +0,0 @@ -# dspy.HFClientTGI - -## Prerequisites - -- Docker must be installed on your system. If you don't have Docker installed, you can get it from [here](https://docs.docker.com/get-docker/). - -## Setting up the Text-Generation-Inference Server - -1. Clone the Text-Generation-Inference repository from GitHub by executing the following command: - - ``` - git clone https://github.com/huggingface/text-generation-inference.git - ``` - -2. Change into the cloned repository directory: - - ``` - cd text-generation-inference - ``` - -3. Execute the Docker command under the "Get Started" section to run the server: - - ``` - model=meta-llama/Llama-2-7b-hf # set to the specific Hugging Face model ID you wish to use. - num_shard=2 # set to the number of shards you wish to use. - volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run - - docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:0.9 --model-id $model --num-shard $num_shard - ``` - - This command will start the server and make it accessible at `http://localhost:8080`. 
diff --git a/docs-page/api/local_language_model_clients/TGI.md b/docs-page/api/local_language_model_clients/TGI.md
deleted file mode 100644
index c67a09beba..0000000000
--- a/docs-page/api/local_language_model_clients/TGI.md
+++ /dev/null
@@ -1,60 +0,0 @@
-# dspy.HFClientTGI
-
-## Prerequisites
-
-- Docker must be installed on your system. If you don't have Docker installed, you can get it from [here](https://docs.docker.com/get-docker/).
-
-## Setting up the Text-Generation-Inference Server
-
-1. Clone the Text-Generation-Inference repository from GitHub by executing the following command:
-
-   ```
-   git clone https://github.com/huggingface/text-generation-inference.git
-   ```
-
-2. Change into the cloned repository directory:
-
-   ```
-   cd text-generation-inference
-   ```
-
-3. Execute the Docker command under the "Get Started" section to run the server:
-
-   ```
-   model=meta-llama/Llama-2-7b-hf # set to the specific Hugging Face model ID you wish to use.
-   num_shard=2 # set to the number of shards you wish to use.
-   volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
-
-   docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:0.9 --model-id $model --num-shard $num_shard
-   ```
-
-   This command will start the server and make it accessible at `http://localhost:8080`.
-
-If you want to connect to [Meta Llama 2 models](https://huggingface.co/meta-llama), make sure to use version 0.9.3 (or higher) of the Docker image (ghcr.io/huggingface/text-generation-inference:0.9.3) and pass in your Hugging Face token as an environment variable.
-
-```
-   docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data -e HUGGING_FACE_HUB_TOKEN={your_token} ghcr.io/huggingface/text-generation-inference:0.9.3 --model-id $model --num-shard $num_shard
-```
-
-## Sending requests to the server
-
-After setting up the text-generation-inference server and ensuring that it displays "Connected" when it's running, you can interact with it using the `HFClientTGI`.
-
-Initialize the `HFClientTGI` within your program with the desired parameters. Here is an example call:
-
-   ```python
-   lm = dspy.HFClientTGI(model="meta-llama/Llama-2-7b-hf", port=8080, url="http://localhost")
-   ```
-
-   Customize the `model`, `port`, and `url` according to your requirements. The `model` parameter should be set to the specific Hugging Face model ID you wish to use.
-
-
-### FAQs
-
-1. If your model doesn't require any shards, you still need to set a value for `num_shard`, but you don't need to include the parameter `--num-shard` on the command line.
-
-2. If your model runs into any "token exceeded" issues, you can set the following parameters on the command line to adjust the input length and token limit:
-   - `--max-input-length`: Set the maximum allowed input length for the text.
-   - `--max-total-tokens`: Set the maximum total tokens allowed for text generation.
-
-Please refer to the [official Text-Generation-Inference repository](https://github.com/huggingface/text-generation-inference) for more detailed information and documentation.
diff --git a/docs-page/api/local_language_model_clients/_category_.json b/docs-page/api/local_language_model_clients/_category_.json
deleted file mode 100644
index 8965dcf411..0000000000
--- a/docs-page/api/local_language_model_clients/_category_.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-  "label": "Local Language Model Clients",
-  "position": 6,
-  "link": {
-    "type": "generated-index",
-    "description": "DSPy supports various methods including `built-in wrappers`, `server integration`, and `external package integration` for model loading. This documentation provides a concise introduction on how to load in models within DSPy, extending these capabilities for your specific needs."
-  }
-}
\ No newline at end of file
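As the `HFClientTGI` deep-dive further down in this patch documents, `port` may also be a list of TGI ports (for routing requests across several replicas), and `http_request_kwargs` is a dict forwarded to the underlying HTTP request function. A hedged sketch combining both; the ports and the timeout value are illustrative:

```python
import dspy

# Route requests across two TGI replicas and raise the per-request HTTP timeout.
lm = dspy.HFClientTGI(
    model="meta-llama/Llama-2-7b-hf",
    port=[8080, 8081],                    # a single int or a list of ports, per the constructor docs
    url="http://localhost",
    http_request_kwargs={"timeout": 60},  # illustrative kwarg for the HTTP request function
)
```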
diff --git a/docs-page/api/local_language_model_clients/vLLM.md b/docs-page/api/local_language_model_clients/vLLM.md
deleted file mode 100644
index bd9befb9f2..0000000000
--- a/docs-page/api/local_language_model_clients/vLLM.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# dspy.HFClientVLLM
-
-### Setting up the vLLM Server
-
-Follow these steps to set up the vLLM Server:
-
-1. Build the server from source by following the instructions provided in the [Build from Source guide](https://vllm.readthedocs.io/en/latest/getting_started/installation.html#build-from-source).
-
-2. Start the server by running the following command, and specify your desired model, host, and port using the appropriate arguments. The default server address is http://localhost:8000.
-
-Example command:
-
-```bash
-   python -m vllm.entrypoints.api_server --model mosaicml/mpt-7b --port 8000
-```
-
-This will launch the vLLM server.
-
-### Sending requests to the server
-
-After setting up the vLLM server and ensuring that it displays "Connected" when it's running, you can interact with it using the `HFClientVLLM`.
-
-Initialize the `HFClientVLLM` within your program with the desired parameters. Here is an example call:
-
-```python
-   lm = dspy.HFClientVLLM(model="mosaicml/mpt-7b", port=8000, url="http://localhost")
-```
-
-Customize the `model`, `port`, `url`, and `max_tokens` according to your requirements. The `model` parameter should be set to the specific Hugging Face model ID you wish to use.
-
-Please refer to the [official vLLM repository](https://github.com/vllm-project/vllm) for more detailed information and documentation.
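The page above notes that `max_tokens` is customizable alongside `model`, `port`, and `url`; since the deep-dive below shows that extra keyword arguments are forwarded to the vLLM client, a sketch would be as follows (the value 250 is illustrative, not from the original page):

```python
import dspy

# Extra kwargs such as max_tokens are forwarded to the vLLM client, per the constructor docs.
lm = dspy.HFClientVLLM(
    model="mosaicml/mpt-7b",
    port=8000,
    url="http://localhost",
    max_tokens=250,  # illustrative generation limit
)
```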
diff --git a/docs-page/docs/deep-dive/language_model_clients/local_models/HFClientTGI.mdx b/docs-page/docs/deep-dive/language_model_clients/local_models/HFClientTGI.mdx
deleted file mode 100644
index 2550ed3c5f..0000000000
--- a/docs-page/docs/deep-dive/language_model_clients/local_models/HFClientTGI.mdx
+++ /dev/null
@@ -1,90 +0,0 @@
-import AuthorDetails from '@site/src/components/AuthorDetails';
-
-## [HFClient TGI](https://github.com/huggingface/text-generation-inference)
-
-### Prerequisites - Launching TGI Server locally
-
-Refer to the [Text Generation-Inference Server API](/api/local_language_model_clients/TGI) for setting up the TGI server locally.
-
-```bash
-#Example TGI Server Launch
-
-model=meta-llama/Llama-2-7b-hf # set to the specific Hugging Face model ID you wish to use.
-num_shard=1 # set to the number of shards you wish to use.
-volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
-
-docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data -e HUGGING_FACE_HUB_TOKEN={your_token} ghcr.io/huggingface/text-generation-inference:latest --model-id $model --num-shard $num_shard
-```
-
-This command will start the server and make it accessible at `http://localhost:8080`.
-
-
-### Setting up the TGI Client
-
-The constructor initializes the `HFModel` base class to support the handling of prompting HuggingFace models. It configures the client for communicating with the hosted TGI server to generate requests. This requires the following parameters:
-
-- `model` (_str_): ID of Hugging Face model connected to the TGI server.
-- `port` (_int_ or _list_): Port for communicating to the TGI server. This can be a single port number (`8080`) or a list of TGI ports (`[8080, 8081, 8082]`) to route the requests to.
-- `url` (_str_): Base URL of hosted TGI server. This will often be `"http://localhost"`.
-- `http_request_kwargs` (_dict_): Dictionary of additional keyword arguments to pass to the HTTP request function to the TGI server. This is `None` by default.
-- `**kwargs`: Additional keyword arguments to configure the TGI client.
-
-Example of the TGI constructor:
-
-```python
-class HFClientTGI(HFModel):
-    def __init__(self, model, port, url="http://future-hgx-1", http_request_kwargs=None, **kwargs):
-```
-
-### Under the Hood
-
-#### `_generate(self, prompt, **kwargs) -> dict`
-
-**Parameters:**
-- `prompt` (_str_): Prompt to send to model hosted on TGI server.
-- `**kwargs`: Additional keyword arguments for completion request.
-
-**Returns:**
-- `dict`: dictionary with `prompt` and list of response `choices`.
-
-Internally, the method handles the specifics of preparing the request prompt and corresponding payload to obtain the response.
-
-After generation, the method parses the JSON response received from the server and retrieves the output through `json_response["generated_text"]`. This is then stored in the `completions` list.
-
-If the JSON response includes the additional `details` argument and, correspondingly, the `best_of_sequences` within `details`, this indicates that multiple sequences were generated. This is usually the case when `best_of > 1` in the initialized kwargs. Each of these sequences is accessed through `x["generated_text"]` and added to the `completions` list.
-
-Lastly, the method constructs the response dictionary with two keys: the original request `prompt` and `choices`, a list of dictionaries representing generated completions with the key `text` holding the response's generated text.
-
-
-### Using the TGI Client
-
-```python
-tgi_llama2 = dspy.HFClientTGI(model="meta-llama/Llama-2-7b-hf", port=8080, url="http://localhost")
-```
-
-### Sending Requests via TGI Client
-
-1) _**Recommended**_ Configure the default LM using `dspy.configure`.
-
-This allows you to define programs in DSPy and simply call modules on your input fields, having DSPy internally call the prompt on the configured LM.
-
-```python
-dspy.configure(lm=tgi_llama2)
-
-#Example DSPy CoT QA program
-qa = dspy.ChainOfThought('question -> answer')
-
-response = qa(question="What is the capital of France?") #Prompted to tgi_llama2
-print(response.answer)
-```
-
-2) Generate responses using the client directly.
-
-```python
-response = tgi_llama2._generate(prompt='What is the capital of France?')
-print(response)
-```
-
-***
-
-<AuthorDetails/>
\ No newline at end of file
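Given the response shape described above (a dict carrying the original `prompt` and a `choices` list whose entries hold their generated text under `text`), unpacking direct `_generate` output looks like the following sketch (illustrative only):

```python
# Unpack the {'prompt': ..., 'choices': [{'text': ...}, ...]} shape
# documented for _generate above.
response = tgi_llama2._generate(prompt="What is the capital of France?")
print(response["prompt"])
for choice in response["choices"]:
    print(choice["text"])
```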
diff --git a/docs-page/docs/deep-dive/language_model_clients/local_models/HFClientVLLM.mdx b/docs-page/docs/deep-dive/language_model_clients/local_models/HFClientVLLM.mdx
deleted file mode 100644
index 059c2a9be5..0000000000
--- a/docs-page/docs/deep-dive/language_model_clients/local_models/HFClientVLLM.mdx
+++ /dev/null
@@ -1,82 +0,0 @@
-import AuthorDetails from '@site/src/components/AuthorDetails';
-
-## [HFClient vLLM](https://github.com/vllm-project/vllm)
-
-### Prerequisites - Launching vLLM Server locally
-
-Refer to the [vLLM Server API](/api/local_language_model_clients/vLLM) for setting up the vLLM server locally.
-
-```bash
-#Example vLLM Server Launch
-
-   python -m vllm.entrypoints.api_server --model meta-llama/Llama-2-7b-hf --port 8080
-```
-
-This command will start the server and make it accessible at `http://localhost:8080`.
-
-
-### Setting up the vLLM Client
-
-The constructor initializes the `HFModel` base class to support the handling of prompting models, configuring the client for communicating with the hosted vLLM server to generate requests. This requires the following parameters:
-
-- `model` (_str_): ID of model connected to the vLLM server.
-- `port` (_int_): Port for communicating to the vLLM server.
-- `url` (_str_): Base URL of hosted vLLM server. This will often be `"http://localhost"`.
-- `**kwargs`: Additional keyword arguments to configure the vLLM client.
-
-Example of the vLLM constructor:
-
-```python
-class HFClientVLLM(HFModel):
-    def __init__(self, model, port, url="http://localhost", **kwargs):
-```
-
-### Under the Hood
-
-#### `_generate(self, prompt, **kwargs) -> dict`
-
-**Parameters:**
-- `prompt` (_str_): Prompt to send to model hosted on vLLM server.
-- `**kwargs`: Additional keyword arguments for completion request.
-
-**Returns:**
-- `dict`: dictionary with `prompt` and list of response `choices`.
-
-Internally, the method handles the specifics of preparing the request prompt and corresponding payload to obtain the response.
-
-After generation, the method parses the JSON response received from the server, retrieves the output through `json_response["choices"]`, and stores it as the `completions` list.
-
-Lastly, the method constructs the response dictionary with two keys: the original request `prompt` and `choices`, a list of dictionaries representing generated completions with the key `text` holding the response's generated text.
-
-### Using the vLLM Client
-
-```python
-vllm_llama2 = dspy.HFClientVLLM(model="meta-llama/Llama-2-7b-hf", port=8080, url="http://localhost")
-```
-
-### Sending Requests via vLLM Client
-
-1) _**Recommended**_ Configure the default LM using `dspy.configure`.
-
-This allows you to define programs in DSPy and simply call modules on your input fields, having DSPy internally call the prompt on the configured LM.
-
-```python
-dspy.configure(lm=vllm_llama2)
-
-#Example DSPy CoT QA program
-qa = dspy.ChainOfThought('question -> answer')
-
-response = qa(question="What is the capital of France?") #Prompted to vllm_llama2
-print(response.answer)
-```
-
-2) Generate responses using the client directly.
-
-```python
-response = vllm_llama2._generate(prompt='What is the capital of France?')
-print(response)
-```
-
-***
-
-<AuthorDetails/>
\ No newline at end of file
diff --git a/docs-page/docs/deep-dive/language_model_clients/local_models/_category_.json b/docs-page/docs/deep-dive/language_model_clients/local_models/_category_.json
deleted file mode 100644
index e1d91e8cef..0000000000
--- a/docs-page/docs/deep-dive/language_model_clients/local_models/_category_.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-  "label": "Local Language Model Clients",
-  "position": 1,
-  "link": {
-    "type": "generated-index",
-    "description": "Local Language Model Clients in DSPy"
-  }
-}
\ No newline at end of file
diff --git a/docs-page/.gitignore b/docs/.gitignore
similarity index 100%
rename from docs-page/.gitignore
rename to docs/.gitignore
diff --git a/docs/DSPy-preprint.pdf b/docs/DSPy-preprint.pdf
deleted file mode 100644
index df4b2e02531cc4d6699288ef4a5fc737a645925a..0000000000000000000000000000000000000000
Binary files a/docs/DSPy-preprint.pdf and /dev/null differ
zgz$U%B_@K|W_StzCA|1`Cvj>fX|$)ogK9TG2}U97{!MZKnWS{X3;)8$HpBvMwYxO^ zyf3%hsU>KUFm<~mW&Tx!)TjEB&pYW!VT$4LNRs)dX+E1559Ex|fV?qVsVQ8FsMU@B z%h&fCKNy`i=|9HQfBy6THKwX~I+zmB%NtoKyVw%Yi&@&Zm^%I2Z46yZMNEzDP5vE< zSvonp5O8oX{2xFh6B7sH{}&LcwP(N00q6Hezi_-ed?kWNR8pXE;77m?z5^I-i*H+Z zA>8uPHaIt?8M%CGSHB-sxSZWXf*KuSb zuE_`my;zbIePzUfEjyT2wrZ`fMh)9KBL4e z&VvY~yTYh0M7mfAs}>Y9J06B`%jXDXC59~dBx%1ZQ_5Bp$s`JL(|8RC8oFD=W(a7> ztedsd1fINzj?^2fLIQJ|TuFy2O7eca(kPL{kZ^)b<`5-53fOf@MV`bIr$Sb%Q6o&; zcBMeCfr07{dEuOekpZ}!Ok2<+m?}-3A+~B$S0Q7T1}c@><1fhJWgCvAofoTk`hFc1?!Z5jmt_0bdwd5(huGh^~(q5%zuk=YoEM@fz+Yu1!C zeSjXlrX1zGvO_hhOXxI>v>OP1Krm2W%z5)tO35PZS(**wC@vroL|V@(6wf9~Y7(ua zYl<+oP7hN$B?uPKGXmrlc&0h8CeLpl?+ib`Yws`Z$t$nl zH=^y>gyH7Z`D(v9-*+iv{4WXS4RSQ2% zX3tiY3?%x9O$pDwA-AGIwh!N99NhR^zrX5Nf2T2wboAlBN4+@swc%1i&WpE$gvuU6 zSe#W9P-qmOI}m{g+^8a=&`t2Et`p7p;V$FDE~>V^x(IMMq!PFtqCTLC97s4CRpLP) zSJ4vj1R_OxP1h0gd~jyx_4{|9wE+%i&3qg6=9?Rvfq;Hti*RNb#6?hq?;vMFjGh-T zNEbC2*k}N6X2Ey5*m?8^s*6$d5kBo00@(tDaM6)b=MhchV#L-Eu_UuL;A(Z^?>hL{ z73k@~H4)(5yuh%xk0v*)e`t6Lpw#eo4p0i3J_z4EYR<|7jg5A5##Hr{#51aCT_37pDR}|qz zsG+0i8wXc-!AMXf`=U|b#_BWHgfxLwI)z|7j1%hCwgXqJegF-|&V9^2+OhY<=?|z4 z8C(*u5^ygJmJNK z`dztlVo>OcjpdPeS<^M9vTk^(T$9fCGO(!+zkC3J-R}`B(06F4eV~(Ef^1RPAVb>( zd*smYO7OKD#cYn~w)0`9l}yBJOR_s)tjNYaYdDZ3x9{Q+u?dmx*!Trfqw^sU`FpqG zDZpnF^XuDp*>!*Qp_|p9#4;w;>RMqm=W{Bbccll08UsZP;EJ!5g2sK zw)^?^`F8I20SC*zNEyY$AZ~4*jtji#$x}tEgOI>oZuCHR8pEihGy-3)S;h=n`!v3? zrvQ+}o^-KJ8L3z=;sP`poFX6)w6W29dYiVw$A$zMm8|KrDjW3SXDnv0?{c#Vyu7w$5wu%`JQoc8BdcnFkERDFQGR<4eIKbgjPV>pEMxRrA;8_4r zQuHjNiI;8UY|@G4OwDX+dw3M7g`nqAW{df*&}@o8C7ff%q`v&jw5ruiKl8*ON0Dke+S*EBWi7Z|R{xMh8xqlh^4A+;-pzS30&5u~ zq?ZuVg+N|Emh!`COAn;A(a>D8mZN{T44dG2I~b9FY++Snj+wmn1l*i7)2j(u>>F;Y(Y)w$J2U z-IvYXE<*`mWBR!w1L7wbVN5CyVYXF|nmQz0V$$5dGpQ_I?8Yex zWvbjMiK|fluXO5ipMd1w=+u^9q|!3devv|@axkApcYwnRE$Mkn+Lxj?z52I{wLW|X zZTGFVZ{#Rzrqcg-QnCKm%ui-E7PkLK`ImOz>VVrlQ}>|ESFSGEouZT5DydwWB+2Z| zl`|Kdjv3WL<4sv+`F=(xpy)T+%iLa4H3=x8I0U`}gu$Kxw{dV}>f`tPrkexr%qJIQ z>hQ;KNP}JExKB=izcCNy@5Lro7hCyzSN~a^-2O`LpYM<{1-1C+__VT0$!G(5AO3NUwPzGR4k>S# z^Stxo`6KYBt@P&9bZfpobzxeSdnma{9|=w@=bh6h{ToR2k9>G>#>p=B%0yRatjd%f zrMRl=a_LMrLN4NPfajh9y7yWYrEEP< zHXu-3#;clqsU`M^+E5SnNbdPOgW+jxDCOv4C0#xjkd<6O%W0V4nW^}E2;w!?IoWU%Pj>1mlIE)yn;SF2?jH z>q>K{Vs8~&NqOppw0L1SyC)X}EuPqchtKsU=JrNYQr#EuN>k5aekJXyQyW;{YKt;rKZ9l?f4 zjUOZ($9151k0jht)h8VFFO0e%ju3b$H%HfMYnPD#!@3G8Y(NLvw<_*Nkm5U@*RiHN z>c1mwXZvr|x~nNU=j;Iz8_h?aFxtWN&zi?`x6%nBC~=wion9ap~Tg3SC9Hr^Kgz3s2R+h)Tfc_$1V4C@@S6Y&b zzLz6p*r#7W;km8|N}sN;dYo!bo89Frdr_*CCZe0ry6uip+j&(7A^l;F)`5?vetd5& zz{P?4r4>V4#xorL1=?j15PIUHM3P&Nk@|)qE=H8J93k*|xXRUwzN(1fgYFk;tq1{m za0_f_D48MvnuLJ|0gXem#S@|NU>qq|YEMGo-;ZD#Y1uQBshafv9&-qxaxM&>^4H!X(Q(@yP651OV$KH zw)x!76ewKiHyQtEE}aWSt}3~KRGXf@Io!PQSbQXK1deip29{Y!+=Y%_yJMrcfQsaW z;QYZaS`m3Yz8X`bO}Jx>U}d;KP=r#tDOYSeBbK znhC=4kNer>1N@z;u{jT4<*cZB#ZIgcAF>ZkELC#=bXOJMlwH*c`jbU=v;lslWlyp! 
zGiULHMu*%sNIJ(GF-;KjO%dnq-BYh^c?pY?aCr40sFVTnOO_d`7-Ui~0J|Y#yRb&z z9^5CH-iDrvGXu*l;+lzlb%7mcB8JOGi@FJ-*D;|+yi|RmaIoMoXY!EqA z;TUuO0}WN;02lyxAlTda%><%l9mZnXR5**Av^rDUQq)8&EBwY_u}lp_Q@LL2t_Xi% z0lxH(<&}6&KWRuf?UM%Bqn9K!T>J<|;f6!b{r(BCUrucgQ)`aH|6nm*oP@S|{^1pi*601si7?vrojVQ_8kj}X;K!mmmuTXk5xnYF|ED==WCG^s?Tq z7;JR!Zk=LczhS{U@E@PGViN}u2y$)-Ev@~BR@I+Dc1y~kb@V-O>c;~BzeM6w85`+a ziGqP|H6erdQZdW-B2CIeI%`yH4;Zq4w_S{`7Z*`tISM@sS(m8y<=u6$_CY*V1Zmk< zH5#GzXFNhx47|I^ohc!~q5%=SM7Zu}3Xx=*R2qZA1Ce+Id@rMKNE&a*Ln*fmzI%ZR zW(;Dy1`d|doyJuT6eL5hHoz#Il411F$-ZfENQ#=#&3LDpbndG?G%?y`tr6No`0Irx zn$PIWzT`I^< zcr|cWK7C!ZG-1T-y+>o`#?(LDIOLB0P7M@f!?5AJr2CE0Tjr(5E`SQbk;Kn@+y?f4 z;F0*rcPv6Oy~(tpA(r_==BZd@grXwrg(Sp6hj%((T|dZU{MFbL<(2tf?#rV^cW>Un zqDOMhB7WFiEh-{5zU-{kSA{rjtX$5H1l&!iTK-~&LZPOW_);KQ}j4!FLKDqE7)s3^;O0kt&`{$65>Wj~}VBvVk) z=mOsuzIxuWc3+||vI!JmV@v}H9v@NXEVgD` zzTRtNxE`3SuT`cs=gzg1L(jD4bY(X;KFE*Ec!!HS0imQ`p(RGYs$iNGLW~eE8b+DP z(|ChWKT;jHe@`$@AaNq`(b!8upFltUmY zli~knO?j1(61p1EphuwD>ED0Yp=jU#Y?$sfJE0v0i-rkJgvM<9=L-LOG1Vo}xoiry zGj7ttRz%CrVzus;PQqbYr{d=O6!Bii(m0!j1$V68WXo?lw%Y_bEE~dnNpn(_N2-t4M5S$+413 z1xK*(CbG2=pwW6u4l`bEVNt0dO=;arsIL_p_}@F4dmpv`9@*)GvT74o{^z-{l&RtXXwPbW3uZz=IAy8{D2n1!F z7LUljODlSGQ)i`0kyKA%Mq8F&DTH-+nS#5SFY~1Dka%1>=Ne0L7OV^W&{+>q)T|MW z4c;fWMjPCnl*wk<0#|k6a*z$d0`yB~Siu#Z==uip@?7!WUBW)i=O7?htyE*_-GNYw z)U88#OSZ9Z`a_IFh5m#fU+haduWo*%pHo-bb!LW5R)*Q9sVO$oBi?(q&GdBZO!lFJ znpPO>8B?HUxMqiXor`??!E*=?+%DS<15%CgcU!($R(x#wySCapx5sFE5mnv!jWD z4V3$4kLEue%HH@-hnh>|AuoUon7KA4vEQs)?5#8HEG231qYw`kinHK5Wb66$2?jKt ztk1qV?{zhdR|+EXqxO3C4;ZsRl9JWM*6QN+#yv{ho=K!j*QO#P8YeN!ehEiTtQh{a zyDMDTT3!Ag@T-;8>*?}*KBEdK7!YP(n4c>&R6O$O6T5cPxnh#aXQ~i2lAe zQTSY^vXf4whE?{|t>_XJo|h>HD-Y+D2A%~P?@K@>BK4gVM&vw-9D-=I)y@l2msHH9&O;v~xZwK*ewQgDRN>l8AQ1G#7fS-q z%>`j(7hujmgaDw=!gCRoq~2jGMY@MGzF}=w>wJ)%FujuQ25t|5@b7L`r(&Cq5?6p9 zZa4t6rTW6>Kthe!u4?8)o=gBH^{uGRxo#IGu3DuIS0f7!qM42beMeYq@2tKg)AKZC3f3jbN$79;&-d8 z&Jf>EGVm>#mVKfXl2K|>RdHj(BGuwCy#{C4?_+9x3Q`Y$ zvtwutCx;{c9PK6eq!?~9jA4og*~PRA74Xj~jd z9C~Djd}cjvoP)wcXqzVL1KMRBh&~>P6;Tax0QvzX@{XIAzRJQ$)G^abHJB}R@c=&E zFD-`z$9Hn4zO<*aq8gwbQWF_YZh!JiWDsXtU~4*+=rtuejlET0-c{OBqcXWI!&@#` zWbrg!do@NJ;Z{`2w_xkP4#r~e8C&|kAq@xUI`jIDT#fy)q5L97JA%xjDYgB@^|7;AfdzAHlxUvXs$I*vO+c1|OAi0q zjfjh%_AZI;tO$)ceNf~aMY|{}IabK_gB&7`TO3b_4Qo}gMQWM-rKOKx4v_~@O$SVNxETx3{=ALqML0seWa-AjOLmI4< zbZomUsc;^{=Npy`68eMFX(Nfu7uoo2BYCPoD+IaVo?#k*_88d8txLT`j?9@mcmWXd zZ>pqC^}_fLDgc}}1iz6pJ?wuV_VGU}>$I`sK+DbmY)uHhNZBmcy96amr&We<;4@b5 zKwp>7w@00T7uC+_#X03&+!(UToNzG-9g5`rErKM%dN6Y^FCC9GET8=1 zc6;v{2<+qOnoGz7znE*)7)4AxvFGc_+0Y+`ycE=^PvG}T8$ZwW$Oc)93Vla}>(<=* z6VXmK)T(B?f3k_zm*UD@87)&Y>R>i>vkz8bic_em!uY@jr8BDOwsJ`_@SxaL@XJ5& zEF8d8AZIT`^F*ju}G_Y1FwmMi~*Ho3FQ(zk9cDPjo zu%jxkOsI2d$zk)HMgU=Db7gbv(gVx-Is~z{LoE_;(eMvmYi5LhD}I4=&Y(8((Qr&P zax{fx6R|B--2a|-towJODUu288p-BNSnP(z15sq?@lbuG)mo(c*Vxm@%CylD(&}F7 zU-D?xa#}_FL%rjMEMgB)OEq@Dgf_gE))k6kvelR+5>(c4&Y4@Sj|(ZUWg$zf4>4Ji|2YoK1bM>{6wg{mGm!L7bb$K+wCV# z_3F!3*qmT_lmOq*yfl#IBnno6K_0!=|`9prW)!K;OOfPe@zeYott*O&S8tBN4( zFY;Jq?!kr;*w*`ZuQ%c{HmW9f?P8t1s#y;E4iP(!IH9Bna#@QFbWx7wrExRZv=@(( z9ru}e+S&;|0o;U?b496CiEvC}$(fYIOF1;g;aq~V&S9*=@MG(MkipDRgYGy)5Hm>Q z;YXy~X}|aiWGLl#f!dSzzfdl|JC~j>QNW2wHheJ{cAh%PEfm%e^)*#COD}xzwgbM( zM@MKtEpB5p(?oYq{O(;$lG{6&B(xsvh+^>YP8U5aGnEE^t2fP?+Fvs|6|JIU);*1$sOpmT^4Yh`#UDJ)9maQ^a1vL!^z8ugIN*o-SU*|l=!tDk%f z${rV8s#rA;ODC`@3|_G}>1d^a`Kh)o&pC1jf)s8ZlSc}z5sopg%N1Pejv)qi#S-Oy zhWe$gL-7K};o=FQ;|d9g^}A5sHI^-OV!R+{)T}XVAkc`EB1zDIWgp*u;E%$(4LWOOb4M3Tk*sI`Ly1@l8oTvB-bETaQ zV76o+i^kl{K_vtvXc^yFAB3iiy%4N+)n>g|w-Tt>?+`Lb)$lFk)bkLE^^&#zE 
zD(Gd+)DWBW<#pn*jIe%h#AJOrM!-iFsBimjCZ6k#(!zGjY9+Vfq^_^vp0sY|OJ$Xx ze{M^0%_9#`R|?-amDf2RJMEtLlp-4HnZ~j+-KBgH&6G-A@5OQ`-x2A9i6$R_zLeVx zo^*KOv>i_87)^LS+e)eJ9U2-P;zu${iz!Ca1In1K47{5&PJe@}JpSC7A@(fnswfi^ zI#BL)x~yi&w5!u84(=qFc)e0_JD{{dj_kz*NrTqgWuZ2WYA_Nr8gu>m zj>TPg<#ZKxUvDT9m~*}^SHd=Zt(O%&&>f3)mfZ3k2)del00R(g`VvS=({ypWY3JT< zIl2!=qG&_a`6%23a8W^t=v$475P-2gkxgvn+!ZHsT z7SGbHec&>SRB0^ZlG!D;_nASd{wI}4(8QMXVFNW`7?BCxb-i#e!oZYmTkdT=ft18? zP}Q`a1@Z+S05Qv!&Ntvc*M_DfyNPSsnTWEF6PAct0t-Cn**eteMtwU2RcWE>M!Luy zdmh&1P(ovdky5_BG%KUKo0=qXW0zuKQQX%N4x)Sg%0$!e3KFrd%mf{LD*y;mioSa| z&kK|gsbK2-9^=+2pq6hdf7qTETakN{1F}jL|2VZwL*h6Zmp%)sd~e! zSFOtS|7q10%`Mk|TGeMx?L0w~st|HFqitA%%fY&B>?QkR!Wd5<3JLMC@N~rXc`Z+e zTt3y9qQ_e`4;{K1h#x>@y`{?Bt})v^sfi8$b~pPA$s}od#*i;}n}&2yicA8f%YlH@ z?Qy~{`%1M&Q|EQ*cQ>c+-TC$OmF-_xDxzM;hYj{fLbvCA&aY&1nH5}HaS<$tQTEh+_j zTa;-s$6Z)&g>juzuHOpl$|O$KhAMpqDpxGx zSQdIY=}9-=eUV!9V2$CWn@rMsodmEWXFHGlyY~Q(cjzNoQvn=9)&OL2gZW|3R7NwL zuvxxnpTpsInJLptvDu_Tricm078RZHP%gbnLnG7iz6< zTpm(cz2=#EYW1>%JbJd9y985}EW+cmW$nsMXA8?}a@hUFXF{vd!RNvlmO2Nlgr#H2 znTDrWGfT9uE%ZpWL$5zZ3l>Uq)4rxh!dzOKCrk`&;iQn^!yX|WK*fE__N`QW)e6Eu-iCweVtP{}<<`8t4iPd!N4<(}$E2r9{ z9&;Bc*in+~5jiyaVO?F}hTyYA*_m5iRndt;Rl;rPRl;+3y2n19-JR{ZJ{)`_@VO%a z*oR~MBhFWW^Xn^i*t#_waUxve>|KF?l{B(!zR{0B*}Yks=QJuK73btxHu9#AOYwy8#QWoN_B2~ zO~Hj<8oA|uoY}=4z}|uh=%j1V zE7Ys+n{bVzmUh((_q6Fp;6dV+=ULym>`5Y%67B+V@6>fn0SQF6c;omzg#k(Lo7F<- zf?z~%Leh^GjrDS2Hz|w}tvwR)aoCq3?a3s*nan?E^PZ@=o1nBGurG{~Z1Fu6J&1Vh zdlvcH!L_*8x*+*S)(%8*T-}9#^T(?Av%ow#%P%lV_n*+SG*{=S^EA}qU*=e&HN%E z9#HBz4?#|s3Nd09PaF-m2FtTE*0VdeKKJs955h~^{w+Ej)3V^&t;}Tk&0M6F&K?gZ zu~KO~R87gLt`6)0TkJs}o{iz4d<)Ksur@@qL;J=(4wqYSDk9I%Qz(%R2Ix!dX>P-P z7dXG^fXjw^YOUOdxC=kmck}Prbyozjqahh&S|Ier$qCKl?60&RcPJzVYP|R=(3Ueg zS`swoO(0!L5Z`^FJK}=vY);%Vs90-ERQ-~p`pK)fXKFq21?a-~Y0o>12iGO=u!9H` zL^vLs-Ke*?iYVSZv?mq%I22!OOs>2ihJ{zx~cV|~ih!iQi(oA{~GlhEFot$D6= zdd$WjtHOt3lWf%K8co2zZ^;{S>1a7Ap&qYE`iq?H92~hI@D2&L>0sqNo97>v_wfA& zeGkFC2Ycv_&eQu(QrE^GC&U>qgU|!$l2|$Q1ktEn{l)%rk6S+4peg;=))0~*l*9#F@P#=z&e&cnN&C(o_PEjT)j%O>? 
z2ziq_1W8{%U}Cw4&)M$@d5c2qJqxFo*6lMLro$IwAQ*Si>c0b&jQ0QC>h>R6P^O>l zsHV6dWZ~tt!M)a=`{6+qOdsDD@?P4mYPGXg0|?$o6?c$Y#9e<4Oeb7;PO`^-?lW3= zDdEmIKG_!6q11p^b(4S_%#12kENT54HK2%6^Z^!4jp_=r;2G_YRv6snW>e?9o6Jo1 z_eV)^{5`vWTW;(a|8EzNaQo25FvKZ>OZ!chcVN&7mxa&gX!X`jY|iM@`|ck#g!rznjhY=>2yl- zqc@JR%|x^b8DyGk@=p_nXltRR5Uaob)e$l3%Ymo%;g%b`+-@WDP$Z(gB1yic9`q=o z`9Iwt)6~8(H=v^7JoX)y#2p;C4h2@oSl!F-`G3Jvb;rFnQ0+KCPd>peK5N|>J;*lA zW+c}d>8Kkz75MM_P19w*V<&Bu)lOy|&$Bu!G7FmKq-gyLB}x=*XV$`P>dz6q)ap|u z%-W=i+w-F|rtWy5-Uausl^!~;(AzQ4#{8z9AB!+B%^IzMl%i=V#*sMpV&mUy>gRn; zZ6oCX(>-g!ozms+L7Q9T^KSR%QM8vsUAyEZ3k0V;3t}k}vi?>NzW3L2=jF8g{pr|@ z)pcGMvvw@-yKgK!>#&*i<3in;Kf++alX zsAI_SOU`*u$TIcnz>R-&+szP*Xm3cm&)EZeHY?ER^jQ>KuR|{Wdou=^w03P?HIvMR zHah(B7|ah-XB9N{^#*Hh$}3N>T&3-VX-^tv5^_~?`qc^Rc(XNc3dA?iI4@JQmdC?b z{btAEmCa^ve|K${Qi>Fgzh1{ZGo)WD8?U~ojU9^wGF#`5Q`JF}Ik?Rn{W0Cbi}ctb z=*sKd1IP-)MRU^0X5u+nEKA`?AJnh0nPH_xm&K#@?15J)ZeL{;%BnVYI|DQkX3!?* ze36+d&n7Xtk4(f^fg07{xY%(X#K?^8YSfOz zX%kT}VAF>N{?eM$LOFR)H-6LC5@WBoNdQI!&QkK6R6lb_2E-4L0C3!Er_RhhEWoea zSOM^OhLFr#Sok6b0T{gU8$dZn*R95TX{T;5hS9=ey;vT#|n|vFp0jIyl z*(@tAIhW-`UV}WUS}m(M11pg6)cxq{59jGsQIfUC$C-geyrc8eK?92fjs$I{fZI06 z>+Li78I```KNkwZ&R|B0ieA;OH<{3*kA(9NP0`q=CFMV}3U4K4v3KW+s?|Yx)jCgM z|2u$ZzUXrbD{&vk}VwCiK*7sW>r*NGbIKTnEi&!J2Dj;e~NUwdgZ<2*p;IeO;8IYNAQ+L2N~ZTBGp- z%(M0^oWjqUTo*Q_Af|PV>9;%;72s9#y*rMP2*LKSmx(2S`t(;&#@TN`7BQf_FCEJp z^;WGurOo2JE7=sYh()~T&g4g!!3*L>b@Glqyna$ktB6g&LVKi;TDm0YyV=5%>d04K zcAd~X3S3`&WzOrlEsd;5*!1aBKe3VJ=LlSt;#g7;`bv;}aptmT20TK0tE4Mz5uOX9 zxpYieW}kMNWv?<*y3opwEe~)`Wx@3`40+Sx*^FDpZyhz@a6c-4<28#$;BO`M0eWP{ z(}sQ2%<#VJS-38)6|dqXKgG{D#*sRE-$f#l7@h^y3iECC$(?QJSkjBq8ep)p#q%hM z_-qTjgads zPKj2P%iX%Z54=AwtZ0W8WX4Nx{=7a)M%1QDSrzF#KV%|@9S!TB+07KrOMJi^f@~}| zXEcBO>&TtW^w9>Q>Qe8M69zkmJzB4J=za~T#)Gjxen8COKE9iNH~uC-n`QlO9{$d5 z4wOw1$g-FJf|6MyDF3$}X8FGs zh%z#BF#V6)ZH@YWdWfkK%gtOpT5^Ujh+RxGFSa%48Eqa!k#uM=apz|se;m3 z6?IyuNLE(N|5@YADb1K zng09@u*vj~p0uLMYNM@iM&bg1Y@@mg6b;@3_``$Fnj}*!!h5Yz1!x3Tx?cthaW#mo zT&mQfORJrIp)-c;T$~NtQ``P9gAcpIEKByZs}7r^+aTsT^~>)5aF49|^ff?WbDPfa zn=i!Ctvf5iZ6Nh_FoQpYI=Jb25bymL?av@hu){Ly87jZvCbA0pcJzV*1fV2)(QvS( zc{ZR3N;<}Y)j7C4%PhhN!*qFpF|RyUsL+JEVT5*RV}(Q;AXBgF<8e1u zS3?NKD3MG5ZEP1Zo`d}FrpqQAnltQMY3713N$)(al(^^)g2Hn?z~d=(QlEVy**s zh#2ILVK3VeXpLd&i6E^o3s`Z?EtcC5w)%O{ZurvPFwty?!x++7=PXC?G-sXw%nriBm&u0U?RV^`{J}@sZP@Owr^kFIJ6H82>VJ3wFo2ZZg z{DA{ZHBgi%7~pgTY+tKOC?+PiY>^h=61B2QcqMP(Ik>keEY7Yl4XF$%X+KX_s`}s3 z8uwV|t5eru5gf2pX1bCDfOfHXfWSq< zg$l|Au6V%7WpkE`z}s$hiTgxb7d0S316^b>=e&}cVbZ)5<9ATDk{)SzlT~Fq7;RAR zO_Ai54bK|SBzJTB>L&E@EKpc3WP}nIAti$ar8|AxIFi;o>s4gl`C&_wB^>IXxU&#n z{o}q=j`m8A0V8_TF_#)e30>xk(raV_T7Uxhnvlx#If{9=t#r?5VWo9R=mNSWtRCQA zMm0sQV6y>dTO6?K`vsomJv5PwI?{N394k1lnj85z$eO)gSy(g0i%7o)w{obu00grTkoc#eX( z`?3u3G?l0Z>{>`+GW*o-6W3*%V^`(xfG>FCQl&rTWi<-krzcIvlj0_1rPQ&Fdohw2 zGotUKf|Ru}qGH^eyAqA&Q-rN!KI`T%#Lub8VU$)%_*rFv)q3M<`;j@QZr7Xf=`R)S z8A1tvGez`)0iKl&%Ow)u=BZ@u&QufK+N2i=9!j7mgy+NE&P*W*;HW_1wsP6ho)(cERDB%K8ySoiUYx1HEuRLE22Pb-qDm6td90m{18q_LaYUg~nMeCGZ z0G+z-=7(h53O;_;*&(3!30HF|B!DrdfG*m9n)tLg%8y;MP)l?Wb%^P4hY86`!4jrn zO*xjYTN@kBD3@%Re}7c9GJ79bAlEo?zNgu6W$pIxE2pE+P@erozkUT|6;I@YQ83q= zCaX2>myDb7n~K}8}u~oOOu;-NJ}VE#CPOylZOI{X4;dMmArF+Xmab91{xH zeCi4LYq=ME_8|+T$9hYAuL8Hq_uhxj!-WXh55Lo6i_B8=4*A*V4@hPa_Mj@`S$&66 z-!lr5P@RNGU2cSn;%6(v0`=ErEgWiRr0^+{E#6I7>agOJDH&UnIxQ*6Hkv|NBQ2Ky zNdmoL-tP(?&=*jd?QD*UfyXl}@S>x1iK>%3u!yd!izpVI_(L>85}trpoBD;Ed*w0zT;X2aGp9lljixN32z?3fa>YqrrzW=Td|B z63vW759bA)bA7|z9pI3eS| z>*wkc{r8Iqu`B-gF#Kk12?T)B$ZLdWtJ7dq#(*a$*fpKL(e<xXJEcM``=?A z1IvG$MgH&Ud^VQ^8)oTD^g|8*m2>CT|)zz-){#F!O+*93Yz* 
zq54Esw@8;o6}pJ;yT@tCzdF|Fr6eTs(Ao9cxA8b|VUOHGy7mj~$l1mF^`<*1;qmT%yJ4=i3Nl6aYY7d&7?-@05uvc!b;O1nT3M} z+xz0+q61#!Pzmu&rY7uAMS9`GC<_>R$XHGMtRgV}x{NS`(Ry&fyo6Z>NGrKflNGR% zXhE1!K{(bVvf_L>$r{Svd6dq-ahV|r{_M|82U@+Jvhx6dQC@@D# z0tNhhFlo5WCeu7!Fj|7&Fl9kt?v-BsiyAqE7fNYk2T|;75a$e1ca-MT90t4P51k0- z6raJ*VT&4SXsRLtKA^j)MQSsI*#rhM+;0ldYNp1;z!d|OJ9$AzhFPa!I9=@ zTkl^leUK&%wNC^&z9hs^g{FWK-+ji1(T#S3$7-4U`&V ztHA$U*%Ta9ie})B1C!{TA~0>iKS)M&-p$r)QzN6=IvJ-}>|q4UY@MP{&!2F$DQWG! z(WEmomK?r216eXokAF9r`JC@qPhZ-&4oiEkz-HXeJG87!cJ7HI+ZY;NJy!`K~ zes*O{Z`!8ZO83|m8cNP>+bz`0qe?Bv*6bQtL{)5yI>}ou@{(WfK0Wyxc|xC zX#WzfO9<9OAB6Kada`|X#1Y(>@6-q3w`*&2@T^Mcx3d6jnL)RBrRYefY3Q9VDThpi5d=QG zhBnk?##^P4Z2hxby5ys%-K_6B>lX}vO?iLNG0#g%Qq4FnkYX)dEmb0`Tu<368{&^D zVyi;7mBp|!!6@jINU4Dz<>InK`In?NE0ert#zhG^7%U8=OcZS+Q>H=|T!fd-g_D4U zAEvwsxHUqicD6gx(uFs*$Axfpe;0X`XRP1IL2i+(o$Nq;`v|wz;-_n<>s!h7!}4#J zSJ`*c>vNdfM&F~v+xT%c9^6~>d`9k{E;>lU$19-)qU+58jHs>_9p|l;YX_ULqtgbT zYgIm3vq#$=D&W+07R`m>1iHZd5nAa4W>;n`Cu&~l(h0N6!Fy5c^|6U3XXAUw*{EZc zdI6(D-%at0U%S%@f22qWISBxof-p32s1qZQHgd9)d_I?|mCwA>CL5-_Hu#&krjcSV zH%~Om^_6kTTy_xwoc$yVcoUI_oRSn9X2sK46QLF<^ShGZ_-yTke!L@oIvtxsYDrRz4!^0db$oZ*jdMt-d&R_vsso#AmZr~#nmCWfGT5-DyP-wD)O9= zOJmI0+)LvKfwqd-IY|Hp2tV$KmlI^9o23R~&97V#{PFuAo@Gjn zW_%zeVji@J?@cp=$8mQ&P$T?2*}h^%;#jh5*`~cbSzyOcFKmBdKep88`T$uz6~RLQ zza!^Cj&Mf4o{ykPj=$hIL#m;eap8AUW3~&kp0kXB6rWv*G+%b@I@N6uOMG*yJH;Hh z@$udo7j>&piU;d*1d6@fA@FKu<8=G^b4^_IVRbxhSBT0BaQp8#99bCZp-HYwaDer0 zRWApC5L`HQovuY89$kactW~b9^J^|`8t}hOIWc>)dwE0!L{!%?;MS7IzNHHxzXut( zjE+6b;&N*_8yne=cT*mJV;{rs$cYhBE9Q-fkzGpwYDo{Bpz9Kx9J}OQF;{r;!3D+- z>D*GGN_o}jyUqml3|;dQVl?1arE;o;P!5$zunk*3_^4DG)swzDhDPVcFW&`EIl0_p z#kI;MRUL)VRM+a>%%9n-B~y_U%R7Cap&AyuVcbX|T^b2$XxB0V3>!(-Dx@Tzpno5x zX^-X7b;8FOyXwsx9sFRhUZiOkHEMx@3A^UrBOJqn@)j`DvvOZIZVedxaAZ5fPBurG zKqRj1f!ZUGs}@9CUUg}uw}NaqZeOkkTE~-Y#>)_7hxL0_COvm<;M+9&fb>M^-T!+Y zV*S6`L|EwA|L0n4g?lS@i#_%39g66ao|a%WJegb)))?Gp#+WptIBEI?peHCqvebsK z@i_Ik_Qm%*7u#f`gm@)_tHOs~;?YqYFBcnLBBbe=I-AGiekKdt?`OcciFzC247}h8JNLOxIUcjf1R!9xF#~ppeWEFEy50i?tRb=htL5|7vf%lvuhtqS1wA77H)zm~;tNj@RE0Z!fk0oCk_m6} zD#F(~|EOS4De!gpHDBo*cJwur@ke5(8>@4f?4ElU9aDNuxG4lKhiXLYjbU;q@zpG^k=KqMQ03Cr=#}K7asGA zuTRlQqL=$AOLP-eW!I%wcF(@s3LE6rs>7LVzN(QtLK=Xkp5K2ux=dkJXYV$u*V+=K zWYME(kUi@an+)>2Z|2H&rFjd7%F$Jqjsp-m7)wM;x2q|2Td==18~n|o47a0u-G0_D ztiUzJAGKNV1?gVxY2|>D;c?&6K$-Cuund%7kD;iW7KcE4NN`D?zQMAxvSZQIS83I1 z7K?tyZ>`qVW|v5|V)=w77G|mMT1V?6(1(q`AJq)yHCsUa0Ay?iP9aqdI2-QQZ$uiH z7;O_Jov~>XBYKHGx8-VsOH!AI!^jC0ydPBphB$9{{30E{L<6C$i9B49%WB7(<@7f0 ze6g)(rw%YS^vwq;bc_)qL+<>?=pXOPF*yK&ODrWMG;LTJL!b7yeBj}(s>qmSemBw> z`~tP4KAuusaVWT27Q_mpSW*L;9wl1OAUjlHbiXVrpXz*U8tEL|zvvnS6VqctU7Kdn z94>z``ja3LR6EmI^W3|Xx>L28oZ_a%*v<;q(OC{vAnPr<#tq&8pEpC^?0++J6)y|liR zzO25?Ud}+O;K-C-9<+i!+mBnz4{AjwGFTimA@-IkM@&I~0EK{axy%`^W`SJ5KCxS7 zOfx8QEjJ!tm!=wX5FU6)gze&stne$D$9x-EHE zL0w_iWYns~`3S;X_9AsK(2+Im0?e7*9sDap-Dr=E92em)0&)`dIb$n3?EW50_4O{vyU^rw}0*|R=HPIQ4e zsQoB-wK=t-8x00V#jYrMB#a7pyRJK9v44(R#)1%qJuM0-hQL1y=DfrrMFNq%bJyZ? 
z({KnO8)-=;mvD{DI1`FVk<>ER-+zdc08fTr)8dumMFqtt!XPZ9MY}U1Bcclpm_V)Y z{A>$70EP1-h_MD@9sZ)kL)j=?5EbL#s8t;zm&SK z4XAm)5MXkxcz65#alR-s_tg5K1jCgAZ0U z0Ml8CSeV{=cgBHz^Y8sc@tn@SPg&vksy+g4PNg?vS&erIYU4*N4dCu0&|m8(g2YTP(lnd$*w zs6X)0ANH(*yx^o7S2MYC9>c+Js-m9Yy>iox58H^^4jr30XhUV3$T?!HtM6D9@6x*; zCx*h}m=^c^{x#Rdwf#D;p%LQN{vev!k9zTFLD4zASe)DMCBF3H$n#b`FP$+BeK3l7 z-mRnUb#^~MLRgIZHFcs;R>EJFT5GcT7^hKm6f<@13A&xr@4y;JPh%8#u@o)&b<2xc z)NaIGGNbHf7#zLpakt4oozp(PlNqA@|=$troD4C^ zqq%q_PV4DFZ4Y%>wOhO={}@a?1AemMZuD*BOgyKCd_uNsZLcIp-ZM_v+uYue_h8@D zrxtyE#pW{^axt~e)7jV@-GoZ}`PQL5Suy%}fLodeAsF_)!*#=luorFJxvE6?iUBP;Y}>rqvg}!tZg}Hl z$~woJj_thfwuEijp%qd?9?};|oZt7*^1+hNf^EB-*k996GHR|NdspkklrrmV{M7h; z-D~L9y}oCDH$&%8SUO(T@?2+DYT)DX3UYFH>f`f$Sp=bPF~mJ!MqaVZ`e$r8X0?IO z)_BuB*`?S`MSk_Z!ShUE^x3|AhbHOS>1d(7>*0B3@@burN6nj^Dq!C4``ii!2u6Pm z!93BD5$`i}b|}speWw1Vu#9!JYCJ9hf3-O}P_v@36aS#}2H#jCY^C!b5aH=In(EQ@8U7s$ z$<=IRuODOM8|c8lKlYjbL&D?&c$N50HM%F=tMK7^c!QuGcc7rylg}nFN8#;(8uC{$ z0w0Da1zhMY_&mf<4`l=6|1tKCF`{%qyXe^V%pTjeZQHhO+qP%+*q+&A+qP{RciwaV zoG<6*<|LhTx{~f>rR%AxwF=Kmw-7@Q(>oRVE%IMKg(xGdnnS1Wy&WopATuKI9T=U; z2!q?8@|D)CuNo>YY(&l zA?THFSuq!NB$rriJ@=ARXZFaBtcGUfoKSO}Y;&*lHQEJZb&l1QUbGr4L%NxJ5j4^s zV=i|-e~x`$-j4+Q7rV{rf43N3fF<dK3V&YzV zH{Qu!#lQAAh-(~TUQC*k3(a-88o>X(t~tdzD7^U4((RKDciTQ}fw`!mWFlX)*R=hc z!jUIWzD)BtsK{v&v}(Q`d5f@x%1uS{)@elO0V;u4Nt5Ir#I_$2ui)>c2^=4K$jq_+) zr4DHxrFvZfo~SSrgI+oqEXrySV`wATe5gP})n4GD7Xgub@0;J;_%uzA+dv910DD~- z$dws4$D&Ex@Aew*d_)BdFh zskOqoFW|oZ1)Y|4*7Gi2Sc_uTY|sOA==W?%%VwT;cOhIUDq(`RY64U@}CmPJ#I*@?o1I2}*-+nsSs2VGwMz58Jn^dQPv}$05|>vJSn9l)vsVbvT-su9@qg#w;AJQP>} ziB&;SU`C-iuM3>Z}Z}0{sWrjS23h9irJBiBhjGX+DsdC-kO5qf=2bXR`99ogTg6Q{iFU;hmHp zd$vj&mp&5RH-Efib#L{O2TW4%Zo~*0xOT@S>H(=%Vr>%(P!JQqXyUZm68P7SpSJn- zag_;+#3&sU_37m@0;Z0b0P6k$7b`npUNEdyF%livAfDjmEu?|8oV)|o=EhrM=)T5j zMZq5Tz{;oCIV_yD*%vAEq*H8MOAl308v&yr54g)q7>06(j~x1!-iDJ+;_X3lFi-OD zq_PVDtvX0ALy{WzS}#*LmNVnGw7{gjfi&)z0M(BPIl@-0<<-1$h+)qM`5@>pJAP)Z zV5hP8ROavd>GbH~vX5~Wg%eY~J|%WhV0wyEVJH19b<=)n1~zEy5=BOVb}l2*bd_>L ztp~I5D7^une?c;&!c*`cbnn}xgF;_S_{NhLkzhtFb?WR2{C6pO`U+2Hu!p^2nqJ&5 zZyA^`FbtmSH`o$f1ts|RX;@Nh<2>h?kwvaQVF_1+!Y>7AKeNXZkRlUj72P8tk zoxfPg3kyo)%SiG}qE;QL>;Ozl9`6!P=|e{EP52h}A*Klpv9K!xlH*RfRsxIMY5B}K z&jBk~Vc$wpudNXO>I>)~QxtQ9UbCf;Jv8(Y>&Ql*ThZo`2bs@2_O+74i`(|bCPM}yq?*yMzku2$IiB%bs%;M_q&Kyv|?O9qS^_=Yly6yG#!ZW#;D>;)&o zU%k#cyQwNGw=r0bgY5*Oyb4L3{b)!=>$$6PBu94c66sR)({e(54QFa){U$^~?|4hg zZ=j8@19vk`%dKLs&Mjg37XG%2%FAupvZ!P)ugs!7lUs2?aFCRn>2^&1v}DRMd6t|y z5oI+zI%xh6hQfXbfu|yPSH!du!WzyA@`q6B3GF1;UJW^-Mi6Kz7P2H*%v<=&3EMml zwHA2MN^Z>lD@Df8Bf5Ef4%1yLKQ{ykF#&y3M({A`=L< zyy<|s$YY(tQnWe;i0Y1brgD2+Q3~C{65zSob0;Y2(=1DLKMiNyPnM3Y*6`<+2&!oM z*BnYtwHdM*$i@|EKFS3693ii%{tezNPI4c?#1kbF5SnwCctjEiZR2!@d^+ko^EaYu zTjF3wqv9*UVU|W2D*&eYf@c2(olgjRkc>(xRT7v8^-B=k9;Qm zMI}F?_N(GM>qR>lXl^9gB19#k@^|2Du|+{LT(P$_Ueo6l1NNet@-aNVz{_(8oI$0a zD1=X<@@q=U#+F!Tud`%VwD$-|T9?kbDeJ|B4|x5nGVXUE8RLV-UXf4_8$!2=xC^g? 
zw3~1!4UFo1jTJENH_x08ug@B{`t-Qf?f$XFc7LMfeL;WML{ByWJC9Kw$ z%UhN*mVm~a@t!I5c`MM!_R(TLziSASc%~J`PsBltvGhQ8>euN50_MpVuJ~yy6Q8-` z1>kB(U{PjA10)%Z4MY#sG|LNjvT0H?HlkP~dt;n0lLU$=j+WmH(1LnjHjuj*=<$gc zxLQ%l$&&h(-3BXy--}*BsQ?&dN)hkB)~eG;ILK5| zu*m8wV4AslGz~?Hc8l+itvm7&*()HcECa7_>HBN<9lo6Yz$|~zTGJkC(;2@hV|=5G zC~oyCpy~1a(R+3|_qwtmx4q)IAPY{epSo9@XGew)_5=KJplb~g?Gg`p8`B%!x_q#H zJHRMp4KTM@8)Ra}upQR6+^&rJ+6xovV-Z=;EJ>G^t~@ab#o+2|+e^>f5Zk=4pfk20 zlFX&70u|M`8U4Jm+ZsUGk>caPki27V=w<~rPvUQ1ksfT^CPf_?pb!LS#p)D=l%-Hw z8lk@<0%|Q8QIk6mB}W<dg_cmM>ETa+J(H+pFZl^p75?&tiV`8qQjjFGY6Uc+z z^owBKC8LUm^>A&)y4G7}?3F^@JPBK3e1FNlT&t-QX6Oht+3oBNmfZi8Yr=k*;wki8 zdU0v~4HZ}!@s`)xuuBdJZc2wIAd4~y>wG{i;G8iy-d49|^Ct8)$O{BQwrWyb= zb6VV*MM80+T(D0dC3ac+UNDEz7ac;S|2JiB72>H0vzc)z(={kiJi4i5bs&)1R#XZx zIH^Sg%LI#OxMru+*JWy)<8%hZl`1MESziG&jgcfu7aIcPrdPY|220MT(#&IAK^?_G z0Ycx&(d>a7eJ{jIw?8TD#P$oxP`=1^fM+4at9zX+2?POOyT{LAp}+dYCViI_$z&1= zj<(4DSVz4oE1||`R4pOo;@EVnKkrFcWQ1xZ+Rd5u5m=Q%0d64jFh$|d44lYz)p8PQX{72qviUE^63YqBxK1RO&_a1wJVk|+?s zmuQ9;(nXpd4k0d%QTQea&lOcKx*4nOa#tLrE$eLQrU2``*UUvixN;DAPtf|`)dL_x zOhLjxXNmz8yHoQ`Lx%%I(cHAXn3FAQtqoxscA3L4eW3c~8%*S!4%imwKlB8eo~c_3 zJFVa0x`=fcb=8~~n9H7zoD@t@Kp!dI(Zr@rEFbc`9LMh%YCQU4%+Fd%GEL<^4l*rm zUZK9$)^cB1sD-!Y!oGl}8yxWE5OdW_1xt0@CN6KImcgN>g7DsdvGwDuKGV&B>imZ+ z5`e_(*y-w5V0QZ;=Tm|pJC9P%dGIxA|Hj}AK;j-m9dtB>1V#w+v}4sC-7)W6%CVJG z?+YihZ+T}AH5hxGm3c{tWf}`-OXBuE5;yMOi7Ss9+7ScoO264sXC4wMV_jMBFp}MI z%L=%qk~&aA!D~Mt8M1EkxNkATGJg?S2>MwISUs62}XyH#iSPVOdDw;y)@e(A$_;DQ46;I-l@hl?xWv}HG zOai*1Js6(L|4}&T+vpmw&sT3=UZUwoo`TxZosdWj{}E&WMl+5-Oia@k(72}$aoc22 ze^&@VCj;^;sLJCbeSKL7KTE6Lb{Fmw*Pb@8=i(x}(2>ZkyapR@I14{3{rNLV9cLDb zZh>U#am|zjpo|@~jhB*fC~R8usA1AWSJdDySHg#1i!kcPP`B@{4VchKpEQQD(vU2t zad#Hc_FXQ67jv-<9@vjw`u7l_h19aejlNVd^F8i!? zLB{J|Hk~H6rFQVx{7jEbC>CXS8{oHH5#zbF^!Oa;_Al5@mbQp4JCLuO?FiCNS5HT- zR@Nj<{Zb?~mmckSQs=Vq{?R?5L?fGjz;9sl(OlK*m2ZI$qY7!*XxJn^T-_K&#P^g} z{fiCLqRi+2^d}7exBi5go%w$9^w|7Kp8zdFS!dvxKmh9`K4-U1ZJALfmmBq;4;pXm^1hB_ zlBXC`hX`}GYD~Gs%_LA7)$iA?9G^Eng{s|LbX(|rd%n)r;9FEEiC4@)8iP&7FayXT)X_rqWc|DGr-`NVKD~PhK|sD2P(&IbGLgAN2=xOH z_?b+(ofZbCJ`rLYcWi|bKpu9U->+ZL+32I+xV+X*L=2>&VsyP5hDz2gwJg~@{@Gf; z6`X4~j;CFEe?w+F7(M7GiBhAkEuI!p1VsUgVFw)G4UWd$!VrnFdItlA!NZEh66q5* z9K>%+e5)aVVt?PEvl(Db{X{IFl@w&O{P-J^*4MP8m@JuZ)UKV--0~aUT%#}orz-zz z(os{{vemA%ebG#N1`}^`(Pf6Q5PQ&LYawWYF%n1DywNq}C?22fn&~>IIoH%$d};Ts z$mm0j{K2D0Am7>mvo={AZbDnn{oc6AT4Yg=M*GH zJIKwhlBaKZAKS=5<7vFW6@bIlzP*3)UavNQ*)|wA+IT#07ZFOZUu!r3q%s$VMtn{r zo*n$(sl0IAsx1t(pv005+-ATiB!{0$Q&xqRo%M$9#zg~VUTe!L3l9B8P!|Q2`|{J+ zMw&Xr$poD07E9M~GZK^F9Q|J!)nW>WOeig{QuO@gtrMCG$LcKAjvLhhfOe^V&^Um4 zu4G{pDF!-cBgxItKg(sEx~NOv#i$!!q;yR-^mx%cROCx7H9xpjdgtF!(dl6idXi$u zj{%xiVhe~!%>JE}Gyyr5S~?wOa4ML1$x-&>pd;wma5Cz;Aruu^zr~2uXewPz;dtgK zrNWujg(gZM+^#7$t={&Lr5#lFrb6RHbaKg~J$o`FU= z?!731pm&G*k?{e5HoelF`rEb;c_!OsF4ZnVE!Z_PB~1!f;(u)p^||f+#~~WVVl;6Z ze}@4FiM@k#*c__44pjE+yzJK2JElYrjy+HD9fG;Ffe zxT~q;g=0EjQNFM^mpr{sL(X|EEy-qdQLW@pZ5s6ix)fQt-gDAzA(MzD?^Jk}UaY7n z93H6vVn;l@NsDNVyECsz-+`b2`Jz8UY)Rkko*mECW{pEje$P$DowdHzw3M)+Nl60h<1@;IW#YUsz%TuX8S~kC%W#ttU+B z6lwVW)7QO66aso>g_OY&JFVk`r*;u{ru82C~)w4|I&Y1MOGC|8_NCeFhH!WZvrEb#7 z;PFggH|z(-5}UV>mNHVt)E%^KFS~9gG7#WvOttJ%F`i>bZBTsix2cSaDRxyaTO2@@ zq0i*u!NwhtCRVBpB$vfxcZls93R!xER=!uOn6Cf(w2N9S4YqKATE|Y*c}TQ8p%6) zOOkZ!$Lwtt?XE$~0a}MuA%Qb$b@+w|<;(#^1N;$%uUEZ}?VU3h6@xcDq-jPPgCiz@ z?%7Gur0CQ#;bZd`C4&o~wP{PhpdJv$Gql6)_#Q6)-eC66XA$}iM2G&@KLVs;4=+{iFv$E(0VLc;sfxI_q@coUY1E~w zMQNk9V0DtWTBLBX$*pbZ=v4EMwV69Cdim8~m^agKcpNEIVdjFV)mT!tQJ>1#RHu4u zEHHtAbH;3`rg${EXsO(+C)GM^Jv1zd0A@A-d-~#ZPG*U8Q^7USI^2c|^OA{Zsi|ye 
zhciDgL)833`b)Mlf~Ws9;ch7^M+^-DdOAcd75ZFqPjY3Zh09RUdm9dVulo`IdBM^& z47&uZ3IUcGUJ5r9>;Q5IN#o(sAqlggvfuBja*9THdOM)HVx>d-C}F=h(!Snm%Qiv% zpgmi%h@U`Pb2paC`$+hj=l0~cm)G;+^dd62sBR;p1Y?O(VNJme*m<3nv+<)n{pQjOS1pi(Pf_@hib(N7;`>$J`zi2C@R7FQH}Y6VlENrmo*WD8$KcG_;; z!3fULVktCvQ&~u*rP7hM(mwh$e&xgxD)lj&xRuNW-R-}$jOIxXz=~nP9z}OrqC(qm zFY`QdUl^8S0#&nj*nsCaEVOU>1NSrn&B!Cmq4~|eF6gOj6pP&e-vp4|G$xRgco)nc z)xEVmx4P#JfT)lCFb0T-hztmb#Mh^c^@f=n0K%aDHg{Npq#aU}K#VR^91cWJ0jfVw z0Fa~rKng0itBcNNw^Zt#wSlElH7BxReaG4e2CbTwdyK*qU=7$A>jf(X9@=!*UCdT? z)UyPH77Omxp<^;y4S!U`D(2US2Ym^Yjh^rhK z#Y@Lyk_n8-Fp>&vVfD@fL9+L2qOcs6G(&kl9mEO`@HKZNdk)l7%A2%2qxQUmLUD}cd!lv!<4BGh~u3N zCzMyHF6t&PXB9OqtqWzud2p-3Y8lV3y|gEeE^aAgt>*Els|++piw(aCt9ALeEJ}3K z)_Zod3D`sPjLJ%rR(72u-)ZzeA{UzY4KT%%8yb+8wloW4*E8oQXirPaZ4#zA zm{}Q6_pfU@jGy?e4EbF5!`fBq=(fLxqQ=0GwgdOnXk)F7dQOlZ2=(rFwg0x#!(mNm zvfXax=#pniLq09K{KdxPE@%Y{y6E$X|CT-}W88!(Q(2h(Q{9ofCqTaWrls3Zr5~~u zfPHh0z4^W$z-aJXq01_!i{?v*V^sjpN*2~T0Gv1e=2CmD^Ma?Jp)|p_q-a;e@VTn- zOfQ-4Z$w;r{7P?RaXCE@@^^k8PqxW;hKf{a`A(9bx-hMX4<`=0c$N#7V6=8s!N}ki z0J2gMWup9w9MLh=G?*CAqe&k7YAN!K>RiK)0=&~qy6opIsB1ljfgHv2H!C7`dzo#b z>m-f~Y1}<_<)rL3B=TVXHPj=*v%SLANlgNtJ#q1!4y9Pma9!c-$BSY|A7)*f3l#T_>2Rd3-H8;u__Gb zDyo>{Wo1no!GaB@9yNMya2Z2sp$VHDa>Jt4!(2NsoLM0ojk)h1=#m($mjnA^G`Q*j z;!INR8muQ+-dT`v@|qZ5oa2&NY^U{HZ!W2DI$BS7>{Y)aNs&? zHLMkBj4M9QP73(XgH0`j$yPR`t0 z@5E-CecR&x$imXwp`zLkWT3djJ9$H<`6}tOwKr&G)ta~^cTIxuc9JwNhuB1W{&G+c zL5}O^DjK@%_!|?OB|r_Pum05?A-4A!c5qP&O%7<-DDd^Z4>-xe0Je{uNC+W`D3r%M z*B`s7eRybR6=e2PbWO+f@(gZXj5agWW#`3kKasKS3Be@kr@jd^jL~+JG$G1S@Wnu| zAWn)eC^5&woNXQu^~Pzdxn22)1p3xRMVTZ-e0vdNjyDxAvgSEV`CSnS1oyh)shvvM zhbF=5vFaTISlWZ*->C=zu+YTa5Rn@uzQwhgG=`B51#SN_uyd zIeOChKS1iIHC#xgJv^R!;3~)u9sZ%HJBJr=G8&j7PPT^a?&C^pgRE8BXXu2PH+Gq@ znfmIDm6cc1$paoM7+!dlDP6m=b}G-!Q891za6*tmCnaQ=^sJguts9mR&vU_4{DL9;3_q zL{`U%4iIfo!SQ(vhAU%*Osdh8S$=Z2i}r z=GW6BX#bJV%rI~h2OtvQ+a}=83c1h520w1$L&jg~LXrdf^MXSwVWF0}rL`TSfDhmD zT(-4$!^U(NFGM?p?gricdNQTAv+eyc94w;WC)wft;Y-k_$8MCNVtD7;<=*;fvE#jJ z=Q>jB>+>-%#kalv`6Al;*PGzJI?=-hy%nDA#`h?S#rtj`kO}p3U&(D_KoArmjPlr8rT{*o*0Q{tvUj+y=w8rfF8xR!GAC!m2 zypu`cklvpmVN-)$t2Ak1jswL&=?URcQ=}Z9NL=(AU8(7}`;(h)Pgv|!K3g_Cwn}H8 z9?n6p9v`B%a|zHauAAL6*-g9!njlXSXx`*MMTQcabT2aPuhl-C9egDhFIjQL&Mqs~ z@c){&j99?OsuUWnR3+3K+q-5Ey6yOiqryfuKD?K%GiS~mw^;vu3k7)gP~NsoeH`f3 z=ya@hO=LaR=(}YLlRyNAI!*rZ#LAXXijMqugV(UNes$oma6PY= z_4&4Na^TbPVcYMXQiizA`b>S;nPWUpw(S}EM)}*q2Lc_K9lBHn0?8C6iu3y&1^kF? 
zFlTQ`GAy*GewPQ|)3Bet&;dS%Bd-*7#1>9N60=GkCE9wIG;0^*kMpHU%(;HgL zAM&iN<2DVOB*&j@-lEJ|+hSY#t3Y0qf37LUvsj$6fGT!7^-Ky(Lyd4G{YsL8(im>%AC3tME#pMV_PU$8Rjz&&1 z(d5IaGmNa^_JPBzKFkcGu+}_*z{8@_mJ$I%j^>HuFb*(*0~Z$L>j9E5kSI9(BnA*9 zZ$E8GD($1qNg2Bt^pCP&Zm0XkP%aHVPVSr24Xw9;>J_pT3*REX===^jJ5KN%>^@_HhQ>E~<}&szRWiBZsFW)mDKHE#0| zkfCxneUS2Pm;kmnEL}5cN_aYUX_%SHEcOGTpd@Q|URTYRpJd~zAF+ErKiZjh^49D2V?1z&f z#@ndJmZ6YT2Kd7I)^;Eqh&39!A*0dMo;1-`gq-FWRf@;~-4L9~f`joS`Brm;-Z;^8 zZN+djv*iu?(0i599lukjupcQZFi;hhBhVQt24@ykMkQ{#_e@W8;P>O9QxqWc+fpXu={) z)u^MPn7jt-91Vm4O~@oj#{o^?`6z=gCm3j?q5=kMyh)`{lClaDVmAEmBAUsnu`da5uoaJjbiR>1{y-$=??v#3WIjY?tU+gu4pfe6B?Ros!XL4ne7d!8LJF4 z@?6YvcEl3ZxL{%Lg~fRvw|BZ74Ku&>Z$}wSk6kVLf@!*1lR8USXQN#vgwQJQCOj43 zl=JX+F)SwLDvAn=lm!Y79tA09#Z8)x`k_aY9*zT)5MVVJs@?7s)oB0OkYi8|ImiKT zUH576{-u8YaEPx()~^$U067g;_meS#1BDuYRxg$Ix^3_b73a&3vVH?pD5Da_ zElz|ZuVx$9FrvXK)12$557Py74#e}r`>$>v@_Sc{OE5Rwjlg3EJa9B?;suXw-z>!< z`z3H>Jmk0-yy| z+tUaL3>h=ROuvAmCe2L@r`N~B$r!_R)MF-9VXI$gF~cPOLdzz7?-yEbk(R%b3S$ck(O-LD6~B){pdax5BG!s%&eWld9{xj{P(i9>{Ozt7S5Na^$Ki_eU)19 z*w(>N%&3lUR^8-pR^JRFsZHbrLMdwLgd@ph&ZUx%1QOrh=g#pzY}um;B&jK*NO;i+ zBn5AN!l`2<&6a-+eF247MYiFB4O=?3OJOp*Mh zi;p>R2sC4nZKY~V(!I5mnymAxO%M)XV^TlDd6~Va4*(nG;-wYJ!C62UsvaG6^!HiP zH2-R6sf3D$u2(U#oBA+Nb>nt;Y?E; zYai>sZl=Uj@Hq8ec$0Ckf6cqKcP0tNkAah+=#-KQ&!L$f3Mx>o_p3*si$tN+XZ>w7LZ(#p|NcIT^dJWhVxKqpFEr{G`+LJ7 zOi?%xL3tvng(v@C94ImvjYWu)GyN(IN$P*!Fi3|y6gC7KLMODn|18tHR?Ble0T!T@ zy<%_4gMYJ+VY8Xe`MmP&;lRdI4YLdV$h=u^$az!p@_qLf7fa1a#n^Ys67ry8Nvj%v zK6$eAaVZ+1cyLPf>UeRCwW-m4s*4t(c%`AjPR(yS(OWuk{TI&^JJ zjF7(!#bci>*LPlUOwJE1T5aM62en}Zp)*61@mG*6AVv_r`+ zCLaBU%g_FLKf`%x&J=L2M4ikZb*U5(DLF;cqYoJ7DbPa5hmElPkgssoD#sLV9q%oQ z)ox{AxQw#h5+CtD)*DX{R0w`f+!rqZ%=B^E;Gb02!)6L1N##%%0f8b;49YRcXW(~a zeOxJ(XUsJ!BeG#!e|Nx`Nx)SnIG$e7PJZ6$`{elk96Vh%)$R1Jy&Nqpj!{ z-gA0=IuwTcoG(qrN@(yO(1vxF{DiIeEqDZZI|8$1)On|E)3Sbl8f>O}T?Nf!_v78S zY!##DLRkf8J|6y@c(a;M>YzNP!}5NA5RDSu`w7k&?@#THbX-F1Z}!T@X>9{-3}V5+KO83 z%l@&kwAOl)T#4M-tBp;5K1^O08xm-3ICVV8wZ{@NQ@v72EIDH;gq|{kN6&e$sMI| z^j4mpAA=XZZGKb;&RrRgj-%MQ-rAOyHekeXrH1cD?do_(S>ZLViz9~;&KbJT^i)!` zX>S279lnIIn?6nf@vXwByPvX~osaKF{$_n@sCd!QL5YezR6jSzX}#Gf_)o}uGjgyc*QIB(#+BL= zr~8Xd8$U#xSL-Yb=82<7G`|~dYw(@;c<1OpeF6K}{Y~Cw~&%0%uqu)Zt7lSDfMX*Ii0% ztc*SpoLxaY?PwuoQ8-$zww9^u2TwkTyZSpYjf6b!P6Y!!j=TU{-_EBynYNdFrvgci z9L#!b8ME7t9dS!2p?DG8=vauZpUy3ikiVzM+!OM-_saDCRsF~1*q2Ji`y52TG$9ED zhNy=hr@~bsZ-j^$34y8oqX-PQGtmXefXK|hf}sBdb8yrJ$PDzAz2C$L2~6QA{iBEm zI&i_#AkRpGWQ3M*35*z+gitMXC^_Nt`hvBkJy}J1lZ@{oN5dWj({C-jbJp2uPl*!&IDd z+O=CoNUz(uw_2B{WVF9j9UQk?KRmy-UJtb^T!;0xSfRJ{=8?vddM2ZLse%&-2&peq zZX@IgRR|3m176Y$SS(Nb^75YDu1pIVRU)}YuwB*Ra{lcKuMcic*&7fP1pj=%*_0FFB}@z*C}5_x0Z#10B&e3BCXmA zo8474E^jN9Qp7vPTUJg7X6QEjv>gJ#{QDEg0TmEF_Y0Ad!9&PH!!aj31eS|+mDH;Asen;rXXt!6@lC^I)8U!XZvq5ax-TBZass| z0Cb=%2X;2vJR47?+%*({bZMHj&zmV-PHqn)y}3;=;GpL;c#Z?ijBqz{p9I++4cod7 zcv`a11&TcdwvECS#!;@s#%$zV4K$~I5HJz25X^H6hD?@)z#&5rGlD=Cq@`aJ`6{vh z{Rk=u=RzLkn#gk-wDZaah*b2k(f}LG8E^Q`zngnaFo&=q{zCt>znWkj9h`~F%lFk;q2K6PO-Wzcvb1+?&`{MO{)oO}&WV6b8$LoRK-0Sh27} zsF#`yIS$k0UGZ1dw2>`lBkX?znL~!Zl6sMZ13?0uqIVtu-Qq@~G2JM_v0(iDc*oI6 z&P~FjV}bUy*!3|a!JI6@+7jni*JVZa=r`_Vtv%JOtqd0CFJdGK>Wu&Fl7`Hbo?JWL z=c8A+-rZQ4yzzHxoaOo?FP$G3qi^sf6|(OUWRZ^QJRra~dYBy_)yhE9 z`lk`Xa*B^n2djygRYL}&HGL_5&F$)~RmjYMRvh;w3~mUyBRBaRT22@uuO9?AwkA&p zigjpBXf?WLgtEE#?mn2{e7)myo@HWQm2p}{_FYoqptv(1#psJjHonQXSD0+foMKBW ze((Uf34HHiY;GiE0QAuq1T_l-n2!D##DawWH&Q@f;4C}POJA@+9d_j8?MzNot9ezCg*;zrgjIYQ(rF!Rj9=*D(uBFBjpgc=ZNXpoBDIYD%v_`4|-WY`Eh z3?T@ycE2Y$#UX$M)W17?DwqWAcm+dH&v;0{gv3DU>1vmlAsudrvueT6NCyN*0Q#Fv zvzQ5#aq~%zdMq8I8hs4nn59Z8onmq(?hNAPf`?$L-!g;T`eK@2gbETs@1F{jQ%h4L 
zW+WM*^s|%n=a9(mAqSkvk`gi{Mk``)5bBQ^`t=;kbwMYEw{ZxM>l=rw&(U~~3Re*_ z7E4)*5;UWThKB!C>|ivhfg&Rtg`-43{;BqOYkjK`#5&A!v7|@UUEi?#^JA0pGg?ac zyiYUxzxBOreIOK9`NXO2slJls({w8)dSG_d;2Zhe15XD# zq?H#q2E1<4ac7a@9F|?P)2iVeS&I-z1_ED;fE&vlg1SHyNtU>sM{0^_@3v~*%soCz zXt={If5X_!&rUtsYg~w}h7FRz9%#e$_Y@e&^>)6PK2u?gHY9-vh>I;iRv5C-osh6w zP^1RMEtKO@CqvYX7FB_gA%Y~>*S(375yB|2B4#LH|Gq`gZ!i<=iw(*;DcmS5 z{1^2|a?bR%KSe918^b}gNjiAyEb3zYy+K}LCFVl9mWMMmG;!mo^Y>aHyCJD-!lFkA854dHp zo7h-5akWd>(5@dchTslh>P+k4uFX?*SJ8tJNDG()w!L>5@h=gHj2>~_Q%_JZkv*PF zmo~17iJ1TE?hz1h7@klvUirM!;}LW0>o+@C{O$KEfEd=o0fQ+b9M~nh9HUNwP8?9r3Fh>GGhyWknNHl&bS;AXoW$1r;*o?3E_hB(o zJ;i;R9atHPNJyAslw&S}T*#U;so1PYpqQRfn3$Ma+WNYcPK90Htz zm=`f!!I+ToFTM^Gkyx+*RD`TB#49x-gHw2N-zdT!kfOLVp|)Es4k)2(l{o~S-^X0?9;o4s44j)@bO@Ctjp&tePmKzs?9vW190)it3%^Zm_T8%_M zX7TNR=4}zd=GTJ>x)2+ls*$Is?DLvj>vxDQQTft841swd#5Dg#>Kp9;Me1e8hQ*Y9 z2=LvGYmSOZBgTwL5jj(R7uA6zqkDAV~IvCY}y{y>8IhA;-1<{Epd;%Jj=PtZspeV+}Z*F%ly(Iieae&3eDDiefk zDIU$p`1@Qw8V~1Ll*JD@RV(dqhjio8w;wetOYP>@%TG>iP)Tn?g0L&PMGt(oR0`V< z-+(GKEvSKxf>%E627NMdinkS6(>8PI&z?5NUFPG8=5W*WRwVa@A{{!eBp0Dskp8{; z0#Z7&3-d?M8Wr|bk9J+OAvCpa%YX z@Tx!%CuxCcQBN%S6c=HCNE(Q9ZJ`l=@HX4$x~+c!{4p7`@uEEJU|60M+FfB}8S3K~s4+}q$B zT}0O@qjgwNC(Gd}WD@+Z`jIVNOJ~Jv4o1)xG86ha2FclB85vB%VE#0&NX~Ov4`6u3 zQ*%oXSx`*}JoQ!ujwH+nr4}z#F&+XOxlyDdk_@Z~HbGYO=S5wYdVC8=+sFA|P^oRTXMklwWlC2(osV zyP&yj2|7Fs;-9-g*}F?FoYm1$>vmatTT%I2?I38-bhcb<(A#$neR=V0a%}d14|(mJ z-ejEGDj%lBwebq|NTF$)$dWD7Xswk|>$NNP@%i$+wt`9k#Oda2)Fzh<8>=rT~fo2)%a5l8= zawIdcc5IV)e{0B2;p!70_VgX=6F?X)CXGIc-OdUMa0)p@wP)VA5PA`U=FSlPaR&+c zWr=D+`ld=0tAshi9V5NM|UEy&^H9X$CGqNgpi zlWF_~!I_u2JjV6ci1OL*reEP>>zS@VNU4E}^|;toazzLZ5|$-bheRs~(a`e<&k%lM zD8tHoawkL(Aekr=3qVQ{i1q_~`iY`{Q?cOZJ!GrbUm#_gd{$C%+Bo^cFI~!Y8n$=x zb{mR`VZ!|?Mwgy>9*)nmC}R>+oyWipA__6$mkb@nkV2xSP%fS_Ce1l!7gLi*?O+fv z2v45;>o-d=iU#DQ?cVN<(?E+h12#E488%cJ>cYUH1x=4ci5_Lw?E((>)W&SnrRjVq z);IqyYzy$GavSPvY1T}pt9P0lOu;7pLL*yGF`5az7nu+AM-Z&w;QS{D^wj*WtPgL5 z(!t2(MR$q;mxbW)kdG0lK#)$3$1u2l=qS}9i3}@+Gpmm-TlSn-b{X1Wr#OlucE#DfGB_Al{rh)*fipLPp0xqLcARo&3q?U$+;Z-(UY;%fRk` zvWm3{2JEi;HO{|zf8Mh~rOvcMEzWyNlc45m(WC_lN@!3fS@FV&z(<7!|Zagu+`pcNr2M6j^Fw z_c8YRfp+^qYx9P^I*7wGXqj{w6%G6T_{o5{f`6@CrDsvRhtD(6{=V3x{yi2}tAi(c)< zOaB>KX{Q~LeMu)Na3cmq%UfE|RgA(CaM4l5zIH9f-pg*-{d!hFbfk+j3YcA*Sp^y0 z)>8v-Zl&&3aF(y9yNQlx>K;?J)%EtLz-HjB_A-TjEY8E@X<5)wOd)5f(iQ+3Q7*&K zaG(F91A5!pNSKTMGQon1*v}+YO6^jvg)!x3oVVaKbQ+GGnRwzDO`}} zuQVB~3;sL&bTw-~dbqti`ly-mF|l3|Jq4VGB*@_2aYGUcqM4zrZ|?~4`&hH^EX_o|;yAQzEvoPr3 zB@hiohX`V*Zds?b3X$6CSQ%V7#uec|<1Y5jPDF9te2&$^o~qrA@_U`V^(}GLty^Y0 zbwztUQOltUD@CZpJ1IysACLRgH~Kt#QB#&%ZN2Nq@wlIT7ML5wrWp>Ze9HZSTc zSM+1d8}Hd{tCgrPrQyN40D-4tD6|lWl)VJ#FE1zHFXl46yk&l{fD)Lj^4}w4Q6gx- zJ85ms>D&3p+(F%*b5$_~L;Tw7`EI~w13OK2r~~7U=rbbR=iym`08j>~i4`OXC5&J! zNJTxdV-TgsVNcv$6VAZM{>}15;9vwIG|ZEXqU}5G58;#HpQa# zGq!e5hvwD<`_J3l)o(2UAI3w?ccCL7bid-s!B|S8LQwYSIeyLqEaJ>FDjbfGeFtQw zR&T)~gvJpdN1>9cC1I#o3X`vzz)jviBb(y^0D-*${E3r$!sN3f8up)6M)N#Ewq+>l z<*El#pc11aJ1zNSVAwS)-uvU5&IL=an( zWPN!-{Izd7p}^q)kTt$59Rv0tUJ?Fk$~q(oFZg0VM(%AvoJxNvpk5txf9#1QPS^R^ zOEP0osqR=SzU=HDTukaJ`e8f6@DMr$-~?RZ$-p@!6j8Ji80(MkW1KP;dDiV?07G;U z`V1y-Eddl~XduW%L}^qw!_mmmE~~7P?y)dY-@cHiDl96>>r%iU#CFhP=jMfBRq1t# z?CXKV6FIr9ZYQ2B?OAbe4wX zmd*bR(nKlxk7!@Q9g?7Y%GdI6Kq}^F>?-K{A5Vb9p2yt*;UJ9ce{h@eB2YLHEyu!!WC*syiAo=jdPqurUBnk zge6!z^%SF!&K%-?a)HAeT}MOgQF5cb-LgI(A+>d{t?OM=G?Rm?|17iACZpT4X3W|i@BWB> zCAcb@tR8Px$Bl45LLs1om7M2ufDZFI;*tE@AUm;Pb>wPK)wO8`3)BzanL0%`aJD`> zJ5EXt?bazMH`3O>-$6BHvp?8K6pJ7Stu)=^0Tn~=aoVR0;`ynw>82-Kb5&~RR35Jc zA903by4B@L$LQ+uz(o0(tp7d+63nv4Lbu2{8Y=#?W%lMJ-xXV4+yQ_`Cit-XzyQnm zYmpG2r zYTzuyfBmLAZPvr}#

H(^UoKSXiLBrz1iWpfo=U()s_&d z0|J_W0jKpEH$qV`L>w6`0pR2HOs|Sk92?KgCli}X9$SxN<={DXL)=pRmY2@(j<%Pvutk=u^{M25gyUV$Xf~`xp+3 zSVWuL=(xbK*mMUn|Ng0#NneuhoglzTie}R3cZV@WZhdoA!QU&tPZEjTePb~2q~P{D z=>6L*y3E&Vam~?`zz&sotCNK^s9GfJOC~#9e()ucyPkmP*JyY<_#1kKkPk;P(9m(= zaQQ3S>KJf|VZ}|iCFypANJzoqxA7*YL}ge`NV!=&YwL=0hOR))cyoT>_mlYU2wmp1 z*u9vC>ixSF;fSz;5Qy0w_g!E7< zWBo(WC7m65XdIm9V>Vt1rVSUfAYSL!z%+|&OU-8`CGOsg_`SnYlMUJGVocC;2|6Vd zP1PFi6IwQC`-j&J`2768H?leOPD$i~P47K=?|N+#Mj4pPK#opAHo554P$6CyPuJuk-%C(%UNv4a-Bvhj_MeoZEGaW{^A7qW%7S|1i^h z*Xx1{bNe+Coh+L4MKT$@h;G6;*;L~nX`Dbq;)oCjdq}I4R385;^Nc@Md?pt=KT8-R ziR_OD-*4OjmXMRWr!D2RKQ_}^{mZa37jhhu|!=jRO?SN!0_U~-n_=AAUSvl&{)7tLN%8|i6E8%i% z|2FjFq1D=EK8(*md@~MSV&Jbx?c_7R-7Gih*Imb&Ej=H7TE5AA8n?zfbn<3od5EQ}0mO(o8&}Ffq-W8H zl521=#8oys)EGxWB8fHA8%IH?XS9aMUHLa&UvJN^@8e^54|cN8#Fb|kjLyvc5^jr3fa>}`NmMN$wL`Knniw4!zW?;he&>=U^dgiz8Ka{3t}f5WtN?zA`L2=EK|brQ(O<5L23CP#B?;4%iXcomi5Q{t9}J7-TDKln zSS=xV?gd6}wp*dTt!kL4=2y#&Z%QpS0=$b_m5#tUDylGuAPMnG*O{4|-=8DwTWy%i_{#2q z#>Ae4lR+D!A?XPN?Oznmx5e56PJ=ZouuavztW9%~$v!tACu2OPvRL1P$xOp>@dh+Yae@?)vLRkURt(GnJ#bq>7c+7!6g z3|`Uz*)F2gdwd&U9yU6x2^Ie3sr7`QT`4#wpkr5c!F7@XAvI%#s19BWoIdPE3X1_A znV8=uP@TX2ZaC=WU)V*k*z^ntC!P?_q7NV#G8{wCs$_xwxza<*(-7&o1rtNYTJ};z zzosxioPxFI-kP;a1pnwd8zer$Os&R0RHJ%}#yaY)j5*rx#gI|9_uG|5LLv)1;r&6n zGo1p1Xys5^yMpD|j`~8{Z(Q4hYO|LGww+1!1uky0Y?+e*GR8_Tle9kpw$E@9Th>wZ zvPSVh!eSZ%M(h1l1?Hv{=`6Nm)F{}~-m1>az~fx@(PG}P`Pl8#(%WyV4_ZqbzdcaZ z>VVTMNf&mFPr@Qf74SR~ozKwKf1RGpuWG2C@&KnBB<= zTD8@VrK~IP`l-lP@3AD+cdcDwg=$bm65ZK$R>UMM@-uW6>;^L&_M{N@x=;BibZhkf z(GTY8`~=jSVJLZt!^b_zG zYelsgkyW>FR`HI@{l+h%=<+xUaU^Ss4Sx0k`Ea!1o&m2BmGE#_?ykYoH%KP8)F?ww z?8o-QR45)b{XN_mUzzMoJ5M}#UHt@<*S@p|BdTt+l|Zg zM-V~8QfvBsoFF2yI+ilsQra(u`M!5RF&ood5f69S^dBhu8c1~i{CqBl^f*&<1jxnq z^bArGNyp6hg~k1dCP!Lg5F+;*)ly8rQUm2Lv)5>l;!IjE4?D~N?y2eQneR?Wy{Nh2 zt!G`yjF_8$@eodBKw`puvj|s0nh9wxo;AJ%&}jCjSjhfx8z(w?uypQ>EwhM(z%r&9 zI*%c9;6l5voK%(16ocOWEoL_pMF#~JJL#le$Gc|XgqSJv#so=t;RH6F%NX2uR}r;L z(R~a(^p7^!KL^VI`-2Op?`T#8GcA0K*31QwMef=T;}C^r&u!-NH-2EQd#pY zTngYbY|wbNZ*3ibCvs%!$5ZnJ!KR1b?i;85DGQppXj<_A z$VM?KR8V}t2@zOh$>Fn?iPxvrdc;jHTNB@J+hk!WYW7T0c`6IDb&ZV%%dGHj(uuC4 zYelygXGy&46R)Z?`NRvUk+L~?$(5Zek-pZ7faAWtu1+FtZ`Nq+x;{Hno9i6{XpchU z{i1N=NVn>kRcjHkY&((_tS(wEOk@0$A+UI zA3Et!S|uf%;WI_=j)YQmBkgUpzYKhdrvnTrb4XfXmzB2o_pgo_C{t5?!2=Y8>JYFU{7_5+cG`C33XbI8)GL7CFqyv7S=hhEELA{tx_LD zXn2UEBXKW*G#?j26S8RAo4Ix4L#(*(&+|g{6R%ohcZ)ZN(rgqL&P>yLzBo>*b)urS zkFs+A)}e~Zxd7|Et-zGZ1`NHD=9o2M(R`Mai_U$q!zh(x3E`SR5-D*xUuQqWLZ)YC z=~U>U7G+mI;-8Y;iv>LAG5s@a1+A-E)NqKmIS|s*4BM8%db?QV9j$aG@1pnGN>spB zuTtMi)_=LulFO!kvpgg~Y7O`SQy7D$eFIp=NZtH$a!nT-!U6<*PKvGQAN3;DkK_^sy}yrKoRg4;T%N}Un5-jQ}XvCFC*(=ZCxg=)Gw$k^wC~;%UnxA7JrX74Y(|9Q;BA z#(7w`CU>AASP7hZbkC&^Pa6b-^Mx@1=d}bJ%Pw0EtH?_UyqS4G52&#mq}$+Ti{lde zQz_ypvqk_sy(E8DW}PrjB9mBW$Nnx>C?Gi(v|&N^-E{`bo~mUS_7ULo5r_+OoG-MS z*`3~Hp|InNk}7LpGOaEnDNCJq&<@#G!QEvCE9r#8rd`*aC8@Q6N5+{*>#tXD$Ybf} zbFdmE$-U`KScy5L#MQhQ*HHiByYpE6Un;FWC50F=ytbQHnPs@0Tl38oi{kxm zW!AFsr}G=dz=rfEl=zN|@xe~zyR!npWMulBJYc>^WU0Wh+p7Hg++%&*nC1y;$@n!K zmPaw)<4S2q0$^VtJ2z8Of(R$kus;^2*;#HeF^2AEOC)z1Ua&aAT;DSXnZfEtX}9kb zK1kCKb>qJwo>kR>bbek3I$a;YpgPk{P zOM*_K0A{;qR6QS@9WoyPWyK03A3&5BM}ju~?Eu|nHC!UD0Beg-(^HklMm`!J)|$&K z!(a9YIy9s9E2}tKQV!EczZNJDrU%?_DeQ)OY#1b*rCU+oahv8H28VLEjGQ}IeAyo;73D2( z2fR-MxjX!}q_+cGkeBd1`QcMne;Gf~Xzr7~7`uo{6~7sZhI|Q(*?9^*DW&7^`vroCQVN1qJ+EJ=&22#s-!MtY6&3wbK zRdaY2I3&t%D98^P@@p>+UF@vkR zkx3ZanID-Hv65&6$fokEPNk7;Q%Lh;VMw77SZ%3olF52iBR&(gNWXaGH`)CJ0t{Ly1s$gx0i3M$@fOwZ)qrGqfPLU$B8Inf+%0<2VP0U>6YPZ?XNpH8+G}%2=T{Zw2Eq6CQY83X|1nX0js)Bv- z5W71+Y)P^XHyZ?7`KGQ8){Lgz4hq}|TlGrVV}$ivh^i)zn5AeeGu#xYVkB{7=c*M3 
z5!P-e=Ju6bg8_5Be5{ou{0)B>b>gYA*o_c>m_!{TxjomIJygNrXu$ZL>(9oG5)kKw zYkbEI1$(UI=5R@lGZ`95vivadb{sPGm;ahI=)tn>>aD8hm7Kgcos^H9j8F_^%wkZ< z>J|ct>B%LzxZ}P9JRlD#y3QpoVi_u&lQn!@o$>O=1?Kj3#tqJDyps<MkML+-9*mAiC&t}Q zjL+5^tG5WI5OV2PdjIYnKcZF@Tz%#T%VaQq_s8`a2O`~LRCdz)s^j~`5`}@|?E3Zi zrX2XGkQT{ADGJF59~lcB6G{$uUL=r@z$d_Wd1{YKnC;8=Qk62(X$aKn28wE2h5*_&Pc|4t2A znE$s)HuhifEgv70vx}3dp)Hih=B&21{gxP--&;Mxqf)Cxne$QYfYCvT+!b3R@|K{2 z%a?#MaW;f#RnG|H0)D@_9SLbH#c~XKTdBS*XKhm`YaL3 z$4b5~tvN_|^T1Q${KT~tyN8W%Na1LJ zq9XV?(E!Es%o_^iX!x+Pk~!9bMGY(Es|l0D>DC3C<`E-%DkWcik3;*&GC3vv>@)gM zG}X2*9v-ApC=C?;Dx-UPIcr}n)4v?d1-M}QvleG#loX>t|D|SK$AqjpEa_FLJrem| zj#5hc^B`pMDc;C3J|?j(R1;Mall(=vFFte%kQW}|1#U(FK#L#VgHDu>aomagp*%1- zE;E^i*BiNJscRC@EMLrNQAm;6bW#1#^!p}m=_|d4p2W*I_t6~{3t zBs0f6wQ6JZPF|}hri*Yy+;C{%3@}AwWZ-CyCuemIC?-8KM%Q8YJ?_sA4l24tkOw~rDk6z2bsYP1B zrZV-)3%!DO%uXM001goxd3XRZK@$K7u)qq@R_WhF&8Ic1WgrNWN{Qr$C#N_2y_ZES ziXvL-MA~qIY)J;_CK!%vm?k6w8I!JpMMAJ9Al@atkMleRDq zGPbr+zEImEQZxzw7MaX;Qm%@Sw%XY~=5nXY?51+Dr?END0Eov1JEkG%iz2>!jKn+; z#R+v5V+g!^?9773mgl(apAHT3nB4xtfe6KUuy6tu!hckW4&+wo1j&#dlFL#a)&JQ5 z$R$?4C}0j?36hgDM<|NW;M&=k?7!Xv@FYe*BbbLLWX#Fq>Ex5n;|BHc_Hz54JJvBz z&zGlz5wEWS*9&HpL$8(9EE&T(3*{ufk;R_wkslmd_qlB6_)vnS9+2qLodAS7IG*@G znbSPp4tzWLD34|*Pv?hKWVf@0yX1-ZnLJ+c#2Z>yYRCNB1&P@S15TJjoG&j2kuEMB z!QJ-#esTPG`TQ;Oa53Hvcr^S8e$tTKEI$J%#KrIduP7?H=C7^!(#%NQzV6t9h=R589S9$0e+r0f*oV< z$=%TpxG-48;@I+m_u+K!aFvVst#;a|weCd&we@)asF82C^raT>*Ga+J+9&MIDVJ|< zxNlJTaPK#DV2dkZuP_lre*g5q++t18Fh!Bk)iZ-BwgMgg>iUlYYXN8MtZ@qVL;!F7 z`|n>E`2i6s+%V=xJY9WWPmlT6 zhb0qPsVBV|v8FbO$JbJs>SlzXl145>H8M&le)B zdtjOpV6SY6>_<~ih$wbo8rvCO5Z6bWC)ZS{bgBaVrdE}veA)Wb)3oxIx+S1yh48bd z@;X`#UQ?#F6smTO3v?utDYJY2O1LN2;H4kOho6F=92j`bLQ)>mU~K;VdvN`I`8+!b z&o7Bd;4MP{%_c)oIskbgcS0eDL}<*!n)Y2j(j=4Dbgh_9?JHx=%+ROJ@3eeg)S72u z2=?hfmc=v|H;dJPhS`nP#O+9$1XQ+etT$UN^H&R;LdmOX3PPCfdam3OFJY{E2Se7` zyEWNsr?yZx5(l2fqkpB9XtPSh{3QI?zUH3ec>W3gZHh2Xm~iC3RQ76X?%bGxl-Wg1(N@dIs4FZdcw<=af5N3$ zR({6Jt7*4&sAd6XA;IOH@N>NTWXkIwi9aZBgPrk0{Ocyed~b+&PWUss3u7%=s!OF* z%d-@M1wOv2)})_17hlWN#blb>>wihQ2YcHtO@VrBS8ME|$8Mvnmzzyb2B8M&Pj+${ zAIlcoy|_zMj9G^YRdDd_pu9qqJUg!No!x13GQ$AzS}DO`T(Kcwj9P6$fo*fBoiUTF zoptdq-?bcv&H@iWa&%a!wl;G%8btVt0rv7XuLY+nrJZ%NG)!quPKI<^H0At@=t>o# zKXk^faJ%iKporw`+=GL7^de_tmqZtJ%x$66R)!=BTUO}ZVxNVMMRzjKSIf@nMAGV( z+Em4Ek|d&4;lf~yMTZG>Q~1S)F@h7CZ84nDm-#CkWL}L{UAu3DVX+S?TC>}X1ZmkV zR~W&@k{DC*suq-W<7Y>uGRT*Ftf5HLPCKm=QQem`LI%HEeL*MInG?G4moYPrJB2U|#LYE(?> z2MOglc(ys8Av}o;*Lu+4CX#D5p7)J@(ViBbMtC_I=MGcn6gpR;2EF8VA(VF(g^8tw zWi&6Zq~96>VntP4HhFaIcGo~qx^ACT-U?KIuR^s3pi8#k=IQ}-;cb6vC=g-Kxc-Z?`^wy;UHA)Gjx03!uQ=n(F zR!qm=&6ME7Nm#FuY1eeMCAD3U*vVR^1~|rqm$=kKbe5p-3%g?tw_M3Z!`X+%>5kIj zvJ@%1~l9k#Fy>2;BH-9XW;&Iu{6ncpdYSsuSjg z4<*A>h1yYzbS_K2*27KAL1N)K4W4nQ&_04Wt`%=NgJmh5QIFLTL>0! 
z-?bKWD*w?JZLc&uiX0-dYtvyK8A+seUzqzgw;(E}xn@lSoj*?zquT=nfXy$QkD02& z=Gj(xK0Q2sp91MoAFtobJcwpbi1v+GEAm++JqLz}(dmZ<{Dy(GN@>yy-n7H-LHfL^ zO5@!i4th%C~>&+zu|qt46C`Rn9m_PmbK2HiNf zB;uz^ChiEd?N2|2?WhX1X!hc5h$)={oW|f2_Ky)MQSS%CSt^(obwD8%ss-(u-1S&U zx8zU})&QP_)g&S$P34y4Wq9=B>j+x-_>LUD2#vo{GZy5rICHWRmgjdm_Jd zt2Vawr1z%X+NXp51-#BHyrGYJiv9-=z>Gu_&Vc6w5;%C_tRCS+?CAQG32AfolE!Jxx<~#?sBWBW(8&O`)2gx>tkej0bGRx2wPyBfQUV?zbX#8fe z@WOyR%nMEAYYSj^bEJDbE*E@;r2UM~FVqGWJ7|E5dp{m3rwAw@E?K&{^>H}hKM983 zkHD+wb!oLH@xPEGV72M?2;&<7pvDRECSzvUoQThHWNY~9UyV?Y{~vl;j{j{h%fj)W z{DfLJGUk-hss(21SDE{hG-*+l3Nv+T z&HShMR=w3vO}-CntMce~b##3?5bu=vklZW=k|k!-H>3Fe4;xTiep!xaWs}ziVYcRm zRRuI=`E-80`))hVCyU2I40Nk9q=3m(SULofnn@%(M0t{j5PpQ=WCzU|r4N%ZNzQ3v zi|jKCiv-AMlNKIegodo+)MSa&_iK}qa|PUSl5&9NJP>A5q%??8CZsV5ILzk$G(dA7 z-{a`f5~5E;_F7e{)-)$Rs@4RA?b*99vwq#Pm)(4gH)6`c+B-?6i@qCIWwX^t zoD9f`h?_$YBMxAVfi#92Wks96G4!WYy+-BH+YmlA6TC|Cf<7a!%A>YuQbXt-rQbbg zUDdh-Zo%=xg+|5(Bddzk44mz^R~6!%JymZj@SSyZw|w-JB8&vrSbm2`S@I6T~MB zModw8K3qJC6NN^W7NIy^p+2pR1il%So0fd2cJ4;Ly6W0hk)iu4-E&yIMN~Fq%0rF| z+9jp*#mE|#?4BPRsy9_{;vuE2+R*Ja zIQUbhjry9`nx1QCsm4oW&BM&(!*)S^AbF#6i8J>N$|S$-&IaUjX)E!>G}w7Y)uOz#`~Gxl%36X1sD06EohLTER z$~OabgSM#2M(RnR+XirXh1En8kAMk??cK@1i44#};KTNJ4D62W@uU+#G)e~=GH|p~ z8U;9wXAsC(l3`#Hkf#`Vc##GISAkISu(I7QV4U_Kt}9)zpZ$~)<_!gbHEu%+7m~- z_8-?XFp!5&D5FDyvp@5PX0ApS;(i@(lpPeZg*kXR28t$wpyst0)ML-2srWn90k-k; z!1&T_@Ej?VD+9$WzrnPCXz_q>^8j(-f$_u}z`TGPQZn#739?RLJRTq(K42ak0KOy} z_7tEw|$Avm%P^}#rXrrh5)=~*I40wUiocL-h7

#pTooJXJd!sUHc!Uo zRW$9IUN=Qu;IgY0>4a#D^)<}7g4&3#WU}7GD_m!HbVtg}< z0FtU?>viidyJBc9_;i@UNdcSHxRAajff^vEvHvB;*vfbSbqRr+SY_!IuS)ZWAXmRjI4B-WIUd$Xl!!vSFI z=!M~8fkp;TlP~}s8^$eam1LEr|f;z@R}6iD^H z7L0aAK1^0sqOxEB>S--tXsk~H_?b9?&Pb3BdL=8$=UTcLH3`OkCi%hjo>fiDYM0OT zkKB;^rv&398oBT;6BwuFZKbpJw4#_OvVKZuhN8KI-^&xL5?lAb`oZiYko5vwK zpOaHx;|%E+{?23jx?O?5t3liPIs2SOE&nb@ksO{^eDq?rINwH; zN~1{Wd!FR*mt7n&Ndq`yp4$#t$@IvKV2#RUo(KWY3M&QPr#5O2`l~}T_Xlxp=J5uUTtTTY zw85nf)Iy*ypQNg;iF?Oup*I|Se-DTw$0PD^>tuv$kAX;&#} zn4fB{{obHP*QDR_n?Gzncg8XFx|s$je=a16BhMv)v2No%`aNF?!3qx*-Aph< zUkO^uw!Ut~$K7yObHGS@XV;2HcIDt90nF>G1RX+e0Q-SbM75Dqnn2=ELL36r|Lo>M zn_T0hx{yYDELI!pCC=<75O7*U4h5Dlm?XBA1cr5(lPW1C$g|8Id$n;VH z`Wt#^PV(g@%HthogpL1RTN!?*`?bUzI!+ilL=HBnD?J~b1|B~hzeI~tyi0E5e0e!K z+c-En*I-u~j)urIE``cG9GT_UvuuLLi*yNEFhU1lLx+}?-=B5{wEV#HJ=D7Xzdau7 z|C)i$#Q2}`z#Cm{XYF=GzqNXfWxkZx6pGvQnF?mx9F7-~R%&Ybx^dGA>~WOIgq4ne zZ@M%A!U<;*TuAV=BeTM43~+WJeY!Mg-4L~O)LOkfpI;Y?`hDtkP9v6e)yvUQirSc^ zk!Uqsyw$s~yEOCY(~~xTum4_D7vIUn`|%nnq^i-N{TCxIwmyEhw|o`T$3s<-D5sXs zu<=TY1J=KMb*tDPcX6G5=OUl+Fy+t@AcR{TQwoxq3+52?HP1@*E-Z$M>wWTU?17p# zv!0G4(Jw&b=B2stRKc@iW8YlnC*qlV#Rt@3o$r`qa@h|fS%zme90aWFKq1)MMWb+{G3DSx4yJ#Yfp{chts2zxoHLoL$($* z5e8k{Jo+C*OKQOo*44+Jt_k#GMBTD3+P_*w1v9z@5XO~`jpA?dm0LJXOd=CIm5ZxIV1&-(9fL{?I3lGhOsKJy9oEU~)s26aDv4#Neg{{G)3d|hi&zNQ88F98;vxr!3s;>G45(|HCF(dH=nH3aCf z5LTS-0o9g8xdgeYm;VIU|N9)Q?|>G-88YBWU;z@uhKDLZzXxpiXMQf{0D?6h_P&oq zFEDHKHv?y!esUdYz+4@Vnh3PoIPAwo1X)?yEJ0dh&2pGlPh4KuMtgGNd) zCRzif8`~?6>Hxx&sgNon!MQ}a#P6i|{8zq$^AZjGjC28W0dawK0lP%16fa-l))ODV z43F%vu(K5&zoZ@wbKjAViEVHKpaMY@N`+b=gg9M{MePklVWaIniF~rQBv5#Cf7aJD zT0^2<;Edj#w2e@GC{>kfTOTlK944WDeat9c6PsM#P7ZJNgmg4(P1T?C9vZmDo4x~m)&r#P2z=cE;F<21!oW=pEM zm?$Mk__aI7#fwBVmlFkfL{sUY4re~P;Z0OBB7>ty^8=YYGAZGhmXKu*j~Kkcwo(Z6 z>Ov>!fg01?bWJ`3@8AOx3FgIKkkDnNSP^N$HxIu4-m06wFg7u?i1l$_TT;Y>SBTv}9g!W2mbm zBJc%UqIFi2y$1cHq(th{Lb*}ns4s-*yugcm5LLIHNs@}BXc1_cmGeZCZHj5g zM_Xk+Dut$T);yV<$%acN?QEEJvSHT9mv#=eM0ImbT5B13#M>0TWXe9WVeZM4y@fnC zYw&_S+gvt!=2(L;OV#fu)Rqv%E{M?QlG>hd=GS2ITjlU5L&#DH#8JnPrGk_)JcLtA zW2Ho5=-l3mNM(yxl^-NgH7|prWf362x2%f^eQ;S*j_UyG-rL2Hk)II(nS_=CaI4V(D@cRH!t|f(W>+OzMni&Smz=YYEyF8-H!HmESWN`-qDNR!S^fk<>eYNs(UbepUF zDtUsgo6GxRzln0SogD;WAS=G-fP+V_{{tIgyrBr|iVEnjmv5@luDrB?=ueDr2edmG zD_|gm62=XHMI&6;o*ah^n8dahhhDxY2|Y%wb#fo4JsN`BT$uYl!Jt ziiljoJ!RiU-1h>p21yX3=tB;+7-5J(<9U`*WCB(c>0RjOqFJqS zgao?Z*{1sy&=~GtU5uEy#tnLPRDX}ZYi^ikNi&E3q_fs1_8hvT zy!A!2bD_Jkhe>2w-+Z-p_mG&ok;6UY+{y7W*P!qHb)0u5e?%|}QO)Kx=4fZXg>||y zV#2sU9tHb~FjGjkffS^^X1!R_E&915f1UUX0`I!ixSO_R-NJUD`=->=;*7uIhVK>w zpLB70wyF7hw7rxYRLeC2P%)VBd%Ev=A;)dopFo^CTQECIFwt&)vMtrPYc$!h${^?? 
z12OEG_c}M$E7PE7ZjfAg4iHW6rQ|gUn-)yoHbB2{i7c{HV}b`QyYxS^QyMQZY$cCO?#n4GtoD8UiiG_{|yb zNA?ftKDbh0$l58cxn5uTbf0x7f5Sg%6P&$9{wsq|N7%qo-y~hE&{+ReU6dKSE}HGW zL;Ghti#fqpf<^ph;>FH<{ZTOgi(*0-anB1(B%=38ox&00AePrjAD@Q-OGHec+>T<} zJRglk7{7s``%|ParwGidP;-1(Ruj!>nu}E#4q=Z*#oWQSgwj=X7OkN4+AVIfv!7E^ z;t->lV7+Cz7^+cL`X-E1%*AFJ4y(9x?2^kGpFHHCham4G5^LKmttp|q5}OL08G`#K z*DTQ)8xFrOi-aTw~&-&>IKU%Da^~d8s-ROw6_Vd1Zb82J zS<7|AiXPWtxWfH9dIyGD_nNiB1RMjZ5br&su)CURD3xOBzxon%>=n8acqJSEv{Hb4 z$J%$LtW3FdmwwDSWYhk$aR2jiRJ1`Pu^IFi(dRFyD)@siI*Q=KxW^y;?58To-M07v znNN7HYbS#LmfEng{pZw1#nZu*fL`9nO4-Ghfc}5ffDp5Ea&{r$__cbXmoc?7cd;N~ zWanTfpqC@~{~quj(cHDi7De!T(Qk~uYx={?&D<4DR00G=5UA2B3RJ}xvYZk@RfmI~ z`jcl*mp&Yc?S085SmkZLyS2r>``KHeC?VJi+JwVmxfgOv+906-twSwGdz9mFln@Pa zfD&TWUuy(|I2g-jTqjrxdVR16J3g-$@6BM^)Rc63CJNe;yeH-lwIKd(GI6Oeb1B@L;9t6#}M2{WUx z4naYMw_wl!L}WAOk4Z-ESWrNy3VqUoU_j;KXxC7GfR2h0I%Tu+55>G_dKuF?%6tWl zi8GslzuFQ{uYznU9lK~+hR&jrj04U=Q0>e^7ZW#hSt2w`4 zYU^J_OKIs z1A!pyjBdhS9V&F)tPC^o!luGwcrc%#vf&bNG6^>Y+_{cUSqQNl)Jb&OREy%`LeNW* z9WzsjX5s40g01`YWt#^^es&JMdgE@!=*_FwCo>`VeC2NF?(mMbmta_bCd~YCN!3Hr zq>&I`gBA(Kq!Q6jbY4?LNZ{t}gI{f)^V7xc`{nNF%))2z?#RWpjRQXqzK`c)fnWc# zRja4#)e1u{4S)7KGWs}sfBTXje6~_disa#VO@Hogj?fQXkLwC0FLZxx*H7GSP|SeC z|L6I-`Xj_D4)R_*f@olbx2XaualeR8Q?gN>x`~)dXPq?L>FMk$F_nnX5g)9?QpCu0 z;$)QZWk7T$LW$cnvj{yIPty*CSHIJQbtEdFm(;tPLqD{*m;t9cy=*EwPH~P^_EZAr zs389^ZU1*sd-sXEgBx19jT^c%`Zh$ZYt=0qUrJ_w+_DB4*9Yqj9vq8)ck~)pQRm_quhY+3}`}=fbt+*$HGa zy-cw*lxh7)X4fUBr&6z>KoY^iWYupML3*NY8{Z&!F)_LJ#OT%U)&Oz%9y#Bz!rRTE z!C-arY|`<=v_pD)oR=ue=xNm}TwwD0YMh^cJ??B!je~jDj@v=1Y&*+MPZDuyjt_c! zyqftq=6To`Fp+Nb;ExJwORfckw!&?a@xTVr4Bv|kiZpTW=%x*E6NGGSxTF>3UP*Xjxq>Rf#oHx=2=C?*B_k z&Bb<=YrqSu+?W^Sr7W@*eVGTklC(`*!N+@}sc@+x@q1Ovjxph0x^xfUGT$F3mJ!R3 zRWkgM{Ec?4|MzAcPP3jvV+8aM6$tW_8^*`D7e^yS2d~2*zIN3;nwq6l#LSb0#T}~c zb(oV5YQe``S;gu~IV+Iv*!DOHZamU?k9$6ZsMwFdtu4@l1Kd<@;U(Of0Rmm{hz~8x z8z%&@+pweC4FOs!h*v|pXDBkhw|7;Z!fB#0&d`}Z%2n={hrvsb0VJicRo@>m_fb5A z2RhCBEvb=Mv;7(lK-FsdJprE}fE;xNO=h-eOCcupjZ3+I{P{b)1>g}+Q2X1%h+}3d zdGT`MKP0fkgif&W2_44Y%?E)CDG0{G41^S zw8wbzJbz|5tH;5O?sMcha!^~a{aVQ-K(+N5NK~MV8XkN)%hewpxPyWwW0mwac8ph+ z*VsaOFed`Dj#a%VuY(`a&ymX>XAHBUd)B-k>PFf?Q z_|9aC@6SLnw+M1^;crG(Tl=*;o8BpzeJ!hfUTCW;p+y7rw|#W^!p(5BC}EVJFxZ$2 z#WCEHpmPI-^;)`E2|V^s=#BqKH=zYYcS^hPJPh|Z-N1QhZEF0t?s;(&RQai%X4D9m zDe2fEmud6p1no_OcJbyWVwpFEhQaS4sR?ph`iJRBLAQsF74_-0K%OJV4WwrMn%QwuiwF0b5uF zfG{p&LiSKl{6ft2KGlp*m-jUmKNt{x6UcBF~+`BgD)nDxP zbl=ZcbLey5x0sLli-^+oPDZ78vgvbreTBuD{&#;6f1NfK#64^|k>o!5){52VzCSQ! 
z4;PE?F^T0U^T-6+or#4h4aLSJKQYDT^ken-%~cleMJ;3>)wP=LIN66o?ibDHSh0V$ z(|K0>L*J+D^PKsnEW%V5%=T29mw{cyflLPB14WWsE!R^}E89MW&K*kM+Ae zkANhy@w;Bp{rXfPn>iTeDefh=@`3*bu2XrnuEeq3jkoiBc%T;t7$vzEC~PGP0*ncD zGsI1FLm@L4+k87i8d?Dko8Ennv5tJW%?ZBdNC(=9$M`1P&6Cr_Bs#iR3dODNP)Z3| zeTC=a_cQxbGLiaK}sj(w7d6-hSctf_$yu zLE$>&emSI&2Nek$J7=G(#nAsyiX&IpEq#EXj4cK1%fX}fu^g4IurKHiO9Ne|pW^o7 z34g|1_78A^rM0fOE!+~6uCXu7Ny&>Ho7#%U^AAbm2&Z1o{z;SM{l=GP6@rBbK)yo^ zuXL<{K1&)Rz~MW-BX;`0>*8K`*2khl|7>y{I0m?x6-jV|# zi`#&Mp&CS-GyzN1)pOI=6ZXXX6EK|H>C7#=HA|?yJ1qw#SfWsSQ=%~ZFx)XothCU7 z;3!&R=XMVyU%PlvSubvm&xWC34%iLO0Gx+LU)US?4s@}mke?3$J_u8~xD|E^2GK)r z?*q9N(RinAQ!sK*Sx&X+3I!nxSV+krf>M@z&PGPO&n@3=_zHHx3<5-5 zal0uv3h>;mYhp6o7!q>=XJP^qT_hJ|KqknLOppOc!UYuDa{>kEH7 zmL_>KBY~-iF$*a?H|AHo(Dp``_>i}3{ zDOPAU6$EnR_4Sii!DA4`wv`cOx|Y%r=k1Y4eQ;I#GdflIH(tZYQYzzvKG?Ip&m{ph zP>L8Evq~b;9hGyfEJonjc$M>6e7?Ri=SKkuwg~F==er_!+-VWOMZvX9JDWHa6gCA$ z{oNJJ#hBf~LmE71`~~}&F`d+bWdSH{V}_X_rguVOcgE_aN{n#~HLW^*=6uc0xalz< zNB0k{VA`q7RQKaDV!P0HgbHBAX*^;B=#aluMVj6rTF9in(XAniA{CfD{H19z806K; zR?|MW-mgc@0t&;+W3xj2uZn|+0agVgf11#$8cWPHh)JTizFCEOxmeW2p|V%N*D7lk zU03A|_D`H^gI!@`taF?DrxMG-FJUM>%TVWlfOEOfW`Xo?3Uh=zM6Z!4>J{HS2}-2@ z5_K?#S!~dC6o*8~+^lmNDmV$5B7%_@DDDP|`Kmu%Yc9b-@wQjFz_DS;F@kek1I*$` zT+f!3|4z+l;eL;k_Jx2Y2=v?6-^%Z6kv_+<{ zH{410iJmCtYD#sXJHlmvkN-@+L@o^&xxFj?8Q6Xy_6KeIbuOW20A&NFW+A(4QTSz( zvSo{)D+u@E8rX|ji{9r(1JR`pZkph;eGOH>_;ufN(qdqV$j?HuLK%`@#2dT;X*j}6 zUEXrgLyvbOru#l5i%?IQ*`(hAlB8Jarf{q99c>aaC0k3YDA+Pk>S`W^QK(C+XqdD! zePdOrd}iV69A&ba>#zpUI6Qvc1@Dy-OjzqoiWA5xLDwjDQjRtozTqRd(2aGv3PjK)jZUU|Or2P? zk6ykhUkzyd?p;mbHeqhYhXB@5IUZ>SM!Z$-`8@%%);f5bp>U9UW6H8Jq{!9C{AOKwhTg^ zW2(R`zt?FL`5W}@n3Em_o*OOrWEWv|(dLpXP?;p)b9OV+pKr2nqgUDc-r#zayA_Jk zSXYYKCJA`e=}yTHXXnqPm(Axlywr-UWkKBPcX^QnKRtioZr0iy`q;v~o_^kTpU7F9qqqfSc?1;R2b zHL{fVl85K!5|W3?#>>hxwGrKi`MMj;cZgfv83}9EtlN%lTZQ)a18sa`Um1{|1IwYm z(sXd>7Cb(dxuTPw^QcU^5v~ZwjVDar%u!&9w!67vlIE?ru}?t!K_c`6ntThj^-R8%{0Ez|Hql~~fX3iutroQVesTy^ zvDcIG6U?Psa+4Pv>YFO3s}N0shYjO?73g0Z1i@tK|7RS-*{uk2r#Q`(X~iZsKJlC5 zuB2sQNsvn}B<nsyj!#jb3y#w5qs97SZ z`jkpfFpb-@+9Ak24p}riuSpeYmoc9k&A&%ctuzW?dcr6XA0uEep}#TLuku9N;|cT0 zmS%gsxF%p8O<=>?!@qKl{#xS8K4l(}za@9key&}V5E5-BFl9;P30CnAbSPO3x%dqS z+C-{)yelyppCm~A$Pi|URRMth9e_Dy+yhZss@qL!stdzVy5&ttgN6&R6oHm53xGOn zE=1o#W{ouG+vI0)hn?hBN5pA2NcR+qUK*=R6xzGp7{@$TR0Qy zX$os}6XkwfnMaph9t_3|@MMX~%5Wsez|+Hb%pcl=(^m_Bke~c~oYQf0@SH&jZrni; zXj8Walqyqny;n_^Q1}9Fhjmf)i`$a?GtFlddZ&wbd6Bz6^Dy?qmUefYXaTmPrV8cd9$zr;9e`aqT!mN;a~{? 
zard=ky;0OTcgiux>a$-4k8|-OO`rgXgJ#xDcs z+xVM-B{`U&dIePP5$1X+7@;A@He`!)kf5#D;oO{Fr8hZLlbWpox15z&ofE6JN!&6e zka7+RFDYO%dEO^u&YVma@5fdbqElG*;GXuVFk;3;RhrjmvcoIBWnoYlBW*VhJ}_M+ zl#&Jh<4vD5GMZd$CRXMc)iYBa^Nqtd{Sv7UBw5)Xkvk~`L&gntAPRQh;M++B0E%8w ze(u_1QI4qFP=W>=Qc&yh>O1hI9AcdfOnirbn+a#7-e@sOgMyBQ1nf11Gkknp(bAy{d3 zIwwK-AKyhf4OK!tIq_#PmXM86`s_IDrS@HD*GS9n=97;T&)Jz~n?1jZE=58oM(o0H zdGS6!T-Y4RevcfY?2|O{^7{w^ndnW#l}4_6AU|(}%qo|aVn#!#k9!`S<937hIbQzvmkTv;TVE0z}7xOa|OLN<2O!y6E6%f}Yh1V>Kbu@G}-`4(^L9bgX4RFCG$ z5m_x=ULsz7DN9P;MwEjG({0MZ*J;KL@ zbsy1bEcuF&WR5l{!@NbRWoh47WRbWf2m)xi_bX6F&EBggf;=?_*DCv=-7l0t#TqO` z1ToS_6m>qtN(_}9sYE5_1tsp3buKr@+_#i-eiNtlhmi!Xkru8{&aWo#nDTcZ)x|Q0 zx)n<`rS}$&2FgC6gjt)=-bk$@t%KCvyb&*+;93Z;iTc+#IWSHIDgIvp_pGDr!R};>;hT7p3 zZVx9kmpjll>z%7cxkUQ3;`|fwHHFEcbcM;se~FbMNJgk(6l`62&qdK&KSXYfb=&F9 zGivuiU`uA5{;|UtO)-L|FYn5tSV?I;&ngWu6j+pR0zquzJ- z$ojDL2Q zDPa2t(f&75`ww7aXZqhy|Ju$B4F5e~bN|$QXX(zPDtDFTkD$1QH=J{RGJ0 z`3KHnW>R5B1Dz8i_3Fs66B2`mi;sqjkNflcQ$Uh@u_2xjfs_Vz>BIY1Bids`_!oq; ztHfP*<1>(({Vti_X#pSvV<5(-q@cdy!Beye@aQ8l0MCLIX6M_cMItibGKdQ5%QxQe zDc){6h!$>{1c!8Wb%pfWQVeTY3leTb0rQ+QFgAw*dKF-%*klLfD1$?N@_q00}XoXT=Mf(_{uY}g(Csi03!;s1Oa4F3M}_aK*=7zwEt z2wVWr1RjsH1PRuz2*dfP$XgN?8}|BLh|kKrW?%Ijklnac?pDO6P&jA2KKXYY~AXhJyiVSRg=R zMG$i}L1Dj7MNLe%dd;^u)q+Zofth10S&ykDA|JqthozR2#HV{V^`Hy)lJIdhSXRRuR+#a zAFA$iL7VNwZLm%Mc_qhYXENt3Mc);=XCo=r7?sD`BcFSBNrVb%ITYH3JL=1k+E?bI z81+!Q?hDmBje(n;o@u<(8AL9s`Rl6W(uh36yyuRI6ut!RQU1gvuNN@rKT;Z>%EiRA z@!9!Dl_p~z=C?Y&jaN&wk3qJ z&6>JyurI>+#2n8W<|Ts2w44oYAFc_y<`KEw#S8~ZiTo>8GejLW9w_%HB|%Zp_YQFD z9lwolC?#j)YRS_g;?XkXC(}=@4NcCGG{?CWp>xQ{iygOW^gWu@Y446Bw9Wr$XDBnA z?#&PGv8W8pKpj3^u)%tD>$FHTzObXBlh)4BowlD?tCfuSVEAw$Eq7+Rw-sA+pgB@- zbZF;&4*VWXDgWI20v>6-xzl5l%YcWbNy09;?kiO4_#3!vliO`@*2#uAllA7f?_@{r zwq6ozx-dj}O_cmq8#+no3jmU|!q6 zA-ZPOlJSJN-k~O-r~pnf-h7BH8d|n;4r(jCL3{;lrmQ+aaC(jTRF+&nmpN^Rl z#<2&x5?L?Mp^h1$WkiL;z`hRq+ckMpwtk)-as&S^Fl{d>xK!Njg%#fQL1>*%f48m% zwWibaWAC=G{t1+5S!zqIn{;%QTz?Rb9!WeAoX#eK4sIF7sa9~&Tu{YksYxA|Veu_Y z?98hKyC}r9xrLVl3v?x(@UQ^m)Rjpix@K7>R_vuw1+}nBzSE|-!o0_}WJR)TS>h|%sRT$Ki=O&{S;QRzt(6w$eZi??UVRjfi#4m_1M^Et@@HlpB|JG z$=yOV=WRD$T4I;RJNAU2{`0=XfJpPK(1jb5xa>l66r?p3tj8T9yZl>?>ZVL1(MDhE zw^q)pK*2n#G@faw^{D#2<-|``~cqb|?R9ucwB}iqCVo-IT?ebE2r% zN>`Nf{IQh16P!wR1g7|3G#jvc2n#XCF-{#yi-SwNy-Z2X6L8kmWH^m&m^I~Mcuk=U z-)inS<~oMln}|wAA)?&qFd4W;Y}~J=5h_uIsMzoqF6xK zBK7e*O63PeBQ`&yP&m*kCvN0!tdXA$^~*^MqOPVO^t$jghlRp;Z7MN7sVnE!5VBfs3ejb(_y*z719izH5yLuSrY9(>zEhcp-GHa7v&n$vWaAk@}e3MA?X~gQ34@1<)nKZ~t$0SP$ zZuI?rd#DPS&_=OQVWOTAuy8OGsWW6x|DhS$BsT+W5uCP|w)Mik|~ zE9mkxo4AW^*kKo;L<8aA2Jx^8y)gHyMM#@f;Uk;j)fh`*Ul-fCAXvLkHAr?Ke<%(v zuv(CPv`;XV!%tO#P}0GA5x1c*g^BJhFWop@e*IQPUF8r0$-9y2Tx&jG5F_0em5hRI zX6NlPPkvi_OK~e@*1cN(RE+>}bGaF@CF@%@-CrqC@0b1poTR??j)vHltiHK958D2U zwYSjP2f)dYzH+#x0`Bx%G(@wouz*Wws8=R}(_4vjwB=mNEN;x(kJ58TKprG8_j?iPU9?I-( zuRWlux$+ft2wRv0A1$!5t2VX~hHd6XgMs5^n^GxR!crto7cZ{QGBk|fR)R|Ag@I??%t9IMD%N#rE*-qiy#PXs{@ewqh9 zLF@|8DOCNfq@+3DkxWI9FQ6^bdw4z_Z<+c7LC?&|O>jPwN+xa!uY)3{$vEdMXNI48 z+K-IJou}tmG6kv$fqtLUFdK=lbflGuI2vy+tob?+`~zmqSeSUarV_99zskIHAvS`+ z!0v@7pU=cD4KqE6FP2BWDlYJ_H+$hBSu8k^#Bt_q`&m2Qs>^ksnm$Z@fmg&Bx5)?!~F{ z^)@zU&wR4H=yJp1&@4)4t)-Q>)n{yZ>S8n=2NGBK=x4UXOZRpek*#FUX9O>cT_dj0 znrC43deee1**5iZo<|zs4-N_w>V|wmX!?=u)BcL)-ub=_Y8xxIhN*a-6Bg*mA8M*R zkqsaD$g3a$p#UF>sg>PDnLHf_onk_R&Y*vR(WTD*76Xr0hTV*T+PoYm~k>#qJQ z*t*bC@KL2`i`Ecawyo%elW}>iTQRMs^4;Xiv2f8Vra z_RaQl3 z%}5@pBvoW1K&#B{QA=BKQ{`{IQRyRNPLR9n3Us= zoTyCL?zyl3;NP56{^5zaF5((Jkq$aA15$Dyn{JiYH@H}hW-Fr~KiiIWD~mNP7Cb+1 zNEAFewbJFNpVR3WW1H8Y3?}b9i(bL7Az2zOZ2ji}Y{Y`j&zG%Mj1(WG#Q%Jkb%JeJ 
zwfsI!H5g)Kr!9Mvd!}SYo2q)?E^DcFW|mmFdCfUF_ek(px^t5p=lTedgn+M|^Nc3- zQLvvoIi#(Y)Lf45bZ$Q}7ML0q$@SGNp(o716gs%Qtw%O(9e=al*8op!r6-D1H@13A z)us;xzOIaV!Z6Ensgq^8UQ+np)F?fa3yE6>W!s_jKApIF#&RK_aOM(T&z@?A>6olF zy1M{(E7Oe;J?J}slJ-!%{Jx({rCW|p3O8bedFu%Ag@!5j>T~owiLG{7|0S-z|4A^r zrgcBsKreo0!*h*|?Lzy4#wAuglx5Tf5*S;^vf?FX%a<32U3S}sNJ0( zdYi?_htu2GcKX@$2#f=lW=o(%H`ZU9%_*Pvkj&WU^8k_BF_uB3clZVA%8Pi00Kt$_ z91y$j(bfYAt1I30bIrN78`_*FO|EH?A4#%%re=7dWo-jQ49lUvPlFChZIL~m!v}W( z>*E1vo{n=8dYK&4x0EAbBTgKa!wC6KZO~lsk%ox6j#hP&!_M>BSjSj2vbRz31j7HP8)gPt{YgAKhA8fJ6l;g|GM)z z+Sr*Yq24Xe+&1bSCjH);U9$OQX$eX0!LU2Dq|N1BQh%=?jZmR+f?wXZG1$An7lFPs z$ExdQ8~2l@$52yJF#G5r+V!rs-~{sQdQ}$=`CT4EIYC!Sk_=oLNq&_CIPM)xs#>79 zIE}<3(v7HP3@6cQPyE+GE%nZ>wn3fG%sdVKUPk)Lb$@hm5&+Z(qX4voXuggxBa&~ zV!jku!D8Kkf>tSLZ!UPz!hIy(D&cO%$;0x@beOSaGg(ugF?y+U9aFLNg6kR}8tiVI z|N2MZWg>`j`B7L0?@#ZD_)JHFp@+aBSG1UJ1=kk2z`E4&cU$|ygdwyOlmlA3*z%6? zB$ja*Wv49}FY!#LZ5^2xkM^4>lYw(Om`Y$=UwKNUAW)`+11jOa`&W_A%+qCMPs0th z#%&UR(g@h96NYf~hC#hRFA9!)jx2w@E(=|%*tg@a$2k{#q$hEo!|ZKNoqQErq>ijy z=Vnb|z%pJ0>)(W;VHep;6wDc~{>{PmtzVaI!H6Y4P&d(YzpuAr8`bS(mX*4>JICed z{4Bq(70CavRqwE`=j;eBlgAVHMM3xo{>g6Mb7l_@@NsXqRJ3I~gt7GUpr3^BSn+hJ zPPmGGDGvVj%I_U{>pR_wCv=D$AZ}=ij|Bg=j7-2-VxgzkATh$0R>)tQF{K9gX5)i@ zDc;RQ9V`#mvvg;#O5cBq|I@Q49zGd<1YIg+ejY>Qp7yL;=xC%%#b*>&REsXRa&Z71 z3xn5AW9;;;B-VZD315|G)EquzF8^WYyRoed;DKZVOz=H7Q2jUw<@_wjk8PE8XqM* zg{e%zeBxD^dI9d@oxdYt=zI+4ED8An>^X@R6?c*pP6hr>lka0?p2rG+i|YuhY*tG0 zDxXP>hqY$twWgq|I2*>QFFJJpNB;&Fk1tt(DIZI8Ja-7kl&gcsU{Ns+i zew;NF>K%>JaHst($u68F*n@c8fnB%K94_wqlcsiQB3ZcXYnJ z-k8`HxVe3I|7S&Ces)=ViB;ucZZ?~JrIF+nLI+e$^M+fwwVUvbbWUsu!tFtVpD6Q- z^0G$s$+8&ZChey}evSHMS!NMhd#6kwolV9|Kg^%BwwYBb>mJ@UuOh1wS!P(|ACBRh z@+XBwZOv^)p4xpdeqM*|6>s9u80+fyHPur^EQNKPUft!_Gh;r7lu~{B(MVHG^5HfB z*LFfeNq`$ZCEb~1!~L%#bTh=AW2$)}eRN!_TKVo|Af< zsuvQnQv!oFC>l9V9GHxqLTSPwtPZJ}uYzl9^=j{f?3V43PH=`!MY43M zE3U+vaHs<%T{P%CjQzF`4)>W0K?L)8o?=Zv_!@4mZ3F``aUA)$YH*9&QRAbwqn@z& z-!1eVu1N!+Cf8%>-?Y?6n#j&nXX|QI?ETv=9=40GDmS6gJC5AUpVORW1x||2S&M=f ziDxpe`4ve=@tzZo(=K!gVV8S2mX!B}NI+BduC2yjgStB$Gj?>#8R}O8rgxPHg88eA zm)KY(QFIQ}D(-vTjA7>VxmZ|o5CbHdGhW@zi!(B~t4dV4>koF_^Rcx#VJq+6yD7K4 znyIz!)q>kqTwpO&eKNu{9zKP3R7u4>Qer8k`mS6&RYEV5{OdyE9&%)fildnjrcdfN~p=`od z(5Q)b@QaWjIZ^lNTs&WIL0y$r6@1tD|bxADmWCsP;IX!~Xi# zS3RX`b~C>$9ar~YuSK}NP`E9T`sRoKT?IMvd=sXjMcL-Ujb`QjSR5Hct{>?jy|n%9 z-radypOCHFIF?AE1eZbM`|sdiK8&ivZE#AQUuQ@2ndM=i#3yObX*h9!(}>+9K}JScq#L*MIZi2Da^t2O-^D=s_4w!=t7;v-yO(oO+Bs5CHWglCNQxHI5rfJ6 zIbYp0a?V-6xn9IqoW%0l1)58vCEsHsk>RVNCQ+j0?eVL-1xi3OMu)IWB}F@x`sVm# zqzsw&X7ox@u|#v;j3pAzP`M<#lH;z7@4?Zz@*wbHbIT4>N_TQ3|FW9$V(ozx{qAc} zqB+%gu%pYM#=p-w3a-XuWP3b5;;B=d^mobXL{h=eH}Ll9dCXuH;o=7I=K2qHHo7+# z)ZhNHhpCTv4*_pmX^-r^?Ut*y-im7tREEUVaapAs zXKlKe-uO(q^c+^xMPb@lP~zX+#+>g=4f2xiB@KE7I##-9E!KXhMM$n>x!TQMuS~Ms zcQ{ZzrlB3y(^d5QLyAu2Lr&FGa*#>{ zttQXw{LLTXns7pI$kxsAtRkpEyVp48RImRU6nHzP)fIM&=?Qu4!h+SND3{4YHHsCm z2e?GSH3-By5UlLL)<+XZj?hlgCMW?XYW!Ok&$jgv>Q$Tusv=TL(X|}wPi6$n2Odw=5Oj7h(#BZWpFIX()(egrV(yFME zv7%=AK6B4G`SIF*$$H`8Ab<0%{%Bu0>At>!LI~@QuO&t`flv}5V(ix^6i3f1GBT!v z0t5{D|1;pQ5Mmi|51>C_67SObxo7QB6`%c(lDPE|Vd0fR_3gRwP?7V_&Y%H*fO-?^ z^(i$FAqk;@$3Fjr-^YPb2zBnt`@``4l1enGGaa_Dyw;WGf>?L#&O;>@$c z$dRER0J{kwQNJ63JZ%{c3F@B-0ugs_wgA2Sw><-2Pv2;eF`rkkbuD5KtCd4|>NLSVSU`v1rG4;DvJjv=5*z?o-Pd!Kf5o$Fi}@Q7 z&h6_z%c~5nTB-tq$?}k(un_`@U*&RG*FX&;I$WxUGsZe`6m~^BJ3%10P%f`apo4uFToZRKgGL*^*{PC46yvbB)=#r>A?KkK=gT^Wr=(Gag4U~d!l#s7^;H+d~8WL zy+A9VG=OWsz*pQ`K9Md1V0ZupefOmY?uR=*pbtQ?i4Rg6#L*vb{1XuqWB~6s-MZ)1 z>jOw3%zEwx%+}2GlGDH=rXZyb zkEtkw6Oz(GLB5)y`api#q2>LwS`y;P|HppYxBX|od3|N`?d)vrLVPl(&_R=HV*20k 
zVYdKF_+L8c?f%#m|G2*OKz*tu{iF{6bP%e@sjcjocJDm@48RKX>eTr`u}^s#+P|*| z>@@@4c9fY{bF-HM1om>mf0`}z30ryOg*o>dX<*SI0tbI~?Hj2!A;6cyj4rdkDUK0r z@2wWJ_r%k9q2Yd>Ec-kH_ztC5hA2D68f3R-z{VN5cy3{Y*?}iih7^HzbGhKLXg2%PmbMv!MF@8wP-6P!G zpp1oSyOUZFcz`SU6M%_U=^4Kc9jhJbnJr>&pqV_o@kNhZ?2Ie^<{dlFd%4PE}yi zWdOkP-ogX%lNMWpv8*S%y8VxtT62SQ9_l4t*j8zsd?T@;BX~PjF$TPSzI9O( zr@5IIQ|H%`C>zwXo?1j2_@XnDyA@;c%yFv&-n2wg4e-$;UUK{5XCji^K>9BGSA`zs;wtI<%W%i4b3$8cp@9zr;cic& z#md$tBXVYc-1XF=91KqO(#=-$vKlM#QT+B4CJ=r!lgH@7U10smZ=7Y1tsK0t6KU6@ zWQ|WLNo;g^AwK9vZrp@I>LDA}Jt&}29-eRRUp{NEI|;zN+{9ep(6UH*OP^Nq&CyRw z<Z28&*Mr4}{Kz4eLr25J zkj!F*k|Yc}C6-l0gPHwn2T*T5GWW_j_x5?DE0bT}jnDzQn(z=X`B%P)p6EZXx*e4VY|72=1Fdk_-E?-oKoa$Uf7Ll50e&NF&^ge81{RJqCH3a> zp|TynR0l6Mn9}~ifa+Dgfgs(gDlY6sI5&VlYVnhA(AfwsN;^K(Z)jVMG zy;x@L=$vsSwSvrf@74z{A98`qGsVGbd!0L9=uLMUi+x@%ZdR#Yuu^KNZesqG<8&S^ z@jCWMBcA$|he36VuefKz+N~I+c{V>=D@&}oZ)D7&yV5FpXTqH0EVnY2;^%pH9g^)w zXflh|B#a}MFx(~dqb&k5Jrmox5F!7xKB&Lf+xLZ znfl9-fCz%OLf};9$iKoG?NeT19m`C&l*YuL`@o9IrzR-uKb(4UE;mS>yU1KRby<5Vi)BkCb%br7k_4JtR1g}I<XY8vmiP@ltAXlIq2?uPNbl+>>fUcvK> zAD@f-p0#T8RqRemyQ7nTj(;fs^4v$xU4F^!XJQ;!n3?)iwtV@)N_)i=`7v&3Z4#!i z3ZXU0if0zLDPsVvu!houX$Sw!u`I2eedm`5XFGZmsp{4gb z(Ta-)7mE~n>D6=9S+;)3h~Ph+aR;)=e@Kp%hza6+u|axL3Gp$7h+e(o&bY1Epblem zIkll5MlWJ+gr*LXaU0caQ6Cv$GI`a8h2k^BZJ3{Z8>c^DE5;X8kTz8u9#OzXioZh> zX{Z3aEcNbm@?{RKQMje)QHPM7QP++sZUH_yzoWkGd6pb5;mb@^GvQttetD*Ho$O8@ z=v#z6bzM!0Acvmf8>g+{D~OfSZXKVZjDY@`N^2;YY%iOO+z+7Evw5?}lcW_nNw zCai82G`n|e@Z7ut8O3e!ZsJVuN^L^T3YeMrOdRq}0Y|(;D??E?4dXzW?5d{*cur>Nm)(Iou{yI2|C5*_;}uY5QJNi1`fA(M(48F@x;Z%FO9c#2Loz zX~`o$I3bqBc#{7ZMk78wEp2`gH_RSj1#U?NJzKtUh&@}O|50kM`q<-i34X%8M8O?T z=qtA3KG|Buahe9ZxJGC{3EZ9U ziu+3LE(g1vfYF>Z4Yq>Bb4*98s?t~$UELvBscimrSLy~{RQK~%bpzGGv46)03oSyQPJS=Itx^1zrwx1 z)}dsYjL+WbuE^5Y>2ImXi+?UOP>8dvroc%(-Hl?> zxKJ^(A}~3sV~!@cL2CE?dvPzUaHP%Snldn$2XhaUQ(EW}9kE(2#j_l8h~E}SzT!no zwraM}jSoX~e4!n|cL_w!UY#s@FDk#MKt{tXqlrsmya4acP$#BM zJ=(v;V5!U64D&I+sgs26E*^EKqkUlxqH1W-(or|DTl05ED;B=sAPKW3pYVi~5EdOr znbIw;)d*7fIU)>!_sk(tFii;8I3Ff;k$ac;ARGl?xE1k7t(@BDWq&#|j*pABZnb%P z)K!_K(a+Rh1Z-AKBRaM#@4i25Cw^u8vmWYFHT%7vJ;LRs73$WFJFWuz+33N%u5zQ- zZ+^|*Ff$GF5>=0|70Q+RhgLcIIx8fWtyI&pGBWohv!xRzO2BdQ{R|USm1z?2+ok2~ zGv%PI0Xt{7sL*>alJGXxTug@+n3>-$eXb%j)Fp0RJt}1jnDYItWKaYD09Vm4oHz`% zf~;93MIiMZrR9^ejbSn-$~cT8L5PhaMy;mhso+tkgRzI_YDfw{e9aP``>edx%30lr zw$4psSvP-ps3&-n#kNIebj4Gf7a&CEj2-wP;Is;CC67 znralVbJIQkyCv9Wdu}0*&rl;fG#vbx=K4IX{gI)kFY#lxCwr`VUAGAH!`%JI@nvr@ z1*>jR?(4wyB*xWun>+jV9sZGeDkr(plt!%UtvaNMoN6V$ZTMLi-+8{^7;R3n`}L9| zKyOv&)FPk28uWp2{BAkA*(TEueAOJ#SJUJHSx-PouM{9rlM4B_@m^@+5$n9oZ4E>#V!mwwG zVwvr@3aAa~D38hDX(ICquLMnRnlOmA#`_^+YDVDjMIAeJUGsWScTyL^?eQkwD$Sz1yA2MUX%8y&^W zUxhg}@a=h9zc%{iNSK`dSt^pO?Bk%AWyWLLcD0bgsO~;PDT)LLc-N*{(h|!2_&O5^ z!Womb}i?W8f1RB21F5vGPz?Piv=AeL82fDAF(IO^}Qgz8y;I3i6mu zsEb~UnMup+skG>eh`h4x5pcJwXinmYzo%n(*=3xkucA_=ayz)Ejh<2~8WT>Mp|%63i$ z#Sja<>-J-#WZ}GWT`#`bA?Pr3JJMVX)8MWW`EDnn!WVcej%vvK|lRLN*lcLnqbs%+7=~Y`LXa>M}S7xSna#{p-i7oqGT|%sY zmq}+liK#tj3SYCxMD5`+hcJQQ`wA)2@>n^rV#@{5115=~$Y&KP6D$hj1E%?FEl5}6 zAQ)qENnkA;?T8#sV3;KRlKp3np+1oVBWQ^KdOyHt}1yu{t5Zw3Op$6;<733{0!RlS#DdiZ>f z+FquU{`6c+rP2vxYO_`%iC*inKQZy^cRnQLygl}w&me=uG->))!eDrgYRCt*V#!x# z)E`&~%*!;I>wTo1da$HsP~l$aNZa@eg%!GygA((pWj04noE_G8g zvI@CH`*gE+r!Fn`s}R79`yQS-m*8}fR~!7~I|0cpY!G%R?WU#*`CN*9=&4yBgxj&9 zoB^n2D9rq{??qpeZ`qJ#TE`3RXKzr-K>_NXhzoxYalL`o)%=SH`{3dS*KQ5VKb&se z+RqbbE{NX9iwdB^2XIyRolNo_@PwW_$WirhOA9PQ&T)R{XlcMwa#?f+})p6yJ>29!IHfXtqb`L7f#NrYe+W7MBnQL~SrK1fp&;b~D zi;ducxCiecaGzL%)RI>hN-|m%S4`Y6Z6iFFM4^~XI1zow%RTSrr;cRrJ6`v!5*Hc= zy3JOwwrhfIXu+5pV8%9w#!} 
z1aM9&%hF4Z39a&0EHL`T<7SylsH9Ac1L@HSQ%fHEx;qQ_3m!6IWJE>fvqzZ1FILH0{CvLv z-gB}{z;Ov0;>W@3n;FZNZBm_jDHI*GWodJw-(?bizuSX_Jo}A04Ki@^O{Vk8$4wbE zHuaF1u%^7Bk`@uwAg$u+)&`Yl?Njy>44rIM7^R7Jae@-d%gBsaObjJlF~nQja*d-Y z+INfQUT;%SM{za^=6`K@_Si8f4)??bu#(F5{fQA?hBw%AMv-`W?-pneFTTZ7=sh8s z&Q7rNtkH={(K2XWI+U&po(+v2S}7)70Jg&K-C#=#ifh80x;+EQa4mhPhX3nUFP#wNVV_=d@g_g8 z6#6JM_*{ui&FPp6MbzDNU42dQ8pV1{*$62m72?`>98EZUC;zfLES?bA_e@}o4RmKM z2x~Lm2hkl=qLP~H^Ng8__z;-vUK6>~6aC;}jYe;J2xh1Yh$85@^hi<2u|xUQPaoqw zI;K-ovi@hYw*t4YZr#5c-PbzxgcbE{m5Hbd&*H5!qfONIqT}f(ro-&x;bo`@XlW9} zY01{4(x&|DOGR$&CTmHFrj6O`o?4i3dd#<8gXtdp>`vuR@V{R{H&ZNGZRcFGCj1fO zl}I<9>S5Gja9j}_9w%nYohUVvZ4ZrxosD+yDi7ysw0SfOT7e!%0pHYue2QCQ0W*P3?WAH1%S~+6R+1wI$9qoCvXDQ8S z3&wBubJhIHW6Wm1|4^@Nm+fD8+>aK9Gsf)WMH51OWXSH2fS}2KGladeG_f;LDEzJX zHw^=GjgrFq2%Jm<)3F-lVx96&K?I!i z?@ftEf?B85=ymRas7~rXn`e5SIoQav-L&X*fBkZM&M-UuEw%>oD(fyJY;@y+sIEmT zu>*BeoxZkEx#mp76nlem3P1`4r_R08gHelzg-f0o^heLS7j=$wo@6R(!GikNG>&oK|*7F}%u;wA&?&>YeG(P&pT`j7l zNWi5DCj3}CDrXGQCtJh2vV*+6`W32XX(!(bP})~2WYo#Nm8!w)!no-BTG3|ZPD~r~ zbSlr6U&k1FG)77us*_pvC2q5v2nf#4_}v{m#@tjuOiB{{2TKRN;ER*P&;Z`>Sj6uT zblfUX8v>k;7cHsPiyJ2*O#G1rqeTFcnO%%ii+~(MG2b=X%j_}*{c#~q5P_O+#rb^S z^q8~|W>qLDcVeJ03l6tQ>3Q|Lk*wQQnoLFA;>ff6aW%hhV`~e2WhYh?|^ld9@ zly&H*M;HYP_@8z~G8pDEmsx6oDMQK076mNsx!#DFe?LC)2bb<6c=P{tO3GKhf6>0| zw?#W2aSiQq8CO&HF*y#AS99F(;&5p-%+4j@2UF4ed=d0FOeFpnq>=f5N*Wm$*#9rm z$jtVCp8jJ3{(n-N&S1((H_voi6kCiGghZ3GL_}NB;!v;y0HMiV24}ZlM5Lq%R1~D) zMU6$lgj-v*6t@HiSr0$iUMuZxQ<^*8H`_DSZhSK%eI-`CEQ#Ak*!*6FLu{syV?c)H$H~%1 zq}YVR+kcx?h8BRUx4&mr|1b+$KV2DAiVqyWc96n@4B7n``2gT1ETP6Z`^GW zNhZOLhHt=~-dTum)vzCFsrzhD5vQl+*~g<#)%+qENKga2UF~*rr#SnOa_o0{Aha+y zho4svS+D7O#q856xRhn?=s|6W-(#oXBZ0&HNrXj0hJfVN0E;k&2%pK@vm0RV8WPho3?eF|^Z}z z_6P+L=vE>e5&#N1hrs9{;Qzz7a2jafw*mMfSB10v0}%f_L-&#SIbS`X2WtI}48pmc z(O!rAZIutG{)yYfB>@i1BLM!{QT}HF`We0cGXd!ne%yq{uVrO>wzPY`e!_4rVV&;3 zVfYi5@j}r3(Sd0}Uj2j!gnq(RVJtzM+`jERIo-Z~6V}Jv`C&HF729dtL?1digTVNtnX810s`iLMi z^*B*bk-+@p#cSD+AZ5q}kdXXq0Ge^E@-;is;DEP1NK{Tx|C~7j{XhK3Uz!z&07zG% z8CWMLVY|NX)WlN)Fv4qb5>4|S5C7`#ng=@-p?uwcHsB#U@IB#VK2_zNR|sw)`OZ0D z`_s_Lxj@1!2{O`YKcuFM?As|BNzH>g)3LqVdCRVbICSI-)8n}t!86@&96m=%X-dd| z!i8>v;P%L`oBi7u!lX`sVHGZ-Sd2dLhWw0irolHW~?yK6Rrr8qKPIxI6rug^^q;k3Ykt6{nmByS+a$gfwe{ZFo-4oj#M) z_<4Hz+1Smc1O~5b(cF(*6JJ0=?x=7&Wg~S{MlL(4O7|qRkCNW8afP&&B2*ERoCEtHIOAhg;Ut08hQIu0b4|q^>AK4`bBls4CvrmVOHnfM+a9uS-c}t@U zoe}ynp9xv}Z}9=}+}iUlGzDDNY^Jp3O5(c(lO@q}eSs}di;s(oRFQDxU2a#zXZJxBAE$v2?s)ic8AZ!rr5HD>wkE+cqD`Uw ztSB6AE$$t3FiqR!3PV#hl{YY$RQL}Xo~EttG7uLygKol_&y}FpQ8TuEpzR}Q+JNO0 zXx6L=hiC2-F;%DB&%Z9bw9u}x#5CCINwqh?wo#KL2+B{y=>0G0xb`?~vzbTpDO5p1 zU_ehG&gZHFpgk6NQ>sBb)4-mFO+*U|7B)7^b1Qms+XI!bn2ojSHt?kp>&Zyaxx9PJ zJvFAu@UtbhRo!AIASgiu!5D;j2NN)G+QzFW?UKq)MfSQm+&9cFiFmf-7Gn3u{1F#j zYU2o8>1p?ue(Uu#_VDT9=WrZmFAh0uydQ~lo$SBRFUJw~@e^|_=8N~8M^-%O;7PbZ zbgms{jlz$Xou&5mu9u;>8Z3$G!~z3%gZyHqdxxu4f5{4`{Vn4}@FgrhCi-&BW zQ2w@LH=Q6ajbFcjN)yMo<1q6^9AHaR^gz9*bvz>cwgkx^*A!|yQDL#6i_VdqPD0H= zFN!OWJI70&Aje!lZ`$AazZg5G7}0_N+qP}nwr$(EZQHi(+qP}nwr$&X_uS0nP4beN zWWMX`{8UnB?Y;I_vl&&VQ)}tRBEdg>3mRhB{+GC09bF9}2i{-!D-(` zMra+j?3A}*Wr$KQ+N>G&_^l^{E;1@eV!vTkoZNeuFNB85U4B?}m-^qAt)!im(NhUF zvBK2&G|Pma1m20G0ySK~Xb0YlYjP`(7W@F@RyG0qrcpl{eXljgAI4}Fa*b@M9Pg#a zoV&2s{swj9pGfgOnHhnEIy_~I!l$OCgW`bINkbQ@03`)I{>C_rOs~uo4c#f+evG z>Ub9a*3#l;+9ICyJZvKn+bjXNbw~LL`1;04Po)kDBN5OOhES3dfI?l2Thi-JbhTL} z)#eXW?5GFilNA%(9K49RNH3f?v>q{z!bthO5)d%amPUEx)=bU@d|U!*Nr+0?_v)Uu zJdZ*`c9~>WT-_57F}2$J&L&rGMcz z+vx41;pO>0&d-UW( z9E+3p4=N6Qsg4&}`lGYrt3o5*YQ};dIvK0JX*hG)yQ{dXg=7Qqk$T>J#f zq>Q_4Bupo;Xnj~4?ukf@4gri<2*>PGLL0Vd;l&5=_9v1O4MBQ3pMYgRi8Iek<-6pH 
zhffFsJ6d**4e-p6yK+!{gAD$*dzMBoObT3<{VARet=}U#Lbh8HIjlCrx5juo6da?W zCFXjk^y1B>pZ)_sB)96y7!7{zR=2BFDdGk$7=oCpTMB-2C4XXnaFZ-NiXRg3*S%{z zD4r)U44N;F$*#1##z^$;+Ry zd1|wR0qDy;0M_;VaHYy&g%sXWBAl=sE1Ic z^jwF-ME%QUscjFa(;`XJ-PIeSyulQWySdjJ=kBe}Ejk!bc}%P#`^u)s5HK3e#1>Og z(HR+G1IRlWJ#k4dtImzO_RS-YAdMQ{Z&$*;LNQZ7MC@MVu@lxN_tVLy-X3QhAoixz zcVKk^fnJQk1>N2*9NfgkK>i~`RPwHC&q&f(?=wpSWj z9<>2=7Tc|3s|oVEyf}Bb)dT-Y>K!O{SSCBhVfIL7*vWm9_&soZ#8Mro~A{1bm69UDaoC@BbS}bj+cuXOIbmUMl+^s8NgxSDQe+` zaB3N6*C4`0f4|Ze|J6EPW3|4+sh0U7b~3~n&w=OoL8`kIelu#`TLvlJwFEZ!Fujma z#kMcY&h8gc6o^)L&&-m`%@K4S=5ndxLv+Qb^hb^Bj+4x`Xj1&ct%w%-d}zvARo+mJ znJR$a;5k;S#wKpT4n#dN^udk4$+^wOzzQGu)I2iLUyt98{%UczEx%5T678zle8I!l}v8%1%mc^*e^${-R}U2wEM%*RZl zr%>Xh`K@sCc(Hz+s9&+c=MWp_g6!PBUC3G^u(XS4C+m&XA&R4-00@8mKFwQqL>I`1 z1o!fN=8Q_xLd3)xA9zU(1XjdxP&6~m)=?4{J19X7MI%n6x7(XeHb@2Ouc(jhW(GjE zrsaX?F4|205#eFC=}J`6;^rn0Bb4E%Y$+)VLr;xQftXPj2Gh6cw-wE}HXHnYqntF( z_xl+TGF%mwP5i8Md;xc13itGIOZl2fyacl+IO zfA)?->Rdg=j-vm$Oo0T?Sd%T`k%)EwG-7QU)}nfzlrfJbRr(OqIA~!d+3acf9dPyc z3Zb$^7@MSrE!6P=gGJ|In$QT1bovLZ@-Z3XcxVakV&IGaO!7)fyS|GN9PfStt~7qF zN`qh6RYQ;-V}DSwH7>qd$qH7K*-N&Lbaj2Ys3fq^F4meT^N>*z@Rc!b`S2{AkG$OW zb+3m*Gz2;AJ81J>sHJefvr!A2t7Q?6e=k z#od%q+D+r_&ihQ)?Gu<@5pit=+&Z7_@YnKTz(Hmjakj4{s$hoCEk}B5jfr+!qZ2q1 z%z%3v@sxJ#blbydDkGW;#CLq-cDpaLz5r>WP~IMmYZRLrSHB9||I1zr^hdug+)IE_2C{H--4P8S*P9GDyf-a0xq zcd#}0x>pGUhUGHda4tzO-c;=p`oyNfP?Zi zL7a&X7<1(ZR!Up$sdDz#eHUzkZ>qdre|q4e0*Gp~E#!{y8W1VqjfgK;`};f@F9<{B zv?P9!V`ptpWWun~^4h9lzjlc0PlP6Mq7jRjx9Y8XkKS6F60BN=&2WBxaGm;Q7q88yrEYxcZi;e3Vdi0*^ULXa zJ@OGz^l}L7s%(Sq?*5S(lnL8Cual@42TM1$T3Scck+s2}4ppQdMdbQwwUM>5vSc5i zqJJ#v^iv?1(+x`bMRMt_NgohQSgt2%I;X9G#YO1w0AB%;0nyr|CpqPa+=%aVX(P-i ze?)VjfP62%*pTslx}N`i)(*YV-e~Uak7>x__Zx}wh6?TbBDY2&P7A22*DOP&_u+AY zTZU&ZT1l~vXPYmq$9>jCS={`M!!uK{SOdx=>$<_>WM;KE?lG6dNsMX_M$hdhy`o6_ zU%J@R{+rIT{6`-d^F7z*<><3Qm)&|q_aEWnM$Je-`IKZF3 zQJj&0s+}`QHc7fd@}YLd38jMhK-EE@ZQGP1`fyw}JLK(%5GulF5I^A`+M? z2ZqPCJR~kWhPDZJ@Vx!UY|{zhiLxyB4|Y_KTM~EPj<|n)-h6bwku{0|2KzRcY8EdC zB@Zbgy;vF5LQ`p_qq@(FLESbWy^in}f`)@&)?ioA-&62W*;4UUKOuB|Kv zOMOZ&c5^CrxoQqR)w+$64^%CR-WsEksTU_qa?>JSG3>o73mqf+1;gaEimrgr7t541 ziaj_>DVt4BJ4jeV$rX$~#Ki@RJX#N-{DyBR&xsn9^BtN2jo0BlzL%~b*9As);CW}p z8veBW`+J9X*jn~*wOAs|eB=a6_PysRy}vOo_l;<{8;HWTLXRVa(#Z1Fl>hT6g6#d! z-DU87?Uc?wIqU1(Us4mO0kjF+6S^X85z!5w5q;+1xzfVTRzz^r61Y#cew%x_yF|D& zdks74W%;J5-4=k{!>fgN%UwVxy~-i?@WO@5)L9iU zBR`ng5ziXDDiaMt;z9EuX0o13f`*e|VXIRD(m#ZudmUL94z6fIc&yH)BwJeCWo3!Q zMyyO}tWb3pd|qL6iZ01Ib&J z{CS`rQ#``jc%zP=eZp*^A zrzgK*nOsg)hc-8EV))T6IM+x)Sw|YS!pP>aJPF2)=ZVmq(;DZBnwz5;LP)?7pDu(z zXiPTF($dm&&#fyyGnefOEI%;{)W&3`@G7@mngYaWGV&>7sMP7t6Q-g1Yn&H~4NA0l zL%3xUcdLpIn64t~4J`r;3eBN7Sk8u+s^eM3Wqhh^7*jEi$t7F%NOTW@2>_$>$JCex z6As`41aBOpt)D4@rwpAN`>v^V_k#DZPzeIj`c9g$J(HM1N!)O=hfjE8CuXa7e~$kH zf07}g(r`D32rB%6$7=f6+CwR>b0R5z&%&)P6n){x*u`Ds+%GFDy+*;!rZ70^Phj4` zMMC}U2>zK~((`(RRhfduO+dwZ;3y(F-*B9;-vb2yy`|C4xqEl)dwGVvm(tjKo#fLo zCqVoDUp@Z6^*1;7nj?~r^=62PH7!;%6%*QBoqLe`&yC&Kso;$fe7Gc*%P*oK3*45~ z7t2FaB8{UT{Eb?TtI-Y?9#)tZ430aFLy6RG(U;I!Z%K&zh|v@;5{C11>#D7_Eac@`2o=qk5YH&5(C^z>x zzCjDXbU54Y8rRJnQ2aWh8B^a^`xOT5NsFF@!>tN@*Khso#Zg+NNeA>~d>;gi$4WtxdPxT@@*JoricW9SUP1{kGeuY0x1Xp- zC|r(ujwefK6nsR6XjGY|1Q0VslU)fsiwa)GNL+Y`9K6Wnun~xsJ;~Bnxf&-<&b7xyfJ5+e-3#gvEN&-wY*)uT4!7PCe=HHNLGpgv?Fb8-_w9vNcPdFbc-by z%)ryvkq9jtG-&a(J>OYh)Ee?0rxpCttCDWa3H3|Ek?`37$-&_!l}v5G;R{icgeYz! 
zdPLNY1sr=N7DWp#DRl!3`r0UP>)1Q zJ)-tNoTsDW(E z_OfV^ObECA{=e-!c9#E#_x@Au$@IT&=lvJZs|l8pkX|JyM-p};mT;3CV_@#)HLthI0cq*BSQlOA^iCuE`eM|Zy~_?0ZSwpU?QL2!9)B5q0yoe zkP+`69+1G=je_IN&5APv0q8MSK=px!1?F=UAof=K0D%8;Kg}8s7qTBEq+)3h=K@z z(SZf@OSqvx>;nsP0RIC;6nzTH@)(yfs386bfAs_qaDs+bgLniPIK~j+-?lj+^2#k? z0QSMZxbeWEL-Xq}3}9e>tVH7VSl*O-g8a*fx3}{lXhN^l{jeBVP@?%=@#Fc~FGB@9 z3BP{mvh*Rs7i$R#IFiCSfldN~EUmsm_Jscx{r)|B03`FOl9EUe0G z7(mwj{>9zO7IRtu$zT}l{9zGlc@+WfR&RIhT-`kc|3b9Q`t9}Jst?Vb#nz;6T=OIU za+j8bK7qYKMvMam820U7@~NAZ{Vxm>-g z0|5C=J0Rc5(*cv{HY@n|`H9#7{|(g?`b|^AhYHx=`sJ>S0o{+ui?kn!(vVOQ!l1Y( zhZ{5>=*mYR#z4Dz5NGl`IO(kd3nR=ElYqHeZ3Wc>1^H2jyQQzg{`m~(zkG0mifo9b|*w-yjEbmL20>R{!01z02L$P@(R8{RaBnzxwauH7;?Qu~sS? z-N9(Pdg(bfQQ?g@kf}otF{f|sXFAh!SkDSh{whqcMOZ#`u-NZMv!0iq_j#??m!rP*kJ&&r&)kcTgy+)t>?>2t#yA#-E+;cV6*@ zbXI#qnJq?YDrs4Up|7fn>)$@cxkysWb%;70hW8p2Vkkfd6A8H=&wu*s#c~?UV7zSBWm^qgi73Mvy)V1;G67Tu=w0}7B5)34aF88b8_JFs7kegMBl5dx-3iHi?@>(u#&q0zIolJbb9e3}Q9E(iR1W1IA?bN0 z{bh<$=kIpveVw^#qT9@A<)>@&3!|k@xA1WfVYTccG4Txh^yHckr&CVqcGdK*}a&|wjOXXbjJT-ks!%qwQwc)w6rMZ+!c7r%)b?`jAG#CHtevJ&0FfDbyC3Zb-mGsc`oBN7k^ z+jjbQDeT{y1veC7*>Hw8d7+GK(Mir{K7C5bKJS8v!{KU;gBu;FY=1doN3OUyQKkwz z(Ns8$&I$g+TE0DcoYjeqIXOH?yd-Do4~nwVn#z3iV^_TvKi;ntmc8=U(m3LAq2Im9 z&V$VorpH2=Ku=2Au#nR>Ey#F_2d0ynb-|2)I*alqh|ciddvWw718nTV{Ib``sSf=0 zY8!dXv>fSDRr+IPP+?9wd%qM;cqYkZn>i#lC^FZR&#rq+(O;i(dm>uMi;u6o=i(ke zP_~Eh)8Te+o8fLyK|eUFnL7fa#J8qtg%VROF*HE%X44(aR5D`0Zdls_Lr2D&d&Lea zI?>#!9WzHc^hN4g}zu-1Lu=hq&pU_4@2+WAaW$Xw9g5HiiI+(UiXM zQTliuEvsA3+M^J9UyY|f6|%DtR6G(5+kBq=%h~|?kV;R?ww5O5D(V5~60KTdR>SuuXq(2DuiRb>x^C%NNpX>sikt3jU-M@kCN}#bWvMF>oE%wMXT<3v zH==1I<6N*^q-Yj}NpBr*ttPovUv|bs#*@5bU!HYf3KsFG)^``JU0Gs3P!1o4wQK1u zsE%l^+f{cr(i(mChPJ5eQ<^j!IP{Q~-zZmq)zDtOLP0zTuVuNACpDPZ3Ef85heesj zCrMn6l)27t6&j6f=X;lWo&1BW@6RnkEubj&%b?9_BIO2QbihFwfgsk!Gad$zS(#CjyDS zX7ZwN$qoT~vtp?n-5e%ZrW~FUugkmJ)4z^d0=t!Q0yBruaoDbO`_v(_z3uF1MUGdS zgxt2|7qDe+md?zX~gXcuoMsPcil>4VjY6Hj3fad_hvyiU3rT0OO`ciF^k z@@ML{8I&KO%uQR1nzVarEi!=%;@7;SM;Fc0MxEVX1agPni$8mLS*=D2Fd@DEkkxDHt77ltpC~VUXdH%PE5AJ7|5K0w ziI&2*j>`5$VJvNU|9z`+dOhu-+z5Jcv`~;sJ*ZJNSK+RJt-V}gOxKUI-7fk24sMd6 z6`gspBbX;N+rwTa=qy5QZLlZYBR9D9I}YRe?*R$CkuTc-x6j6}& zR}8|Z*R%Eo8Y~##+CQ}`i}SLMXgOKUYKm*>O2y-sy~a)vJr1`5)cWI}8xmC16$(Lo z!sI$)nSa*`(nf6zZKD0NLaQnt^ouerv~L_qjH1X~)+d@Zv|vDHlFWuZxL7N{hMUoB zoc^@IdYNa?YkalGb)H#A#Bi`D9l3=P=Z7W+lo-RmzViY8xc~ z3r0;pV#E1(P9}{2d64tesD$}=676)7vBC>M;lSX5f{8geMR{T2bMkgM+j3~L;`#j> z)-|LsnJY~k`btJh5mxvjb)2Xptl`H3rlq;?MsbGi+WW&!j=pxSnB+@U2A`w>8S6gq zXVHz-T--zERQ6@165%CxFKFq~OlR7bg}Kisx^-bJT0J+d&^r^@=s-TbLSB~xDbqRM zkpIC8d#vbUqEcCe?@I89d(&6}8U9gDU4+SN2;D`t>{ zV0saS%tEA@Z2cJp$84j-IGinz*+C551+!!Q*+SED&}yv7lka$3%*a7uUT+x+qb&)2e#GA@pv`2@yhkMvE3{*()lt z!qOIrK1}><6)sJV_*Z&#Q9aj-zHtTL81dos`zElZ3am*MV z=Vq@+4rtnb`sk#BsykSM0)A6L80S^z76*-gnH~X& z7*H8YKSA78cnXm)`vq-oanyENPAA*f48s>UVmk`gxr(R#jrVF4&wB4?Y){76zP{p${mZU&PD zE76``{jE{!iXcDTNif0EjUj*|d*a@B%5;Ln`0L8IWb?_pR})xNdkAN}|L7PQl?}bX z-bnd{HRAoTiP-cOChni z5s%9_qpvGl7yg;}04+~nxR1?xt*|yOyB{eS1f#T>=`KC$)~tUr3T6Kyx%N)>OFJFr zSma84DAYM@h=1!mS{)1#oGZ*DFqVCG^#;b=Ixyj-phs zggx1qZ?G=#z6A)vIP?z^h5YiIn z8k{nc>F#bR0XTn(RzAU36%XE0>*Pp2fp?3dXo?AmRL_ac@n_t;h-i?$^PX=zb?S;R z0~)982cvBQdqF#-y`Z0RCE3}!62s|lVb>Wr?@4^3A~X*2ZaQ3Du8TW2hAgI`S)O6D zSKFtFX|u^4Mu3E5RE2(UC1u9Upe|aNhU&<5Y#iz_>51zoYBuD?HXRp1=FUrDe$bFn z?=e8vkaVZE3?@o-6l~Q1d^%pL>9F$|TXl}e7ovG^+1Dvba6(TY|5H74*)V4#nM{p; zm{)?D05>cZc5GohlM9?j2V4$@hO_u(m_5Hh)R18i>I5=7;1TjN;uD+u;qd3L?|82k zq)w#SfGf8j?uP+NPfd>MM#5;@?5KV$F2r>^*9wWIeA0^0>Iji|)-}%te;JMU%#+X1 zjLS($TI0jThV;|Qn~Ghq48b~bkC|+C0T40%VYptr+hrgdU2Q_|qs*LI*WqTC{jG%J 
z%g9Z~>2slg*Cf}L<&b{svlzp>7)$gwt5sn1yD(*k!OVVhEPH$X`?7khs0Zt1&wI}f zx*U`7LLP@n&yS(k(R+5oOgUhrtSVyTMY1`)xIJ$^IBo|B9laM>n~XUeAC+Nr@;D;@ zs^^|Fp7M*#4*GLZRyuo(=T9;$C8!p$%--NuXNBij6awB#3H$7J4c)3pA|4L~R0xk$ zj~_+e=mbPg*Vp-hV*2TXg-;1-!`(2cm?64SECfO#UOZ~Wrs=~W{I#&lxjP2A6ow7PuJa|@m8hfD0Qg}dtW1;+b|&}`L)U;7He)o`N~>3t+!HV2t8;+ zXFVp!${9=VC`1WIKrBgHX|CPT9T!nedZoD zUsj$`jUGV%k=RrWKHsMtUkd4V)+m;aB*!OSlkPZD2O~=)4XE)F_IzfOvuF$vF)wi; z^|UoNH1@_8jS)T~&Qyy{Uf{|!^|a#%ur%e)El7xHjori|$e9>fZ3A%XZKvVc9_Wmc z{EhP>BAPr7c1tO#j<@48dvpo09Bg4~Ob?>&QAxvyML>b`i_7J>S#G7w;^1CcDNB`OqntMcUL zVj|Mb5R!PJZXJ6NQus6vBxGjf@+=*1jO+H(Ak?SU(2mPx09CN^^L=>pbzT#+n?Xw? zVEYS_1()I=HN?sUlvhfBzR;h0wNqH!vOfkZ}nbL07&8%)*F zU1r4d8Z>bLIeltx)gR_4xdem>K&k>64b!o8G^m!|g%JP};`Z}B=35X6v{MQ-u8IX< z3}r1YF;5+%MEcT8mA_AD*Tt05t1@8Cb-~8%rh!ji>OhIjJ952ZMQF!MU0&7h4H~yc z?+D7cPOfhp(W_D`-ceGiCFocd+h&Eu`a0?dkIc@o5lp~$f0(j8x(JSv6aund=7dP9 zY&j*Vj`%)IhXvE{`t6MgJA&Z9SnjmdMzn3K|kc&jQ8AO@(c?>5|=P zNB`IqkVm83Cv_{2uojOpGha{lWz1j9Jo`Xl-$^=jhX$AAtlT&CEIn$ml)t3EW?wV4 zyuJV1u_G8uEvOWEi?&{_&I)poXc1#K5j)n28*up=vkTvQgL0NqA4b8nlAu2N!knez z{}J(JZkmYXcq{?Ai~jU;>Nmw_J%-7Hm@yN!(UCFCnJK~MmTTDyzpY9?(L^S1taRFo zq!WYaffD17iWtuW$C3UivII~+drNl6=D%EyyBMi>=66+LuwQCOH85khrPn;`eI5{a z0Nl+tN57VUEmwTT{O;c|#4$OZZp(ZyFx;)tF*C5aRrigcM7%|bM8!gW{JfIlv$ML* z^v5!JZb7`#Iq^2?IOo9B&S?{T_wl>?1mUTu&r^FeItONR`GvH7kRPo71)k9MfchVh z9Q*$f$uY7pGyk{1^PkR>jp@G$9TNc~GYbdH{|4o%-He<|HdAb=l8X#8^{h8rY)+$W zHc>ZQbn0yy6I7z1s9&acN;~D_oIZcfy?ncjx~v}3AJeU3>m?vjlPo}GucG}8U0Y_F zqMc#10XU(tiP6bPxv@F9}Um^BDaj zAp6F~$022*5Ly~v?VA{w-M|#esLGXQ3#{u`P&8!R^)GlP5@fksK0m1;vy$BdZ$+PEmGS2_Wmv8U-U*TT!(OUDlXCj|f5JSLyRbFYxd3qhZ)g8bn1Fz!{oz>G z!uIq&`GGOLHNR(vK_`dSYV;$@1LOdjhQXDs3Xqe_HQ+P)UC#Dffqb)D#{Q?z^ZL5B zd2LJe$!?OZTT1O2mTBA)oiY9{X>r~j16rS_nW%dt62&B8=y8e zvvH*7k}wmRON-dV`dJhb5W9~r1WnEe?Uol27}8kJ*|#+J(WvJ)scQ4Dq_~3C(kHL` zYfI^$5+AUy=S+2iHlo(zhuHT4k2<$DgS#g>IW_{AXJTsb)#a7z&#{i-{S%ht2X6Rvss3s+BXqe4k_S6hF1FAe=WtYLAdi7m9s zGX1=BQtxc}+d?nu!mtr#E4z~j)HOExOjvS7EwDjysb9(mQM5`OBK=#9ckI$m(Fj{I zJd)>H$6Q`kr2YUEz`oso1>x~7a4vtPxfdtpXa|35m9>EowGy!g5~!{2{*g&@|F)mw zZEtPtUUrJrWrrWeMUEDkw23gt{+`4zzCPG@-!lJ(`?a3DjVIzVHLNRbDVs*OQ`F8Y zywc>EI6

))XnWNTL5a`H1G#->55z%Dg*(jy`=U;N_Uqq_xoHM+NlgePI74Q9ak z)@Z4HE9~6J{AX=#pZAG|8<^S$w++QfRfk$MSc6__2oi!8@K1UZxAayTIX$w~gN&j; zcRYRN7Im)TNWYu@s?F{=I$gfPj1$8&%()JFk`Wf;a&L4SgILsU1e9K@W4pPPY1}GpK2IKF z3GGU%;v#&D5{vD0g*iLFy91wEVBHy6PR|}4+U&SOE^=2CAp|gWI?(eihOUUmJ=N77 z$~Acz$28j?zzv+gV-cQk3;YD8oSB~tU~KbFJy(DJqQ(}L9A}r7Wbe^0Jm*KSD{%IX zHH>M|5R;$c+LVEKg0$V4y6(e`i*p?&aZSzGv5s>K9hivb;A+mhI6^mhHoimt-C9m-o==QN(Zj%)ow>iu9OPn)5U`5-`@r=bp&aW|uE)Ng%}^S~_sAR0RP;+KS~eALtv~7U*;nhi*Ae{o z$2ZU$UCp#9w6acbcT4e-jt_PLjdW7f+H^%IMr_e+XAKP;%n<~x&41S6Vb|nh_b5KO zm7hH)k9kD?b&Q(D8~#&VS#5Fp)m+ z(Fzhrv;eMXF1jm|Sq;;RTkDfL(%Gr#*M;!BrIJLaKppuf5^#Ghvb*Tc{#U2Kol_Nab5NuAnMeG7<}Yn zM=!A*u;$a(Oh>L0r+;-pTKThj-uvO?0Ie#$^n&grGTLEyB?tCH<{8$tp~WRE%i z*S98b`A+C1w(f$yph<-0FBiO)gdBY$74HmjSNm`GDc#JLmN7tcDH!0%JUf3CM_!*= zHH)@ci)p>7u==OI4CqxdD@?4n|0Ez|b-4r}Cdp@WVHBjf)B~J`<-7oUR))y4iB`c4-Qwib@9}Tsqlts!x;V0L zR7N&L@~6HH!Qe*6(lFU+gRuRD6+L$=^5^U9gH^f|t$Xka6b*SYp?H*Wm73-oaYwDY zPM~#8kMG)_M&>CJ&jrv9P%hwL$A_d+H!h{#P0pN;>V=k9^9O#H6OgYG>YoNI02iN5k$WA5T}l1g^@wDT<`9Sr)NJ$-^`ylF~&r! zNN5~QJ7zX9L*e2Bm1?SS$-_L*k$qrvkXf%{v*bl=Iss|b6_aU8C@B>y%d(8vFlbJl z7u&KFH+RNx4LEP)@W9C1P_2<9t8poyfV@b9`DIWso@V;oBWDqh*)|~5?n##R(j5vpI-? zkCMlkDEMV~K#vh0QVt^!xT4eT3#^m*GA{aQ47j>Uv)V^hwwZA z1V-LYE#JWfW>%)@E*>`c|%z)*c1e zj^(AQLgQdiU-xz=xKIsNGagkdWeA@H=LF9>+xO|mbBw>o@jjuA^;R~Ls?vR~^CG2D zOyM(G*^`xchb{7Ckv!Y74H*8@JbB3TNSgEGCmGJKgl(clc9ju-Kk)HWxl7B|m6ShY z|E8A>(rlH=JXN=(S9=Dra5a% z$euMV9)j+hMoBAH32!7}a5U`gq{idk+?>EVEWsk+=?KyAlNU8^k1sVU&!Uv$16e#0 zA2o}kN$KllB&h)@F(@uvWq!5HVi@hMno1lAH1c=?fb6ztR| zSGz1_`EqpI`@N=6TQM!rE(*Rowpt+1DYEh`$mJ?ChC)n=5f*4V%6c0})O&ew7e%8? zP+6q?5hE{)o7^Np&uLeOoZb96b9CI;Yd8h6GK+euO9pc+qH7Siz zUQ{m3J=SN1v5aY~IW5yl1ch_sr@z) z(dnLqCYtS<{3O;c& zML#vl!p**1HF{YH=W1cs7V5|)euNfnc->r>OUAeryfy&E{pbU+qP}nwr$(CZQFL= zwr$(CZGCqhCi60r%x|bl<>b^}YpY(@OOR#I2I!{Lc?A$1Psjy1o0gO9R@lbSOBva9 zctgR!&i2y1PUt(Cq~3FM>!szs*!*!>_(2|$1a$IgmXxC!jNHtT!*cYXEBys3Kag;{I z7(7aHlQLWsc_F9m&4l8iiM>YG`3hiQqb<~(&m!KgfnZf*Ss~B4An<5 z*bJ_Lbob-ZQ(X>H5r>e66x}?fgTI=GatP6(`wMfXA}$A8sjJ&lNqrVcyr=jaKhYus z(rQbO!nD$3iXXL%C|YqucYoFL*X4yrQ{{*fNfTSS-rmQDPCt=i+$x_Ozpv9UAcUhE zNi+g;71{$F*roZqEZ2AX-C&=UU^1q2_|@|kwK%=ijL34pU64sHGB8XDwLLu-=LYf< zV&9@->56~B$&G+DV*HftAXp)KCCp5z)0XuJnzcWBMl!wQfw4I8{di7LSh6(x-e)w1 zKe{#g!$6@$Gy?%}bK-dMFp)8BXz|HBrn18D-lZA@JqHmav6-`9$SAfn4O4}JC>V~g zoEUW7EPDaj>2;z_SB4l@lVs;E@iXotmkN zicC{xFk{}%NP#Bh{(FF>m>zNLu1N&(vmcrV-?$wW^+M(%oT0KEJ>+K@Vkw)O?tDU) zdQykT?Xax_h4Od?j&++_?W%e%o)dfLL(7KHD#~n}h?PS%c9o6eI|!lDJKP%YEV|SQ znDg6h@J^F@3cWSjqLTOgyvc~Aym>tUG(^0KC<2x<6_7-f^Jo`suAGm0WUJ8Zg_j*8 z_p^I?yC-_rf+fVcrqbHTb=JRC$R_XIN^OtyTKhLfvJ2BeUXcR zG1i+o7QgPq2X5y>(xJEz{ndA6q4AqhTgJ?#wO-g->jq&>OA2yc0fFVOJ;&=vHq0&a zYnDHVbtlYZxfP7FtCIk7g6MG$I3kG?dNidkw#U>OE=E_nNR zvVUT7DbjkbX=nwd>Cm9525d+U5q;|G&Sm$g42*+AmfF#KbXQ^)2wCVem|M!VLwX(P>tvLY#D(SC5)Ii-97_|pmrORuM zWNE97l)|5ijiGQv=D$b}A=oB&8C}`e0y?3aKcosZ5<&Mt^9Y2aQ!LW_rt-XHtkqJy zZ>lMu3=ZMhD0w0G6V%WBk8bh;fsIchrYX%WqNP>%aqCuOX`zjZIV?I1px?KNo;p|@ zF)%L&C+}GFzASUmwj2pl-gQ-W9(9uN(;@nncZ?3J7oRPQp);s%HM|Qsrr`E_gPOq9 zuHf^~fiVG|e`3@AGQJ&&p{dn>5d}Y{7!X8uLVNjkk_XgCK&IWyHq_VQ+&ttgKXFbG zJd_rfR_#GHxO$U_WUoei`t`tFz5fa*=68^se&r@9SLQYn0Sd)87su@E#DJd~w=Q;9m8qy6Q}L~$&TLQMfP`vQ;efj5uc8|1OWDAL|T zETyUx>T))dHAS%Sim}6}*w*b&wWyssKUv3tqC7S~FU&DU*KwsMD!sY-MC^wI^fX{9 zO)S42>}`q73lW0*5{I=JIt}5{0**L*9arUOvsQyR`(4SWtn`P! 
ztx|#8NLx)e*-}GA9ZD0j74H*ES=&svLU>Ad?7HYuO3+Q}BCR1A;mMSzA-CEzK&G9w zuHH>LM`<%aZbeo@$=F7O6U*xM33?+^my`(#K5t05bQ(&p9uv4I5WmO)TN&|}iu z33jtQ<7W#P#PuDw(_RSoNz61}-QDZJ3>EI9k-9#XL#`0 zgz%ub14QqaB!^cUr=uCkfWz8zgkb>_>YaUWtoc-|33s$(w)x9zj|lF*!>$w=Ib!TDE?^0mg3JK^c@Oa4w%0snjmNod7r5i#)%@c{iELa`E6HA9B*z8waf_kShMA6X*$JDy^_v&p z)K65|7^9%M=OASaH8YP{J(Xn%Ea*>vo5dAyS|5R%$psMt+49|EEjbmL zjsw4U@mR!&dft(d*ZeiEqnjPawZd$Edn7R$DN}>Sx+4D)4`@Gx(kPFuJd>HJ*@oa2 z^Q*?p{r0f;2sJZMj+iZDj1R$JyfO=klZDrSZ>0gMQQQy5TL(#e={+%J{9P$o4>HH7 z>@_dHbbN9B*!8$dE?j>i`4*X*J`1W@d-tHx{xh+$y&hTL8wLeE*V|z{sLuqE0Jm3C z12#qGBCV#&u@lb!X60)5NU*&-du9XXn=P=SAguM^%lGHN>xhjPuAZ)rf{gLA8|~T< zK{yw``=cbGcyV6{rwydgp0hUKGlSAD2!=*wO|&g^GA}1}48MZ<_0&i8LyW=`3~e*0 zCa*o_iISrVUR!jTXq{QpnaWNVX*3Ll+~AJU0rs+N0hXW^b&Gnl>s}%lp;A0p#T||N z*<@|is$2SdZHtw$=ILjRJ(NZ`T&E0Ly!3GE{I{zT z5b8)UE_I5W6gm;0eccNq)epn8SaSVt*itf7tS?pwU<3dvz3;T}oZibhNN%BV=fS-U z@eOR!AVXmmu8Z`XF^gbp*I1)sL(#{9vL;Vk;R)Bo=qr@RK81A<=r9Ymmq@h3(P-FJgj+*Wi{0z>*A;!y2AvLE^WWNu*HCe*LiEL;%r1FAaPAEMK?in^9oyq;KZpVuKA!tvy8gZJy^#T8f$8t`>j}}BY5P|6?z(w zjT=}$iaow7ckS8cc^L6W&y{dJcKvOp_fZz!{*8HL?(MGP&i+aQt1#_XF~; zQc~#E6F;4XobY3nt!ome5k7&cvxX?`!^6|bO9>jP#{+-A(=ql5@E4!GEwy`QPqK>v zKM4NIS$(_psMVa0yHolPZD?3KC>Z8dPRL)LmdG>kZM^0=U06;$#rp#Gd>SJL%N1zRPk6^F5Bp#5%(6>qhJcAq@#)zF?iY86gV<-IlAZxoY#oH@?QyV|YJm|o728|4~NR6H{}cK)5$<$qm)@w;rK9X5)S6(4zb<@Q2C zumtIs#6OvOu}PU~Pc7oh8LSc8iuKKM$^yOewut^|<|>|Aii%Lq-&Mu-D|&pbu3RfU z(yD}7>~R+hf5q7TqbQqjNi8Wq02tJuzB9VxfJ>gWy0d*VcZ>a+)g)-`G_H zhkldAJF8lFXePWfES9^WcmtMG(+$IUpW>6aWy)o+AL&?;#%<9*?Aws@-N%>^A1_#0 z|6I(Co@QK;W3yV#CY!o6?sC#Y#IAyO%7d8h5)hWGYwV0mFXO5y2aJkgv;)?GqDLeQ z2VQeJ5u}cc!XK~5?!7iuPPWQt_*Y3Wp9Hae#wj4jcz)Vjd56Q@JPI-286$q$=-98FLν57%-;-+vA zhN;?gOH^+O(De|s+?Sb3vZrTU9O5SwtlFaPr1V%b40M3qx*B!FrdnKyn`n5p>%@S@ zF}_Vu_k1J5Py>sIjMhSuB%1?SnJAbyme&CaJj-WlCS`iuJS!Nz%L;$6>jE^W2!Xk^ z6U!}767ub*!(nJngr;^*qg!*r^}h|;N1O>E$`P&SO}!^}4--3Gta<79POJBG0d}#T zEgvP8-JsR6duwh9#j>$Ow$-lasNW~xUv%p;EmfAh$2M^i<3p$1yxMa+rMS3)R<#8v zZ9gkmgVnw<{W%#;ze()u2b5s{8U>$E;rNKPKI==;_kBA|zUYPyB~inzpuW@#J%g8< zU%5BJB<|hYFUDNqbQwVtuK0qD97mgA`xJ$H7~d0Mq7tTaC!@b?eeNwH-zn}WN|#9C z_8MxWGKBMc1ZrHl&Oz}XEwWysi02!Yj@xj0%T3U_=l?NF8JN2c?DA37M@Lu3SmSG_ zT}~I!9gJ72vp(burNGlEp-AT;#VtSm0$Yh_18^?u#8^1{7{pbaQ>6g*lIe>OCHN|!kfwifuft?X2M#L^ zMECrf0{66>OX@>EJ1N|yZT%WndJvwR(1c}s>mP%Vq|h?5!r0YrRpl3OaqR`{CjHfv z18Peud1*+qAVUH&dUb|l`)}Ea(DL8AFmYc|j+8u!4$cqOYG7B=r*I^K-}v?h0p9O( zg61=z$IZhq^&Iy)f#-s~bRS6$Vg?k~t~U8!H0xpgYA71{Z=L4Y?nS!E&U^&1{q=F) zFFagt>_4tFSqj|zU`;rvJ5t3MyK7f5u^CysG&Qau=4ByyyHk)~h2l8ccT}h%_SGW= zQdtUh!xJ{n9vovn9=xm$p235?=nLkx3T(;`6vHh_K@OYyDf$xrD!2AQjA4QOb%=u zSx#ayiSAA2Ht+Tg4I%QwL&qBU*Cn znymz@yCAbdZ(mSR6qyX58vM?t!7(s2HjLhJK#TLFhRAKwaCVnI-E-{idWs1Kr8#b= zQaEko>!S8dr#5ju;qGZ~-&^~V+Vzt$st?bVHAz}!lW4*!I*v!t$WP%;NRarqWm0M; z^%e=Li0}TE$aB2eJH@a-X1`IhtsWoX-`;?%1_}7_I$K(8H?>7}l=TUbGsI?L^VsV6 zD2L_^^=AO9`ZmwRL^W%|9C&{ZGOdnV$43~gK(=OzI5`~!v~-naJf_4xxx&~^I=;jt zUf>gOx*lLD^BetL9zku6&xdI40s{xq>#j+05;OE}k`S0UWSK7x4`}qVDz=e^#9eH9 zpjgtuokt8%lSPGxXhqkDGb)#NWeefMN5g4ai;g(SKmyXrrR}-v8K`NRT~5?KZUMTk zBn{4Q5hvCX)(ni5P$vOINfj&LyOQwBCoU0noiQvs4xu@*SXOOzX``TlU zmE1RIerxl}RByeV>S&bZU5Km@Ri9(owDL^5-wARJ*Q%oClEFLM1{bl$;cUkXZ>RbF zXGSuS?36U8?@!=wCov);E%&EqplyY*53B-NYXNw&Zjm7O87k(teV=iN-qeJ-+cqY0 zm`#Z*7c57*V1V2S?k-)>k&9LAS5%~Fptr~aeV$;PpC@I6(oG)J(TN?lOyFpIX3+5C zV$TMUI3PTpom#t1gPswNuQUEhoFyvyvmn)qs>+_1$y1LjDq7e8sN)UGb|I&{hE~)`ZrS5+wk_>++#70u zdm~^ZGb)sP_;8HHCQ90Tlp|2ebABMfJ;B$t5e6D0Me+mJY_zpz(Ykj`SmkbF#C7Y& zJW|QC3oox@MS-N?on)P7HMrpGK$PI<5NpnqEb|PEJTzFO`kP&h5(yv#_F0*JDBk)O6TrA8z+R4t)GDDckcKV}18jK3c_Z 
z{+&K(We9H$SyEX?nNRkCKsQ*33;*c!%uH-eCdVc;s+l^YB`Q~3U8Bfrxsi6Gh_JdO zt==A727k17NhXGPzk3{fKRbcoIans2ltu$gt>MEbNdB{z3mm(R=1IR}XAH_j93`!QNMuf7|&mRsBs44BGMd z$B+w{&FE0S)IoEgJD`Ysr&fwYaRYDGbr7Oy(W!}G)VA-%3#?Hc_mXmdP$a^GiA=EkHfS#;SoXqjDv7V}-ncLfPutDC~<7HrgFyO@j{vv(m>|5VTqn-aEiSs(4;!QgJ4>PlL!yoY0*=+_Fhv< zN)f{OKmRFI>EpGUEtjq<3yGoR+=C@)O##Q$Oi~%Hj^m^jKLlP*3~3=1V%|xoU&BZu zYYB(}5he>cl@Z!w=t>=EFU4C+=dXB+;O4?M-^>8pOWO60(e^K>A&1N0o3OoGl@Y{n zBTI^1!&7>%?U&x|TH1WC#XVyfr)@PDgJRyd^w4BX1n$&c8S0WcrjiLF4!z_jkVtE> z%$@JcU=6flG`U@sbja;krpa*q%ZBBPr!C^Syy^HYkl7$NQ%zHnfKGyU)4|^sqkz zyuhK<5RM5Vp1BwmUNJ}%OipKP3-M=t_JMTXb9nUva_>-+hu0RHat4Puv@RVYzS9=- z^Vc(i(}&(pxEY~_Ya<*!Rj9gSycq0BbJsWF<5DmsqMeUd%1)t#L208}ob{t$vIz%e zc1#|C_Wv5c=P`dmoUR4vcfI!zXzoaCEGP6t3JO-vpw>5`y1U9e9g~J_DdC4y*aA!Y z%IS1sQ(bm$VzL}bvXe*ucHXlLE;k5c&;5#d=;${2v z;<3pD+Qf8IF?CjidTYOzA@2;)fAvQfQd$p@#TUG1hFniOESduE-fd;w%zfb#mv`&_CkM@mNU3!-jeCx)5wpU<5STxp^8N zl?Nk|=pR@bxgtEss=&p4^cGhGhMo53Pb(Pf1nCZT>Z^py76SveH5I5PYo2@5PmO1rjZeu zN+3dVPf)Y9ohXu;_6KpQHc0;>T%q_0?+DkLa?AZ2g(Bsm9U0^DqqJJjiu*(jV#=7( z$|-xAxCS*eM^FRtp`-I*_yVm3*&b`=L>V*a?nxG7!sn`)5%-?Ls~6!%92|;TTT)Q@ z@4oB8?!IES;FYFA=>RRjDe}-roM2u+s<2wPwkx8+OFp|r`OQApssGQCy^8}FS6NG6 zXSqM{M(mY0Q9|dR_BIJKg7$Sy`dBmDEhMIQ;~AWTtJD`kDj*adOiq^JmV!BdRyBCJ8f0KK z3Jxg?G6CjfqYzrWy2ZDktomk+tFQ(=>BVOqwo&fm1kqN=>FJyay-QZI2%V+?3S@U; zbjO^oz}`QlT@rET3dTuZfn0z2@~|#-bR4V=@KETv9BoawG{Om*e?4xh+2zTN!Z(=>P+<#VdXt~b<)uWRoiQw2SBclyPF+lJ0?O+3q6 z2^BC$*QFFzh^<*2@2mav#S#%gH^O~WT_rG7_PxLgJ@De}&@R5U0dYC7TkiEv1$4uG zu9eR>MsJ5JvHAi6%s2c=L}Fxnr=ydoYmq5G?Gw_qjfGT)4Q+x3P)*m>zpRsnUKIht z0hw$;My2F}Mvf)+@b`_~2BWs!+B&l*S*Unz1b0;kS|9{rpo>f^n~lO^6wJWw=Ob2~ zB;84dU_1-aZf=8-5;nYMuvqT#bUSn`(hvvP5ME1#Wb5sJgV+N z>%kGIEA>)c8HiF(1N!d72lwCdOPqBYS6vrdE7kL}2K z332h;?M!1FL6OV6I6OaYx7PIc%ld~3)Ue%Dfx^MNSFl43J)2U^&>rHXOb)jmX!C@w zV07U^4is(0gtnNYiWJp;HMxJo~2oI-PdF5-{c5)vWMQv>SL)8E#iE zC2_sBT^;+wrNe5uYu8bOA!Z^jPD9sT%oXY~jS1tfE`|v$3pU8W=+Wv6YO3-y1vzv8 zM!P8Z+8(5|OMC*~KTFTJaG-R0px^{9zqaxN&OnGCt>Z6N+$v_Pf9G=t;l3axINYwh zeoy4J8#MjL;8u2kgIyCYW%8;=kcrK>uVK^LW67$Z76^Qfz#-{h-dnYrv_ldFeS!du zYt}*K>;N=Ty8}3C1+E6q{)tw*0v5ftBg!k+OwkqWmodr^k$ezI$Eq>;;-pBV3g0h) zf&1?d0Vp}!&HFi!B;QnHy0m!e-DSu z>u$TEkd297gS0haTeV*|+K7@$m8G1cV(O%SA%P0cqJ5{58}k^KtiIAGRMqfJ;;u!o z$7#iejeY6%KR9Pdh*{8TLXm^FV|)TpGth)~kKt@we{5Z4BKd0|qYly3ya-#6rz6(> zZorJ~Ve`Q40yitcF&wGY=pr;#4#eZ6oJtjI7lgAUl=<^rE0OR%cE2_+#2Zk1wv$S8 z7FfbLVlOnK9A@he{-vgjy?Kjxxa7Y-vx0CU$<9xe|4Ym3a_%qNd&r%=w$Ijhp%fHe z@H+27=XTh!!|T`pf9$QxjjZWTGCkA|mZG=jy;&xL<+UI+js?qoyd83sMuvnlDo4-k zEPb5cqE1%~bL)}zN#YQgzE-pQCfI-p7Gg^e#$X@**C01NQpxJ?(a#zCtP)0xmb-s* zK!}+MfN8ieXI}uQI+u@uPWG;fe$m+?$;*l|^*Cl<-bFQr9cPnRI|K6TeXmd~nS}!u zr2WVu@z`?B+N_53$$~jQexSemS6}nVU(mb~4HboG`p<1vFusZ6HR_D6j%WHzwO#v; zt$RNJxLR%c{{=SW_f6&8!uptK%$Nvc%R=56x4Ru;cus5sM zT52L~iX2B6x8~zbQ_A3i9Ec8Gzc<*VjV;w544` znWdydhNUAz)zw4P+}zyu$U}uMu(`6SC#bYTOBa-x%+AhFzZCDI!E0lWe9V44IW*JP z;S|3=Z5bPDTx*#d?O!2py{^hEEF`WC^dzi}O$;QcD#{C~s`137smk(Xsm#qxE`V$B zRPC7S>X`rhQJ7g999awSS?gQ~n_r*&*4B1rhVOC~Gtqtx!r2jjhGt+(M^gQ^rFo)d zaDEi#k+{-1*EKo0e!t0DoJm>T0N2{Tt^=ZY8cK2+VyedZAih$v)_NxFuT5EZEh{V#-8VCB(yL! 
zl9?L(v~CS8@AEm^mdgHG!awsd2|cnw_(yc-#E>;aRA68y1$IyF#t$w1x=}Ozye53@ z^56SG_5ad5{q!b$>T!JN{o@WVeSYfZ5NlakAptgff$nZ`gudLy0j|P-Z!iow4*XDK zVEWE~Pl4*2o9@5(JigVeWETC(KK%faT>pIi^H}D*>sh8_Wc<=JIj7b?(X%N0b!KS6 zX9ZgOx?b-+x^N{em1RwJE%j^vR?~{WP*2bJjgp-nSXkS*!9!&KsbOqpdapzID!J5q zXR?r}kd&I7|F%QD>V;1EfennxEF0Y2`VnJ!cp?3Lyn~y1y?wzQ+JUCd-Wr0g-lF!q z-Q5j#S6}z}VI%kj8G6Vw^YD*KDgkS_+3m>C_)+=(efxPV{?*eTx{kh<`N66~EiJ_@9kI8Xd>Qc;P_@W7r)^$qA51dr`0l3O z8|;TIfw2?;`B{G9@wG7E;!X*@W$BNOS?tQQ$(1sL#RFq(3M=YXXI18*C{=rZ<27dX z0V&T)#?f{T3y=ma2|qv0RM=UKa@>UH%$~vw>r9%U1SPqZ2VTUnmZe{vW&Sa?1^Gcg zl7kiR8L4<~NY5dU!8dOXF*{9;Ae)s_4l*Jd6yj-(_W^Tv=4hx#_6D%tJz`2EMw1gN z3j;lr8&hMWJ=K%apvhb=kGQzeb+?XI|La9Wk!OLqa{u~^Nd=3z*^ABRblMzS~-$=<@Nz)~Jk0*c1u zB6uKE%$(k?u#nQsLI`5owsvVBLOukNKp0~6JxZDA8WL3m!>R2JZUEdNP`R`KdhvHQ z1w9mmx*wxZBX)}j7yCUUB&4}U(0sNiH(yU>V%;eC<;|gAkrk_T}J zD)L;g#v!eHqfFmx7(ygvqsfN&ASQ99qz!C`46Ets%+;kV!U>p=793*+`;~1X^=iPK z71~P5MK3UUd;oHU{OJ8J*=4pLJ`LtFGz;s(rD*Qiweo>Uv!C4fRm9$w)B({Lv2Taz z=thwIr;Saa(*7Y4GH zSFQSN0l~I`(FhEs6%WZw;nI5kHD%L$I3bPkQz^#SnFEud%D;}uyZg1g%=&mc^LLwm+Um?aT|0@_ z-!0@%{i+li&a4h-b9P<#o9UG~27%AvFI=QK98pEs9~lWIBEF(sz?)o#bPqSq7C1_2J5yf#GpA`^Nbn zf-lU^3tErdbzyoC$)MHCsnGGiJq{;IbZLKJSy636m@sH5kAnUBW`|*o7(AV7Ra0jdnAO}k&%F$U{IaT4K)jxAZ0XordOxPDra#() zBT1?@(C0Gzsk@S6B-CEbX(>dwAQr4N0b3=8196!j^5~}t!++Gb7 zHV1R@V|psKfjUmR-jjSuHdIgUwoP|KC_q!nUH#d~K-jwn?t;PbhDL7ft235$g6{1d zJdY0Ik9c@~K@fr}wVudc3rfK3@hd!`&AhnZ?!yz77$6G8PFjjK!pD2A|7u!}{`QH# zHn%_6!ZBVYl?KxpF+5hoPC{R5!Q(v*{>Qkk~12$iof%Mo-f`k=GJD@Br?Cyei zh=S(kPOnRr7e%vFNBn*f8ngNHIht4L_cz2ymhmuc3P!Y=1ap}ty#nmT83U{?f>rzn5R`q@4rf(;Qv!rqdA@NgVojaB!el3CARICeP4RjY_pvJT)9_J;kQ} zFJwUn#=^$%#++mnu_g);!!fF=15BTF!Ro#nR7?_pbDLnAI#%8x##s94$i^jdy+;1oVS)Ko7OGex?(n0Ra*D z;c}MS^hYh#(h^SU_#^yhXkPI&9)-*cFCR=KHLa2=@T1+%?R$2wF`1_;?bQ=sZJCAs zHzS)~%Q!BR^Ag{gqJm3VoGv3>zUd&1RBrLRf@h=ZWA&{v^hH0+*1`=o2V>P^flX19unJ5$Y8HPV!gLKPC z!%_|}*}~%ufQCPtAG|@X^%P;60TwXK&BpO9Qtf|NX&{2Y1U<$l0pT)Sk^RIs7P6{c zn;h!brnf&?oJ#AnCcfwi@TjYyC{gjBFd0Z?%4CU#Ycz%$sk~eDw~ggNL>C1_DWH+K?R1hu543D7bra23u@LA`X+PfuL{7<%RqfU9g_80!#qWRecwJ879cgmRNm$ww5Iixt|o2ZW*})P$jZUvQ5Vla%RY!iVIsCqFMGH6j!&^!+J&tAi#HLk;7{a zngdihKnXE)b+#LpsvsnAPIxn1Ju`NkQWbCob$KZ~IqQV)RcE{^?;mJ3W%dbbM>z(n zS;`w7H%WJJ$M5J!$g`nQzoIW4kQp!-+5P4dqb_g4f!@{f$*v@IG9suuTj3<1$yQz` zXtEQIZj>D6u0~g!z?}4S*fL9!S%Rk5+u~KAB%L9F8L6v zZWZpde+qr8HjeATX=%zOaj?V?>Q{JnsK15!A4cG1w{n!7wBEq&Yy9;oy%!139G9bq zU-7zCCXPs@zRNYSwoY(rY*L+-)9>F~OhonL9qi7F{j=i!U@6vCqYRMj1W4KiCEwF=mH zV)TABn|SBbULI&nW(0>t7?*+NdP_)Y%0jVdJDV!tZwD7HzXVcHz1%4JR@Hq?3^g~q z;G&gXUzwhDVD__B&nO7-P#N%XSGz4JI;L7M`T)c${z`I$rbJ6iF*83~?w@Vk8f1kC zRp0OUL<3JQ8g_k_S4(ILazk(3Gg#w64KI@X4s;f4=9e#))_!^=%}`f>D0BbODu(kQ z^%|0f-;pruIhMsK=(}HZrPdl3!^`MI`oL((I zbiTcgkqr&xU389VmZT-iNE&1K(N485KJPe_hs1C9r11Edb%?Rm*k7g%P%(g3>cg{R z6F{_1?cla)I}b^6d@}e_D5WL75OEtrl53<9!re*p);q33KR;s#?C-}+L zjpv>%FiY~nMt251I?vuPopqqG8&SkIPlvmPI(l%oXL(d8NyDtrirL6?rq?@oGHs~> zJX}(rp1$C6|2xllbEmbX`aZ38KDM$r%n*W&58GTK>FNB9l(^La?W zR?C9#YS>DV;%yBRk{3lG`QFMz;DwOC|GQ*BNuUwxaJ=ZvHwGl+@!OYxsG=p8E(TeMaP?@k zL#g~PLVPCCCNvLAAWm*)Zc^F*m2aTHMp|4*=bjH_CFQuu;3ttXKjZ!pQw!9WOr%~o`zRePi!a<76e ziwA^aEL_XEM%v|)7w8Cn@LZrE#Vr-l)Dyn@Sd6l{XSsM>Wku*ofAQjoZ9*TIS*2~bfaPRbw$f=?4z^9{^d4aSK0YJYqO&ru*o zMjXBCwuhWc=#-7M$?V^AXaf2p&TTwDcZJqrKRCV7VdI&dZiSFWBAC-eHpv+@I3@IR zHQ^4%g3sqsXPJMbAB>k&=Ddaoh&b9>QHGixE}!|D^-celm)$jBm^M0hjCocD0*8h( zPlc%gIi8uDXz&MY<@wR)Gl+UdXy|cMobj%x~a}QhGQ-r_}t@cLvsVG(# zm}wvgma584;Jd{#kIKos!>SFO43)c90uuI>{o5MD-1DWF-ff_+!V^$y-Q<)5(%j%S zcPn~@Wl0*cChViL{^f*HkD5=0aVoX^Yf7n^yUK(` zKW&g+d!1sN%b-<=}n;!1A+Kvxz6# zF-a4;FxewkKrUdGgtOVqvA@c=y?mYyw_pmh?z{|dJuo`Sa9%R6bJdpeR 
z1R7DctFY3v$h;Xe+5Q$1IiLW+FWM$9-pJLs?DP&l{@W z_ieF0Tdg;Y{pH~6ItcgzE;YgW+8>U9VQ5v<#Zn=0Zje)YGbW+c(_$V0-@c}KD!s0? z8bi>G=Lo1pqLD`yZ7eV-V^<8N= zKSC99ITi<*`ys0?y{H2tO1bIGo2{5janQmp^}qe-&L#Wdf$v&IbXAwCJ9W1e zJAm62fIYXxMp--y?M=p-a!a}j=ZJXfVr3Rsw%(lABnAQk--Tn+Hg3(!0v3#~&clu_ z_sWqw{i%Gu%6D`*MiYdG-3tR|*^uT&5;S+~@z?Jr-kTOGQC&XrE0$n}htma{Q;~VL zia1*kPuMGF=^B>uX&#bt1QSNG0vd)oHn4KgyU20u0kpCX|JqNvrkZh;8Oq&}Ii{J) zfCBcMSY^z@8z0zmn211bJkZ9~JM*uO^&()zlKF#ZK(_P8YEUobQ68}G#f3bryOG3p z?{?!CYa|~l6|ThP(53G@;~ZYjzhkGvY{bwf8TrX*En}fFo?vqqE+PZ~>6h%XFdxs7 zi*`M6Th`7R`DLe<d>$AE&w#L* zTQnnkw33X;t7JHf}Lr!m0H5bxxlFv7ZEsEBnl5u>B`m8AOH3Hkqw1gc^buj-&;ZA zE)~QyTWk$sC!(C|FrVN1pR?($8$_z=feHA5nTM3DCLBcx!`@c)L8J`Cl<$+9e{ENt zM3J<}l&Z#-w6t^g#@B#2La-fhNe_aoLSE-m>+^F8OHrdG%RkdeMMiyMbEV5uZZlTn z=F6SuDSyU*l&!j_V1~GOX7%V_qTZghaJ%*;MAEgFWi znk~rc%?F>U5mqtujsqm1s6W}O_Zlu2G6^xEfW1kjpd|_2miO}5h>Yvdhi2(J{3;VO ze>Cth3Yt==JtKj7WP#f{(XDw0O*Iw%hSy8z(ApWbw00&OLy^|r8$k5&Q{#|WrYv8- z(5aEizduKXmn!YdA&{fNldZp!x?)Sqdj-rAlW>N`Z^A^5$Dt3Od|g#uQtIf#%?HP) z!m3ip64aC-@?=w`8IbN>GYWI;shv&oGmDl;{fDu82ok1=)&$(PZQHip{k3h|wr$(C zZQHhOoAJI+`g013|fFv*3ZJjAl8VIzEp;al+ss(qf>&_4Y-DCG@;1^L<><4=+EC0+;O-vcZ`;<+MymnyK`4S40)LQPm_u0RPF> zo;qTi>{M-JbuBN&@0o?>NDKwK_wQfNmNA|)Z=HOg__!+ZTBLC^tIr7dSV|NkvSjGaSM@GqNh!^aGN^Dl?h71*%m`It=|MP^i6yxps<(iKCcNP1w2g0$b>h??0@3 zda(7ULSclKr_&{;WT3Hpfe{SeT?M}-EPQp6#`n8Gr4h?xzzs^3m$fe1jR+oTbNi2A z?I87&0Pi9s_A`p$k1bg=mX&zrviW_em8}*jO$)I+-x(2ZjPOn# zup1B zhswR6M^NECeixSUC|aWAgKl*B*cWOxiDKyfA{N_kl>&>)*>2?CR+JoEsEYo-&+>5^ zCZDT_hn1DZ?z=O0jW9k8=ABTF;nsj2xTB#BQO?!3; z<}OG5uTH~B_~EynawIkBJpHSPJ3qq`(Aj7PUd`TIayZAaOb|2wxMS<~32-0ms!H9dg&?)rr`UHcD2e+x z%11B?$zu5Fi|hb03l*h1rd6S(3#~W&hA)fNX}WLZ8s8&Wp&;lsd=lL0pH#H^+yV|k z5Wx+Q9=CMr?;L7;3_o7Z1JT1OLr+7?Z_|_)YxNe$huW;#1!<2$N?zU8L z80k~Gi8ZX7sa1|Tp1B``!SRFgU1)%hl#aOPY!?;hXQGGo=?2j~l52g=DGLL`f&sp- zNKfI~^WfTd4sGn*8N1nK zwyTd#olNQ zPC6a?HT*UcX2+b#pJqlqv{Z-<;Ou^6_kG@e4?+#3-UVKZEF2+!_$!(QF3%gN{1E>h zpq*X5>V_;;U$-;3P2#B9cfNHRS;fc`s5pg|gQlZLV$8|OGO?j<)7~^Bix);!=s}j` zOVh98Jd(v7)J||6+J=E zG3|o5gG?W|Cy;;CCPQ?A^O`G@ePDJ(1dma(W!8%^=`-)rk2wYQp2QEP4Hv&w)uhWM zGNW`bDXvVtQ~~o9SMJZEDdp6%6Jo9W{Ip8xPfwr2rCK#hN&MfC>b)W}I73}y!Y^Ls z_OVNElr15;+R9u>cSI10pMYh?r4C?n8Yjc+KSI-Z?vnHdQYwkIIaVMzT!6`r?y8I@ z=0!kf+;xPh_Hx7yb0*VvU-n!2e%=nIHYf$lzv034ArJjQ(-o*sEabht5@muagKBjh2a2 z>J1#7>~Kg`NvJa!4g~9^x-?Wr4ro$N>8spxUvK+W<+4*j&cb=5&vEP#UN!7b;}g|$ zB!p|^y9yH2!sx<(1Coa_Ati<8HS|g)o47{G9+>vjyz9=2?yCOrG}1M zxi1T&R9Nn3u2AA20K}@Db({))oTj~8JWh%I2PCu;y>Xx%qd_=QIvejJD9yk;=qDVn zYA%JVLDJhZ3J1JrTgAZl#NkyT%&WDnJE8<&6Q8R!fpqT&RV?(z>pgmkOW2Frao&8X zAlgyC_i%lv6jgLh#AC9Hu{l0=uEC^9w5&a!y~QMB#XJwEO+!8ZetBGX(;a}c0;nT1 zPhqHm2NJncRSCBjhY4Y8VpukMBm8w};zCTLl<$p-j|b0=3rR)V0V|&O=*AGI2qlj+ z?J`+M?YW^57&*u@gMjrtD%sKA6X!;8?wQcv0MLZ;>f6XSE2Cx*sG4ctux2MkF_11? 
zaJ?UjZ`j$#wsFm>ov^Jk^a?HKYDx1<2Y}qgiqf|_;gHv(f!*B)ad3e=j_>8fzi)k(|7|B_2vfIaY(=cRS_0Opl*Q)CT z`v|nT3W7$T%F2VFB4DMP&N;8TmD+nH+G_f790-g{B)2IBy!aut{D|sF)1dp`=dMvo zRq7a0oR~;+j8$8S_?FV!I$^dV<}fX4L_boJ#H%@X9Pj~#Lfa%ANr2J*nFG}74geMZ zhgn#B%P#(41Dbl942Kd_i-1N|od9*+iur@kE)i&jl5fW1|K_+T^$$CSYSkB?@#+f| zoN>4}**!DlAwklV=vM*?q2XUZtiq89!3)<9blg)iHP{u7f}%X8`GXuZ1*v|1;!`4o zJy)u{5t}JF@b8-O$smEW#1|F~k*AeiT68`^V{}r|5+aZHmoxpwM|Fff&r4cNt+2aB z7k6Vv_tEgfUD-JKMoBJ{&W+WkZ}@v^FjLpEhCiHMFo*pL!}riC;q`!kZR;Hp6hrJ> z`XBNI0K*5Tr?YGvUJ?G;>L6-thFf)>u&l3MF{vATCl0&2cH-oTy4~v7W$*_RJztutR2cl z1`@axN!a|wS}oCx=AtiYDS4~ajMPDOF#S7EH^RQ(73IVB&;f78_3vqo>oi zc#{6|FIykkz*r%V{-(^4`<$Z=Bo(Z)daqnSN-xB*Ow4T|$K>REi`f-;0Q7Huw_?Gd zuI*6O?5J7Q?Q&J*J(F&B3F@%&#uB;ww{_t=0p7)n+Za?L>qDj&5oYVHT@SRV04447F4VjvY;F~chJlp_ZG6cf13E-~iEj4MC-dYH+W z!Aoux%FhPBHSPE(rNh2WQ-AIEq8ZIWBkom4oE&>Xm_>BJneDEm5QVZq(?ze1w^eua z+6IqFrLY>Sm_Yk#7p=E-FTvzYRrif1*;b-0ihv?%%qXKs1Jzb=gOgFi^s`-lc8Wkv zM-_)gwRr#4rFa`|v1i>MS1rhDWz%zJ&p%6g!Z>B#cjtw4ag7 zy2B)mCkNhC%>DcT?Jgv<$cEYZB|gS#UJQmQWS~TG^KS4VuZ#|7d7*HLG_Otaax5!d z({4D-cri>}Sg zbFpWWH_24K!WBWR z_$}al<+v<7w%-o5^xP-EsjD5d1|hGrF(LA)^fpnZpDh#%Emefr6U~aCQThUtU+_)G$Cn6IOTpiXSF1{p^A5Qi;?ZV<_2nb@gSnc*tp{AANv<3C%E|g-$|;Qt zh_*NX=ZbbE*vZ0<7V-%M(&9m%wKG>R-)j2&kAeSwjqqHW$=>E^rv#&m80ILAgxP%J z142?5wM%bR-VZRImfD~vbhpyNP6s&)JXLk^_dwkDSK1xp*?FM%^A72TzTHI-N(+_{ zC3C=#=ihT4i_HSg?CVn!u0;0A4B7W`xBsaU4NlOADr&jig$$Z=(?_MS!~{!HOS!pM zh?@RCej78?0ddPxp9eN=mXv#uRybBq9e+^Q01JX8CKU@gWF1QpoQ zwu1#_ri0~l1ob-$Qp|LWeAUg(h?D0GBT(LK2amLt2_FhtSwuN$qn)G~%1z=ivC2p7 zGt?o}$Qxsi@Fr9!NVHIz)XGx;tLYi9oQt$mv6lyLaQn zERxPGX6j<)rZd`p6QszhCiG&k+`FFYna$lD*Bv>Ff>%t(U~dOg`SnNDj+-V`FA2>l zV{3NI|IRS&L^dEzG_Fa>xLsruz#g3)3U+q$aUJWm;P4EKFMBXR4msym(R5ti_b@dd zH}1&iVLSRs5gH~UMX^{Jrp-o!vE#I-e~B&Rw-Bna;hSCpy#hq2+L%uRP%&kpJ#myMl`nG`}BY3hpOQ{bq}fanen&s{;hKyS+ul zvOLapn@@KOrsx@$6nT?fC#h*acq`MVWonFtJ;6Z&)oK=raJ+RewGxPkLU8#WD-GqhKK+T$R&hL{(^Bwr=E5BX#WR8ZvE4U2I(hK#0$yWK;9(<-1-nPAVf{tL*Te zTC}412y;<5$ISE;)Ulbk&fMc><>9=|*@L!s+z&On%+)pQotdVkLAni$;a|cZ9 zveHSHz{>*%rGK{r;SM|*QgIV@??zOxXsQQ!?IQM|UZ-VJjt>7(^p)0Kz`19?lfU^NyH%x#uSzcvCeUqYS6vc~+AI~0_B*xa>vdX7JQgrPkux6cniLDdVlR8U0 z=}Z!~z*D0F&Igu&^aRNQOn&`>j=9dxXdSM`X+Srn=d(D0F4C7L?aj)$66`8BL9WvWgWfrxD zYPiit;vgtz{XNd^%D*SJISbM&_2B-;V3ffvb8eQWBK{c^s(KC%w|bNXZH z@f+C@dUFOc*l?*DmwWW5%nN^(_)Y3v^T+uhC5fk17trIbl~vn`*Oq7tXuXoPGqB+qSL4NY$}PB?GRa+@_$Www-XtS{9LDfQ3L^x zJ-u9mg3~?U70qrmLiRY5wK2>-fH7=|!!qoE=}qxZUv~aV=z>%udYc6JCUmBB`~<>B zf9sos&NWgBW9NUsc>vHXuAF zcUP18JQ!j{Tz&c}%eV*F@c*FK=Ho^8u@`C^O_)3dY$i3iRAz(#gFf})=mT;9Y-q}z zinhp7D$;e{6XGV0(r_=1J;>bR9g1OeUGdqNh!VPM#eCF=nouc{CdLPm=t;>02lSK9 zTU5W-i!9h>3)PBqD=tuQm-B}3mlUdADL<_Oo#F2FjR3qmJY8K1uG7}zq>{?n({9bA zVu^%wv*)hTV;sH#X?=2Q97prMNc7nqcKweDZY8-e9zYHXjmAA^XJbl_ujQ{d>1UOG zJhYMUNg>h>A}*!HG5|7dA$HAHk8~K7ko(j`(qqhE$>!eA=E&;Uzh8Yh_O?q}kn9Wv zN}l1+*Q+?z=&6LDm0JguRdqlpD}QrfCi5;aCkQRPD;E!@*>+1Y(zYzJfq+n$K@QIE zI*({ls6L7N>i)IT%#a=$9mzT$WRV*SD~DV)Lve&|P7rSTMu^kjaJcT|EIJz+q+P4- zT5$O0R6ZKPCn0vu%${i>0P4x>WdKw;nQyoY{7kX0I?g;~`kYBkS4<|^9O31Ts18OD zWrfu{R8A1E<+i9mFJx{L$-PTjqyDw&xTXwszo+6~b`Vo%lJxwSy=7_$1{rEtL0)$5 z6)E}1K87fyn^i*p#rrxy35FF{JW!^}v{snZT)r>|CsfeltqDgE3EBwJWv|Wgi%Y{7 zmht61nGEE-EP=n7XlNTcm|qXO1T(o)VaclrCZ*d2TyL3be^fr^G3m*uGyX?%F>*Z~ zmL(k&$c9)O-^Un=`Lqo1oAu%Ztuy*~xWk59Fk<# zd5$jQx}E~9R-)vz{=~unk5LPGHIos4k0}09Lq8Ay!(3uyT=4>nmCz-=vGOT$Fi9)o z>oJ@(V)Lkok%Uz|*(?X#B4ULO6VKvq+ljdZrhHG4*WypcYIPc_f|IW6kT$t5kHbc! 
z_#`o8hAQh$PHHAoB@Dr!@a9`b(ie;&df0ur9IdnHv4bDnxjvMwe(uYw7g(Umx#z{` zdNIvZWB;|mxpheXg7AO-5F?cMrj^RQ^8!Xw2*QXDGGBL|H7Ny95Ak;j+OMk_=Xf-~ zZ<-Yd%Tq-=j5B6OR(#L0NmuV@fJ;txUQ>i)F`(u92p@1OMZmDf{;mJfOA8PVDSKn9+o0jQ|O+oI>406;l&qPZlwvFcm%!+B=iU%V=#D7M=zW4=yLh=2z zc>YI$%(|kCg#08kDZ6n!i!GLRQ4FPdvCo5m1N}6(j+)W(**;XY#dk!`A0+hD>%I@u zw4PW|xwHIl1b!0JgbinEI+uR_D{;vWJLVPZ?rx%Z7eqTgIB zqDg2te)XEBA-ZCK-U;xKFZsIfH^gnx*95(*P?;Qfx0ixh6>z7$R!`m?#I0y8_xh}9s`$p*5e49lGq2$+KL+Tf1npc3Zn!-eG zlK=H|w8=53wmUk$0el&vB`Ptb`ku$Wz7MbxYlYCGCR~Sec`B5yIi#JzFE8fF%)%iJ zR^w5TY}>zkt?}%k5AGZE3)*zz&!M3iGP|)yEtyTR(vc z^g6!BvmyO~`D~KryFOYv-+^L}Ys7HB1&j^*ti;I>&Nw)MzIZ?%JYD+a`-~C(LEQ$t zsTW2{j!?T$_3Run!mKzBCihm0kO>_xE^9AsuuC$2CZ)(N+a6o8s;wmtGf%dM}_DdCM8A!u4_ z9*{WqWx#dq3|>YnLwpUyT1W#Jqn1-(VN)og3G)cE8#gOG2@cM5for8HU0=ul8)sOx z_a${_-vdz3Wve}9g%A7ozhEgiG4DX1&6eLbu<-jB5_Ck5*)Z{~Vi-5ed@MQmFTVveR`3^X)^WnVLl zb#84xKqzo01pu2I&Iz}hD`J=m^5D2TN%a5fEO`%um0rPvb{FR*KcZQ&JF=w4N6F$3 z-B|7{Hh#0ii}0>>5TpbdcMDnx5=IXFk*WS6K-4^%_BmDTRaZ-yk3(SOsr!tW$qlF* zm&o<%L+TLUDpCtIYCVng(*^zH#IQuVc*;>G=t1BN*5yRZIA2Fz8R`rucMec*IHRyl&NH3)OFE70=i8Z_cOd)^6JH*44H%hcRPlr5Li86*Bm>J|``pK!~Gcm5_ang9* zK~cPnGP#8O5CqBjtSfdKbzbrci;avRl7h$d)54PIdE(sKsk?7+duXCK7vez0up}F_ z1x^IF$5k%)^PN6TO?h$Vj9vKmo$HeSAJ*!rm_NeHdjYaxi_JB85&U#|dh2~S&);Un zr+^ufTukX$qLN20d!XgXWDoa@{g?d9f5Dgk@SeBH7f)A4s$QxVoX_^i=;VQyjDstuA_) zIu}YjC*?WB*PMh^eb?@-qC%jTs?n5TaWaB|KX}ZS)Ku4pDkRemYdZ1@p8OUBfr@(n zeQOx^pX3-j5~OJYfq)rF)_eXzpJ4JhI9eBdNP0CVdv5aS%+#jc zgXMf3H5iaqBQjYc#7D)eJJ9XLc&PPB}HlaT26To|lQz zAo$Dnc$#V$(b2n^+v(0>kb8Cz$vI$J>_3m$Sm7pFW%IlAC}Vr+AKUBfQ6nl%C=r$6 zkwW}<*&|wC{&aFPB5MUgM5X_EQe`zZ?gqK0xJ{wG}(}x8R{e9l|+R+*Y z(Vzi$5Rck`4I67%J+Zktsk3g9(TK2B?89|=Rqtp*_0F-0SQ1bo_oyKj4g)Jhr2U{U zk4hFPOKmSB9Y^2{NGrmZT0|s}FqKh?x!(?9&l9nUrhm3Bc*W9S4{oPT6pk zutvY+f-|lM$RS;dq%prxKlBUOZ)afcF(ZaN0WP3I=d{uvcW0Q)K>N=Aoi={sr!n=d z%SDTdk?p%GRhb9P{6MiH?9DERFcV|l8to%{$ZilX`?Gv@U;2o~a`rHEqsDpsbHk|V z#J#$%_HsScTNp4?z-WbZu{yHGrJnM-!?voSSwqABTyB6JZ^19be}G3#0eJb z?bMz;LH37ZxZ&62v(S+ITK%gq&9nHH#Z1}&_^w+)!!g^HOzh8)B1T+xFrg6TZV}b{ zADx|NVW8g+_z490-%~0-vo(*nwe`RdHi2aXPTf@yZTgSN!w{6Mv%i0mSe>>E7Q9C~ z8MPA}Y`%4AT;EoLH9UYXw#>|A={M3E!anH(*vMu*&RD={&-g}Gg(BLP7)HJxc>Th@ z27PtdP`rL+js)>&tFCz^rwr{NH&t4G&cSapN*AN=aqdbHo-Md_1ta>7()7fr*HOy$ z-of@0ce1o6&Q8iEU#o_NyHSXU*JcJ98Reh+kM^zKu@qWy$PO86r=k=-|b~+7Usi$CTvAln_!bDPpxZLSjL≻|o|b6;9m zgKm#ojChZs1(FOao>Ee$vHC6$5gXoeLRX39q_x@Vf#q!tf4x^h;lQPNWvW({B1)*5 zU|+)kJ?*`a!u4` zaJUxJ;Io)t;P&#nStt;ryvGjJ0B$m!6c$^U`QIbAV6}E!W&?XB#<`lKZ(Yg#RrZEo zMYZNr#7DK_&83zD3hm8cnh>KzwoPI(r}m3(8_BZE4qgT+CXt@+%58G4D2p|m;@759 zGDPZyn}4Q9+wq)R;no#undP|fv4}66w$1~qF+M)A)rKMaXW+idA{uirP`nR}& zx!A_ei)tfL;Mv-7a9*5N`JmCXkT4t7kCqx@KZ{*`W3T@6=xIl4x%H7duvFM$ZHtZn z$MmWqi+cs>E-(BBis|_TgDG3F^IPUwhgb)H*1$^iR!(ji)7jEoHd{fh+7RLLItlX0 z92Bch_6N!bi4O+7As1>#Ei}m8(=38H_htIO9JObmyS_yz(crSbln*Ja@fcqfT|&NA z_fW}L^3LP5AOTjqJ;vYhe@+)AwA{K;rNvw624g(!#!B?#A)WX^aJTu+F51uxNT;R? 
[GIT binary patch data omitted]

2. **Push your new changes on a new branch**: Feel free to add or edit existing documentation and open a PR for your changes. Once your PR is reviewed and approved, the changes will be ready to merge into main.

3. **Updating the website**: Once your changes are merged to main, they need to be pushed to the subtree repository that hosts the live documentation site.
This step will eventually be done automatically, but for now, please run the following command to push the updated `docs` content to the website subtree repository: + +```bash +#Ensure you are in the top-level dspy/ folder +git subtree push --prefix=docs +``` \ No newline at end of file diff --git a/docs-page/api/assertions.md b/docs/api/assertions.md similarity index 100% rename from docs-page/api/assertions.md rename to docs/api/assertions.md diff --git a/docs-page/api/intro.md b/docs/api/intro.md similarity index 100% rename from docs-page/api/intro.md rename to docs/api/intro.md diff --git a/docs-page/api/language_model_clients/Anyscale.md b/docs/api/language_model_clients/Anyscale.md similarity index 84% rename from docs-page/api/language_model_clients/Anyscale.md rename to docs/api/language_model_clients/Anyscale.md index 8fc5241fd2..c9485420e0 100644 --- a/docs-page/api/language_model_clients/Anyscale.md +++ b/docs/api/language_model_clients/Anyscale.md @@ -28,4 +28,4 @@ class Anyscale(HFModel): ### Methods -Refer to [`dspy.OpenAI`](#openai) documentation. +Refer to [`dspy.OpenAI`](https://dspy-docs.vercel.app/api/language_model_clients/OpenAI) documentation. diff --git a/docs-page/api/language_model_clients/AzureOpenAI.md b/docs/api/language_model_clients/AzureOpenAI.md similarity index 100% rename from docs-page/api/language_model_clients/AzureOpenAI.md rename to docs/api/language_model_clients/AzureOpenAI.md diff --git a/docs-page/api/language_model_clients/Cohere.md b/docs/api/language_model_clients/Cohere.md similarity index 87% rename from docs-page/api/language_model_clients/Cohere.md rename to docs/api/language_model_clients/Cohere.md index 1133d8a107..f3a39e1e66 100644 --- a/docs-page/api/language_model_clients/Cohere.md +++ b/docs/api/language_model_clients/Cohere.md @@ -31,4 +31,4 @@ class Cohere(LM): ### Methods -Refer to [`dspy.OpenAI`](#openai) documentation. +Refer to [`dspy.OpenAI`](https://dspy-docs.vercel.app/api/language_model_clients/OpenAI) documentation. diff --git a/docs-page/api/language_model_clients/Databricks.md b/docs/api/language_model_clients/Databricks.md similarity index 93% rename from docs-page/api/language_model_clients/Databricks.md rename to docs/api/language_model_clients/Databricks.md index f6b2c660ff..e6201e1dcb 100644 --- a/docs-page/api/language_model_clients/Databricks.md +++ b/docs/api/language_model_clients/Databricks.md @@ -40,4 +40,4 @@ class Databricks(GPT3): ### Methods -Refer to [`dspy.OpenAI`](#openai) documentation. \ No newline at end of file +Refer to [`dspy.OpenAI`](https://dspy-docs.vercel.app/api/language_model_clients/OpenAI) documentation. \ No newline at end of file diff --git a/docs/api/language_model_clients/HFClientVLLM.md b/docs/api/language_model_clients/HFClientVLLM.md new file mode 100644 index 0000000000..347ce89eb4 --- /dev/null +++ b/docs/api/language_model_clients/HFClientVLLM.md @@ -0,0 +1,23 @@ +--- +sidebar_position: 5 +--- + +# dspy.HFClientVLLM + +### Usage + +```python +lm = dspy.HFClientVLLM(model="meta-llama/Llama-2-7b-hf", port=8080, url="http://localhost") +``` + +### Prerequisites + +Refer to the [vLLM Server](https://dspy-docs.vercel.app/api/language_model_clients/HFClientVLLM) section of the `Using Local Models` documentation. + +### Constructor + +Refer to [`dspy.TGI`](https://dspy-docs.vercel.app/api/language_model_clients/TGI) documentation. Replace with `HFClientVLLM`. + +### Methods + +Refer to [`dspy.OpenAI`](https://dspy-docs.vercel.app/api/language_model_clients/OpenAI) documentation. 
\ No newline at end of file diff --git a/docs-page/api/language_model_clients/OpenAI.md b/docs/api/language_model_clients/OpenAI.md similarity index 100% rename from docs-page/api/language_model_clients/OpenAI.md rename to docs/api/language_model_clients/OpenAI.md diff --git a/docs-page/api/language_model_clients/TGI.md b/docs/api/language_model_clients/TGI.md similarity index 75% rename from docs-page/api/language_model_clients/TGI.md rename to docs/api/language_model_clients/TGI.md index 0a2bf4dbfb..c449023490 100644 --- a/docs-page/api/language_model_clients/TGI.md +++ b/docs/api/language_model_clients/TGI.md @@ -12,7 +12,7 @@ lm = dspy.HFClientTGI(model="meta-llama/Llama-2-7b-hf", port=8080, url="http://l ### Prerequisites -Refer to the [Text Generation-Inference Server](https://github.com/stanfordnlp/dspy/blob/local_models_docs/docs/using_local_models.md#text-generation-inference-server) section of the `Using Local Models` documentation. +Refer to the [Text Generation-Inference Server](https://dspy-docs.vercel.app/docs/deep-dive/language_model_clients/local_models/HFClientTGI) section of the `Using Local Models` documentation. ### Constructor @@ -31,4 +31,4 @@ class HFClientTGI(HFModel): ### Methods -Refer to [`dspy.OpenAI`](#openai) documentation. \ No newline at end of file +Refer to [`dspy.OpenAI`](https://dspy-docs.vercel.app/api/language_model_clients/OpenAI) documentation. \ No newline at end of file diff --git a/docs-page/api/language_model_clients/Together.md b/docs/api/language_model_clients/Together.md similarity index 85% rename from docs-page/api/language_model_clients/Together.md rename to docs/api/language_model_clients/Together.md index c24232d1ba..c6baccf6df 100644 --- a/docs-page/api/language_model_clients/Together.md +++ b/docs/api/language_model_clients/Together.md @@ -29,4 +29,4 @@ class Together(HFModel): ### Methods -Refer to [`dspy.OpenAI`](#openai) documentation. \ No newline at end of file +Refer to [`dspy.OpenAI`](https://dspy-docs.vercel.app/api/language_model_clients/OpenAI) documentation. 
\ No newline at end of file diff --git a/docs-page/api/language_model_clients/_category_.json b/docs/api/language_model_clients/_category_.json similarity index 100% rename from docs-page/api/language_model_clients/_category_.json rename to docs/api/language_model_clients/_category_.json diff --git a/docs-page/api/modules/ChainOfThought.md b/docs/api/modules/ChainOfThought.md similarity index 100% rename from docs-page/api/modules/ChainOfThought.md rename to docs/api/modules/ChainOfThought.md diff --git a/docs-page/api/modules/ChainOfThoughtWithHint.md b/docs/api/modules/ChainOfThoughtWithHint.md similarity index 100% rename from docs-page/api/modules/ChainOfThoughtWithHint.md rename to docs/api/modules/ChainOfThoughtWithHint.md diff --git a/docs-page/api/modules/MultiChainComparison.md b/docs/api/modules/MultiChainComparison.md similarity index 100% rename from docs-page/api/modules/MultiChainComparison.md rename to docs/api/modules/MultiChainComparison.md diff --git a/docs-page/api/modules/Predict.md b/docs/api/modules/Predict.md similarity index 100% rename from docs-page/api/modules/Predict.md rename to docs/api/modules/Predict.md diff --git a/docs-page/api/modules/ProgramOfThought.md b/docs/api/modules/ProgramOfThought.md similarity index 100% rename from docs-page/api/modules/ProgramOfThought.md rename to docs/api/modules/ProgramOfThought.md diff --git a/docs-page/api/modules/ReAct.md b/docs/api/modules/ReAct.md similarity index 100% rename from docs-page/api/modules/ReAct.md rename to docs/api/modules/ReAct.md diff --git a/docs-page/api/modules/Retrieve.md b/docs/api/modules/Retrieve.md similarity index 100% rename from docs-page/api/modules/Retrieve.md rename to docs/api/modules/Retrieve.md diff --git a/docs-page/api/modules/_category_.json b/docs/api/modules/_category_.json similarity index 100% rename from docs-page/api/modules/_category_.json rename to docs/api/modules/_category_.json diff --git a/docs-page/api/optimizers/BootstrapFewShot.md b/docs/api/optimizers/BootstrapFewShot.md similarity index 100% rename from docs-page/api/optimizers/BootstrapFewShot.md rename to docs/api/optimizers/BootstrapFewShot.md diff --git a/docs-page/api/optimizers/BootstrapFewShotWithRandomSearch.md b/docs/api/optimizers/BootstrapFewShotWithRandomSearch.md similarity index 95% rename from docs-page/api/optimizers/BootstrapFewShotWithRandomSearch.md rename to docs/api/optimizers/BootstrapFewShotWithRandomSearch.md index dd007b10e2..1a36af2a84 100644 --- a/docs-page/api/optimizers/BootstrapFewShotWithRandomSearch.md +++ b/docs/api/optimizers/BootstrapFewShotWithRandomSearch.md @@ -41,7 +41,7 @@ class BootstrapFewShotWithRandomSearch(BootstrapFewShot): ### Method -Refer to [teleprompt.BootstrapFewShot](#telepromptbootstrapfewshot) documentation. +Refer to [teleprompt.BootstrapFewShot](https://dspy-docs.vercel.app/docs/deep-dive/teleprompter/bootstrap-fewshot) documentation. 
## Example diff --git a/docs-page/api/optimizers/BootstrapFinetune.md b/docs/api/optimizers/BootstrapFinetune.md similarity index 100% rename from docs-page/api/optimizers/BootstrapFinetune.md rename to docs/api/optimizers/BootstrapFinetune.md diff --git a/docs-page/api/optimizers/Ensemble.md b/docs/api/optimizers/Ensemble.md similarity index 100% rename from docs-page/api/optimizers/Ensemble.md rename to docs/api/optimizers/Ensemble.md diff --git a/docs-page/api/optimizers/LabeledFewShot.md b/docs/api/optimizers/LabeledFewShot.md similarity index 100% rename from docs-page/api/optimizers/LabeledFewShot.md rename to docs/api/optimizers/LabeledFewShot.md diff --git a/docs-page/api/optimizers/_category_.json b/docs/api/optimizers/_category_.json similarity index 100% rename from docs-page/api/optimizers/_category_.json rename to docs/api/optimizers/_category_.json diff --git a/docs-page/api/retrieval_model_clients/AzureCognitiveSearch.md b/docs/api/retrieval_model_clients/AzureCognitiveSearch.md similarity index 100% rename from docs-page/api/retrieval_model_clients/AzureCognitiveSearch.md rename to docs/api/retrieval_model_clients/AzureCognitiveSearch.md diff --git a/docs-page/api/retrieval_model_clients/ChromadbRM.md b/docs/api/retrieval_model_clients/ChromadbRM.md similarity index 100% rename from docs-page/api/retrieval_model_clients/ChromadbRM.md rename to docs/api/retrieval_model_clients/ChromadbRM.md diff --git a/docs-page/api/retrieval_model_clients/ColBERTv2.md b/docs/api/retrieval_model_clients/ColBERTv2.md similarity index 100% rename from docs-page/api/retrieval_model_clients/ColBERTv2.md rename to docs/api/retrieval_model_clients/ColBERTv2.md diff --git a/docs-page/api/retrieval_model_clients/FaissRM.md b/docs/api/retrieval_model_clients/FaissRM.md similarity index 100% rename from docs-page/api/retrieval_model_clients/FaissRM.md rename to docs/api/retrieval_model_clients/FaissRM.md diff --git a/docs-page/api/retrieval_model_clients/_category_.json b/docs/api/retrieval_model_clients/_category_.json similarity index 100% rename from docs-page/api/retrieval_model_clients/_category_.json rename to docs/api/retrieval_model_clients/_category_.json diff --git a/docs/assertions.md b/docs/assertions.md deleted file mode 100644 index a1601227d3..0000000000 --- a/docs/assertions.md +++ /dev/null @@ -1,258 +0,0 @@ -# DSPy Assertions -## Introduction - -Language models (LMs) have transformed how we interact with machine learning, offering vast capabilities in natural language understanding and generation. However, ensuring these models adhere to domain-specific constraints remains a challenge. Despite the growth of techniques like fine-tuning or “prompt engineering”, these approaches are extremely tedious and rely on heavy, manual hand-waving to guide the LMs in adhering to specific constraints. Even DSPy's modularity of programming prompting pipelines lacks mechanisms to effectively and automatically enforce these constraints. - -To address this, we introduce DSPy Assertions, a feature within the DSPy framework designed to automate the enforcement of computational constraints on LMs. DSPy Assertions empower developers to guide LMs towards desired outcomes with minimal manual intervention, enhancing the reliability, predictability, and correctness of LM outputs. - -### dspy.Assert and dspy.Suggest API - -We introduce two primary constructs within DSPy Assertions: - -- **`dspy.Assert`**: - - **Parameters**: - - `constraint (bool)`: Outcome of Python-defined boolean validation check. 
- - `msg (Optional[str])`: User-defined error message providing feedback or correction guidance. - - `backtrack (Optional[module])`: Specifies target module for retry attempts upon constraint failure. The default backtracking module is the last module before the assertion. - - **Behavior**: Initiates retry upon failure, dynamically adjusting the pipeline's execution. If failures persist, it halts execution and raises a `dspy.AssertionError`. - -- **`dspy.Suggest`**: - - **Parameters**: Similar to `dspy.Assert`. - - **Behavior**: Encourages self-refinement through retries without enforcing hard stops. Logs failures after maximum backtracking attempts and continues execution. - -- **dspy.Assert vs. Python Assertions**: Unlike conventional Python `assert` statements that terminate the program upon failure, `dspy.Assert` conducts a sophisticated retry mechanism, allowing the pipeline to adjust. - -Specifically, when a constraint is not met: - -- Backtracking Mechanism: An under-the-hood backtracking is initiated, offering the model a chance to self-refine and proceed, which is done through -- Dynamic Signature Modification: internally modifying your DSPy program’s Signature by adding the following fields: - - Past Output: your model's past output that did not pass the validation_fn - - Instruction: your user-defined feedback message on what went wrong and what possibly to fix - -If the error continues past the `max_backtracking_attempts`, then `dspy.Assert` will halt the pipeline execution, altering you with an `dspy.AssertionError`. This ensures your program doesn't continue executing with “bad” LM behavior and immediately highlights sample failure outputs for user assessment. - -- **dspy.Suggest vs. dspy.Assert**: `dspy.Suggest` on the other hand offers a softer approach. It maintains the same retry backtracking as `dspy.Assert` but instead serves as a gentle nudger. If the model outputs cannot pass the model constraints after the `max_backtracking_attempts`, `dspy.Suggest` will log the persistent failure and continue execution of the program on the rest of the data. This ensures the LM pipeline works in a "best-effort" manner without halting execution. - -- **`dspy.Suggest`** are best utilized as "helpers" during the evaluation phase, offering guidance and potential corrections without halting the pipeline. -- **`dspy.Assert`** are recommended during the development stage as "checkers" to ensure the LM behaves as expected, providing a robust mechanism for identifying and addressing errors early in the development cycle. - - -## Use Case: Including Assertions in DSPy Programs - -We start with using an example of a multi-hop QA SimplifiedBaleen pipeline as defined in the intro walkthrough. 
- -```python -class SimplifiedBaleen(dspy.Module): - def __init__(self, passages_per_hop=2, max_hops=2): - super().__init__() - - self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)] - self.retrieve = dspy.Retrieve(k=passages_per_hop) - self.generate_answer = dspy.ChainOfThought(GenerateAnswer) - self.max_hops = max_hops - - def forward(self, question): - context = [] - prev_queries = [question] - - for hop in range(self.max_hops): - query = self.generate_query[hop](context=context, question=question).query - prev_queries.append(query) - passages = self.retrieve(query).passages - context = deduplicate(context + passages) - - pred = self.generate_answer(context=context, question=question) - pred = dspy.Prediction(context=context, answer=pred.answer) - return pred - -baleen = SimplifiedBaleen() - -baleen(question = "Which award did Gary Zukav's first book receive?") -``` - -To include DSPy Assertions, we simply define our validation functions and declare our assertions following the respective model generation. - -For this use case, suppose we want to impose the following constraints: - 1. Length - each query should be less than 100 characters - 2. Uniqueness - each generated query should differ from previously-generated queries. - -We can define these validation checks as boolean functions: - -```python -#simplistic boolean check for query length -len(query) <= 100 - -#Python function for validating distinct queries -def validate_query_distinction_local(previous_queries, query): - """check if query is distinct from previous queries""" - if previous_queries == []: - return True - if dspy.evaluate.answer_exact_match_str(query, previous_queries, frac=0.8): - return False - return True -``` - -We can declare these validation checks through `dspy.Suggest` statements (as we want to test the program in a best-effort demonstration). We want to keep these after the query generation `query = self.generate_query[hop](context=context, question=question).query`. - -```python -dspy.Suggest( - len(query) <= 100, - "Query should be short and less than 100 characters", -) - -dspy.Suggest( - validate_query_distinction_local(prev_queries, query), - "Query should be distinct from: " - + "; ".join(f"{i+1}) {q}" for i, q in enumerate(prev_queries)), -) -``` - -It is recommended to define a program with assertions separately than your original program if you are doing comparative evaluation for the effect of assertions. If not, feel free to set Assertions away! 
- -Let's take a look at how the SimplifiedBaleen program will look with Assertions included: - -```python -class SimplifiedBaleenAssertions(dspy.Module): - def __init__(self, passages_per_hop=2, max_hops=2): - super().__init__() - self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)] - self.retrieve = dspy.Retrieve(k=passages_per_hop) - self.generate_answer = dspy.ChainOfThought(GenerateAnswer) - self.max_hops = max_hops - - def forward(self, question): - context = [] - prev_queries = [question] - - for hop in range(self.max_hops): - query = self.generate_query[hop](context=context, question=question).query - - dspy.Suggest( - len(query) <= 100, - "Query should be short and less than 100 characters", - ) - - dspy.Suggest( - validate_query_distinction_local(prev_queries, query), - "Query should be distinct from: " - + "; ".join(f"{i+1}) {q}" for i, q in enumerate(prev_queries)), - ) - - prev_queries.append(query) - passages = self.retrieve(query).passages - context = deduplicate(context + passages) - - if all_queries_distinct(prev_queries): - self.passed_suggestions += 1 - - pred = self.generate_answer(context=context, question=question) - pred = dspy.Prediction(context=context, answer=pred.answer) - return pred -``` - -Now calling programs with DSPy Assertions requires one last step, and that is transforming the program to wrap it with internal assertions backtracking and Retry logic. - -```python -from dspy.primitives.assertions import assert_transform_module, backtrack_handler - -baleen_with_assertions = assert_transform_module(SimplifiedBaleenAssertions(), backtrack_handler) - -# backtrack_handler is parameterized over a few settings for the backtracking mechanism -# To change the number of max retry attempts, you can do -baleen_with_assertions_retry_once = assert_transform_module(SimplifiedBaleenAssertions(), - functools.partial(backtrack_handler, max_backtracks=1)) -``` - -Alternatively, you can also directly call `activate_assertions` on the program with `dspy.Assert/Suggest` statements using the default backtracking mechanism (`max_backtracks=2`): - -```python -baleen_with_assertions = SimplifiedBaleenAssertions().activate_assertions() -``` - -Now let's take a look at the internal LM backtracking by inspecting the history of the LM query generations. Here we see that when a query fails to pass the validation check of being less than 100 characters, its internal `GenerateSearchQuery` signature is dynamically modified during the backtracking+Retry process to include the past query and the corresponding user-defined instruction: `"Query should be short and less than 100 characters"`. - - -``` -Write a simple search query that will help answer a complex question. - ---- - -Follow the following format. - -Context: may contain relevant facts - -Question: ${question} - -Reasoning: Let's think step by step in order to ${produce the query}. We ... - -Query: ${query} - ---- - -Context: -[1] «Kerry Condon | Kerry Condon (born 4 January 1983) is [...]» -[2] «Corona Riccardo | Corona Riccardo (c. 1878October 15, 1917) was [...]» - -Question: Who acted in the shot film The Shore and is also the youngest actress ever to play Ophelia in a Royal Shakespeare Company production of "Hamlet." ? - -Reasoning: Let's think step by step in order to find the answer to this question. First, we need to identify the actress who played Ophelia in a Royal Shakespeare Company production of "Hamlet." 
Then, we need to find out if this actress also acted in the short film "The Shore." - -Query: "actress who played Ophelia in Royal Shakespeare Company production of Hamlet" + "actress in short film The Shore" - - - -Write a simple search query that will help answer a complex question. - ---- - -Follow the following format. - -Context: may contain relevant facts - -Question: ${question} - -Past Query: past output with errors - -Instructions: Some instructions you must satisfy - -Query: ${query} - ---- - -Context: -[1] «Kerry Condon | Kerry Condon (born 4 January 1983) is an Irish television and film actress, best known for her role as Octavia of the Julii in the HBO/BBC series "Rome," as Stacey Ehrmantraut in AMC's "Better Call Saul" and as the voice of F.R.I.D.A.Y. in various films in the Marvel Cinematic Universe. She is also the youngest actress ever to play Ophelia in a Royal Shakespeare Company production of "Hamlet."» -[2] «Corona Riccardo | Corona Riccardo (c. 1878October 15, 1917) was an Italian born American actress who had a brief Broadway stage career before leaving to become a wife and mother. Born in Naples she came to acting in 1894 playing a Mexican girl in a play at the Empire Theatre. Wilson Barrett engaged her for a role in his play "The Sign of the Cross" which he took on tour of the United States. Riccardo played the role of Ancaria and later played Berenice in the same play. Robert B. Mantell in 1898 who struck by her beauty also cast her in two Shakespeare plays, "Romeo and Juliet" and "Othello". Author Lewis Strang writing in 1899 said Riccardo was the most promising actress in America at the time. Towards the end of 1898 Mantell chose her for another Shakespeare part, Ophelia im Hamlet. Afterwards she was due to join Augustin Daly's Theatre Company but Daly died in 1899. In 1899 she gained her biggest fame by playing Iras in the first stage production of Ben-Hur.» - -Question: Who acted in the shot film The Shore and is also the youngest actress ever to play Ophelia in a Royal Shakespeare Company production of "Hamlet." ? - -Past Query: "actress who played Ophelia in Royal Shakespeare Company production of Hamlet" + "actress in short film The Shore" - -Instructions: Query should be short and less than 100 characters - -Query: "actress Ophelia RSC Hamlet" + "actress The Shore" - -``` - - -## Assertion-Driven Optimizations - -DSPy Assertions work with optimizations that DSPy offers, particularly with `BootstrapFewShotWithRandomSearch`, including the following settings: - -- Compilation with Assertions - This includes assertion-driven example bootstrapping and counterexample bootstrapping during compilation. The teacher model for bootstrapping few-shot demonstrations can make use of DSPy Assertions to offer robust bootstrapped examples for the student model to learn from during inference. In this setting, the student model does not perform assertion aware optimizations (backtracking and retry) during inference. -- Compilation + Inference with Assertions - -This includes assertion-driven optimizations in both compilation and inference. Now the teacher model offers assertion-driven examples but the student can further optimize with assertions of its own during inference time. 
-```python -teleprompter = BootstrapFewShotWithRandomSearch( - metric=validate_context_and_answer_and_hops, - max_bootstrapped_demos=max_bootstrapped_demos, - num_candidate_programs=6, -) - -#Compilation with Assertions -compiled_with_assertions_baleen = teleprompter.compile(student = baleen, teacher = baleen_with_assertions, trainset = trainset, valset = devset) - -#Compilation + Inference with Assertions -compiled_baleen_with_assertions = teleprompter.compile(student=baleen_with_assertions, teacher = baleen_with_assertions, trainset=trainset, valset=devset) - -``` diff --git a/docs-page/babel.config.js b/docs/babel.config.js similarity index 100% rename from docs-page/babel.config.js rename to docs/babel.config.js diff --git a/docs/custom.css b/docs/custom.css deleted file mode 100644 index c653343f13..0000000000 --- a/docs/custom.css +++ /dev/null @@ -1,169 +0,0 @@ -.green-title { - color: green; - display: inline; - font-size: small; -} - -.platform-badge, -.version-badge { - padding: 5px; - border-radius: 5px; - margin-left: 10px; - display: inline; -} - -.platform-badge { - background-color: #007bff; - color: white; - font-size: smaller; -} - -.version-badge { - background-color: #f39d12ae; - color: rgb(0, 0, 0); - font-size: 10px; - padding: 10px; -} - -.team-members { - display: flex; - justify-content: space-around; -} - -.team-member { - text-align: center; - /* flex: 1; */ - width: 40px; - /* height: 40px; */ -} - -.title-separator, -.comments-separator { - border-top: 1px solid #ccc; - margin: 10px 0; -} - -.comments-section { - margin-top: 10px; - font-size: smaller; -} - -.warning-message { - background-color: #f44336; - color: white; - padding: 10px; - border-radius: 5px; - display: none; -} - -.warning-message.show { - display: block; -} - -.success-message { - background-color: #4caf50; - color: white; - padding: 10px; - border-radius: 5px; -} - -.priority-section { - display: flex; - align-items: center; -} - -.warning-message-below { - background-color: rgba(244, 67, 54, 0.7); /* Red with alpha 0.7 */ - color: white; - padding: 10px; - border-radius: 5px; - margin-top: 10px; - font-size: smaller; -} - -.collapsible-info { - background-color: #2baa55ce; /* Blue */ - color: white; - padding: 10px; - border-radius: 5px; - margin-top: 10px; - display: block; - font-size: smaller; -} - -.collapsible-info.show { - display: block; -} -.title-badge { - font-size: 1.2em; - background-color: #4caf50; /* Green */ - color: white; - padding: 10px; - border-radius: 5px; - display: inline; -} - -.model-image { - width: 130px; /* Set the width */ - max-width: 100%; /* Make sure it scales down if the container is smaller */ - height: auto; /* Maintain aspect ratio */ - display: block; /* Change from inline to block */ - margin: auto; /* Center the image */ -} - -.model-badge { - font-size: 1.2em; - background-color: #4caf50; /* Green */ - color: white; - padding: 10px; - border-radius: 5px; - display: inline; - transition: background-color 0.3s ease; /* Smooth transition */ -} - -.model-badge:hover { - background-color: #4a53ec; /* Darker green */ -} - -.title-platform-section { - display: flex; - align-items: center; -} - -.status-priority-section { - display: flex; - align-items: center; - justify-content: space-between; -} - -.card-grid { - display: flex; - flex-wrap: wrap; - gap: 16px; -} - -.card { - flex: 1; - border: 1px solid #ccc; - border-radius: 8px; - padding: 16px; - margin: 16px; - min-width: calc(45% - 20px); /* For 3 cards per row */ - max-width: calc(45% - 20px); - 
max-height: fit-content; - margin: 10px; -} - -button { - background-color: #023b04c0; - color: white; - padding: 14px 20px; - margin: 8px 0; - border: none; - cursor: pointer; -} - -button:disabled { - background-color: #ccc; - cursor: not-allowed; -} diff --git a/docs-page/docs/building-blocks/1-language_models.md b/docs/docs/building-blocks/1-language_models.md similarity index 90% rename from docs-page/docs/building-blocks/1-language_models.md rename to docs/docs/building-blocks/1-language_models.md index ee41e6d665..cb03ce29be 100644 --- a/docs-page/docs/building-blocks/1-language_models.md +++ b/docs/docs/building-blocks/1-language_models.md @@ -10,7 +10,7 @@ Let's first make sure you can set up your language model. DSPy support clients f ## Setting up the LM client. -You can just call the constructor that connects to the LM. Then, use `dspy.configure` to declare this as the default LM. +You can just call the constructor that connects to the LM. Then, use `dspy.configure` to declare this as the default LM. For example, to use OpenAI language models, you can do it as follows. @@ -141,31 +141,31 @@ lm = dspy.{provider_listed_below}(model="your model", model_request_kwargs="..." You need to host these models on your own GPU(s). Below, we include pointers for how to do that. -1. `dspy.HFClientTGI`: for HuggingFace models through the Text Generation Inference (TGI) system. [Tutorial: How do I install and launch the TGI server?](/api/local_language_model_clients/TGI) +1. `dspy.HFClientTGI`: for HuggingFace models through the Text Generation Inference (TGI) system. [Tutorial: How do I install and launch the TGI server?](https://dspy-docs.vercel.app/docs/deep-dive/language_model_clients/local_models/HFClientTGI) ```python tgi_llama2 = dspy.HFClientTGI(model="meta-llama/Llama-2-7b-hf", port=8080, url="http://localhost") ``` -2. `dspy.HFClientVLLM`: for HuggingFace models through vLLM. [Tutorial: How do I install and launch the vLLM server?](/api/local_language_model_clients/vLLM) +2. `dspy.HFClientVLLM`: for HuggingFace models through vLLM. [Tutorial: How do I install and launch the vLLM server?](https://dspy-docs.vercel.app/docs/deep-dive/language_model_clients/local_models/HFClientVLLM) ```python vllm_llama2 = dspy.HFClientVLLM(model="meta-llama/Llama-2-7b-hf", port=8080, url="http://localhost") ``` -3. `dspy.HFModel` (experimental) [Tutorial: How do I initialize models using HFModel](/api/local_language_model_clients/HFModel) +3. `dspy.HFModel` (experimental) [Tutorial: How do I initialize models using HFModel](https://dspy-docs.vercel.app/api/local_language_model_clients/HFModel) ```python llama = dspy.HFModel(model = 'meta-llama/Llama-2-7b-hf') ``` -4. `dspy.Ollama` (experimental) for open source models through [Ollama](https://ollama.com). [Tutorial: How do I install and use Ollama on a local computer?](/api/local_language_model_clients/Ollama)\n", +4. `dspy.Ollama` (experimental) for open source models through [Ollama](https://ollama.com). [Tutorial: How do I install and use Ollama on a local computer?](https://dspy-docs.vercel.app/api/local_language_model_clients/Ollama) ```python mistral_ollama = dspy.OllamaLocal(model='mistral') ``` -5. `dspy.ChatModuleClient` (experimental): [How do I install and use MLC?](/api/local_language_model_clients/MLC) +5. 
`dspy.ChatModuleClient` (experimental): [How do I install and use MLC?](https://dspy-docs.vercel.app/api/local_language_model_clients/MLC) ```python model = 'dist/prebuilt/mlc-chat-Llama-2-7b-chat-hf-q4f16_1' diff --git a/docs-page/docs/building-blocks/2-signatures.md b/docs/docs/building-blocks/2-signatures.md similarity index 98% rename from docs-page/docs/building-blocks/2-signatures.md rename to docs/docs/building-blocks/2-signatures.md index c79c5c085b..5ce2e56fb1 100644 --- a/docs-page/docs/building-blocks/2-signatures.md +++ b/docs/docs/building-blocks/2-signatures.md @@ -155,6 +155,6 @@ Prediction( ## Using signatures to build modules & compiling them -While signatures are covenient for prototyping with structured inputs/outputs, that's not the main reason to use them! +While signatures are convenient for prototyping with structured inputs/outputs, that's not the main reason to use them! You should compose multiple signatures into bigger [DSPy modules] and [compile] these modules into optimized prompts and finetunes. diff --git a/docs-page/docs/building-blocks/3-modules.md b/docs/docs/building-blocks/3-modules.md similarity index 97% rename from docs-page/docs/building-blocks/3-modules.md rename to docs/docs/building-blocks/3-modules.md index 54ecb13da5..dad351596c 100644 --- a/docs-page/docs/building-blocks/3-modules.md +++ b/docs/docs/building-blocks/3-modules.md @@ -114,7 +114,7 @@ We also have some function-style modules: 6. **`dspy.majority`**: Can do basic voting to return the most popular response from a set of predictions. -Check out further examples in [each module's respective guide](/api/category/modules/). +Check out further examples in [each module's respective guide](https://dspy-docs.vercel.app/api/category/modules). ## How do I compose multiple modules into a bigger program? diff --git a/docs-page/docs/building-blocks/4-data.md b/docs/docs/building-blocks/4-data.md similarity index 100% rename from docs-page/docs/building-blocks/4-data.md rename to docs/docs/building-blocks/4-data.md diff --git a/docs-page/docs/building-blocks/5-metrics.md b/docs/docs/building-blocks/5-metrics.md similarity index 100% rename from docs-page/docs/building-blocks/5-metrics.md rename to docs/docs/building-blocks/5-metrics.md diff --git a/docs-page/docs/building-blocks/6-optimizers.md b/docs/docs/building-blocks/6-optimizers.md similarity index 95% rename from docs-page/docs/building-blocks/6-optimizers.md rename to docs/docs/building-blocks/6-optimizers.md index db6223e59b..6c9c3603a9 100644 --- a/docs-page/docs/building-blocks/6-optimizers.md +++ b/docs/docs/building-blocks/6-optimizers.md @@ -23,7 +23,7 @@ If you happen to have a lot of data, DSPy can leverage that. But you can start s Traditional deep neural networks (DNNs) can be optimized with gradient descent, given a loss function and some training data. -DSPy programs consist of multiple calls to LMs, stacked togther as [DSPy modules]. Each DSPy module has internal parameters of three kinds: (1) the LM weights, (2) the instructions, and (3) demonstrations of the input/output behavior. +DSPy programs consist of multiple calls to LMs, stacked together as [DSPy modules]. Each DSPy module has internal parameters of three kinds: (1) the LM weights, (2) the instructions, and (3) demonstrations of the input/output behavior. Given a metric, DSPy can optimize all of these three with multi-stage optimization algorithms. These can combine gradient descent (for LM weights) and discrete LM-driven optimization, i.e. 
for crafting/updating instructions and for creating/validating demonstrations. DSPy Demonstrations are like few-shot examples, but they're far more powerful. They can be created from scratch, given your program, and their creation and selection can be optimized in many effective ways. diff --git a/docs-page/docs/building-blocks/7-assertions.md b/docs/docs/building-blocks/7-assertions.md similarity index 100% rename from docs-page/docs/building-blocks/7-assertions.md rename to docs/docs/building-blocks/7-assertions.md diff --git a/docs-page/docs/building-blocks/_category_.json b/docs/docs/building-blocks/_category_.json similarity index 100% rename from docs-page/docs/building-blocks/_category_.json rename to docs/docs/building-blocks/_category_.json diff --git a/docs-page/docs/building-blocks/solving_your_task.md b/docs/docs/building-blocks/solving_your_task.md similarity index 82% rename from docs-page/docs/building-blocks/solving_your_task.md rename to docs/docs/building-blocks/solving_your_task.md index b4ba1f14ee..cdfc86865f 100644 --- a/docs-page/docs/building-blocks/solving_your_task.md +++ b/docs/docs/building-blocks/solving_your_task.md @@ -8,7 +8,7 @@ Using DSPy well for solving a new task is just doing good machine learning with What this means is that it's an iterative process. You make some initial choices, which will be sub-optimal, and then you refine them incrementally. -As we discuss below, you will define your task and the metrics you want to maximize, and prepare a few example inputs — typically without labels (or only with labels for the final outputs, if your metric requires them). Then, you build your pipeline by selecting built-in layers [(`modules`)](/docs/building-blocks/modules) to use, giving each layer a [`signature` (input/output spec)](/docs/building-blocks/signatures), and then calling your modules freely in your Python code. Lastly, you use a DSPy [`optimizer`](/docs/building-blocks/optimizers) to compile your code into high-quality instructions, automatic few-shot examples, or updated LM weights for your LM. +As we discuss below, you will define your task and the metrics you want to maximize, and prepare a few example inputs — typically without labels (or only with labels for the final outputs, if your metric requires them). Then, you build your pipeline by selecting built-in layers [(`modules`)](https://dspy-docs.vercel.app/docs/building-blocks/modules) to use, giving each layer a [`signature` (input/output spec)](https://dspy-docs.vercel.app/docs/building-blocks/signatures), and then calling your modules freely in your Python code. Lastly, you use a DSPy [`optimizer`](https://dspy-docs.vercel.app/docs/building-blocks/optimizers) to compile your code into high-quality instructions, automatic few-shot examples, or updated LM weights for your LM. ## 1) Define your task. @@ -31,7 +31,7 @@ What should your DSPy program do? Can it just be a simple chain-of-thought step? Is there a typical workflow for solving your problem in multiple well-defined steps? Or do you want a fully open-ended LM (or open-ended tool use with agents) for your task? -Think about this space but always start simple. Almost every task should probably start with just a single [dspy.ChainofThought](/api/modules/ChainOfThought) module, and then add complexity incrementally as you go. +Think about this space but always start simple. 
Almost every task should probably start with just a single [dspy.ChainOfThought](https://dspy-docs.vercel.app/api/modules/ChainOfThought) module, and then add complexity incrementally as you go. Then write your (initial) DSPy program. Again: start simple, and let the next few steps guide any complexity you will add. @@ -39,7 +39,7 @@ By this point, you probably have a few examples of the task you're trying to solve. -Run them through your pipeline. Consider using a large and powerful LM at this point, or a couple of different LMs, just to understand what's possible. (DSPy will make swapping these LMs pretty easy - [LM Guide](/docs/building-blocks/language_models).) +Run them through your pipeline. Consider using a large and powerful LM at this point, or a couple of different LMs, just to understand what's possible. (DSPy will make swapping these LMs pretty easy - [LM Guide](https://dspy-docs.vercel.app/docs/building-blocks/language_models).) At this point, you're still using your pipeline zero-shot, so it will be far from perfect. DSPy will help you optimize the instructions, few-shot examples, and even weights of your LM calls below, but understanding where things go wrong in zero-shot usage will go a long way. Record the interesting (both easy and hard) examples you try: even if you don't have labels, simply tracking the inputs you tried will be useful for DSPy optimizers below. ## 4) Define your data. -Now it's time to more formally declare your training and validation data for DSPy evaluation and optimization - [Data Guide](/docs/building-blocks/data). +Now it's time to more formally declare your training and validation data for DSPy evaluation and optimization - [Data Guide](https://dspy-docs.vercel.app/docs/building-blocks/data). You can use DSPy optimizers usefully with as few as 10 examples, but having 50-100 examples (or even better, 300-500 examples) goes a long way. @@ -62,7 +62,7 @@ If there's data whose licenses are permissive enough, we suggest you use them. O What makes outputs from your system good or bad? Invest in defining metrics and improving them over time incrementally. It's really hard to consistently improve what you aren't able to define. -A metric is just a function that will take examples from your data and take the output of your system, and return a score that quantifies how good the output is - [Metric Guide](/docs/building-blocks/metrics). +A metric is just a function that will take examples from your data and take the output of your system, and return a score that quantifies how good the output is - [Metric Guide](https://dspy-docs.vercel.app/docs/building-blocks/metrics). For simple tasks, this could be just "accuracy" or "exact match" or "F1 score". This may be the case for simple classification or short-form QA tasks. @@ -79,17 +79,17 @@ Look at the outputs and the metric scores. This will probably allow you to spot ## 7) Compile with a DSPy optimizer. -Given some data and a metric, we can now optimize the program you built - [Optimizer Guide](/docs/building-blocks/optimizers). +Given some data and a metric, we can now optimize the program you built - [Optimizer Guide](https://dspy-docs.vercel.app/docs/building-blocks/optimizers). DSPy includes many optimizers that do different things. Remember: DSPy optimizers will create examples of each step, craft instructions, and/or update LM weights. 
In general, you don't need to have labels for your pipeline steps, but your data examples need to have input values and whatever labels your metric requires (e.g., no labels if your metric is reference-free, but final output labels otherwise in most cases). Here's the general guidance on getting started: -* If you have very little data, e.g. 10 examples of your task, use [`BootstrapFewShot`](/docs/deep-dive/teleprompter/bootstrap-fewshot) +* If you have very little data, e.g. 10 examples of your task, use [`BootstrapFewShot`](https://dspy-docs.vercel.app/docs/deep-dive/teleprompter/bootstrap-fewshot) -* If you have slightly more data, e.g. 50 examples of your task, use [`BootstrapFewShotWithRandomSearch`](/docs/deep-dive/teleprompter/bootstrap-fewshot). +* If you have slightly more data, e.g. 50 examples of your task, use [`BootstrapFewShotWithRandomSearch`](https://dspy-docs.vercel.app/docs/deep-dive/teleprompter/bootstrap-fewshot). -* If you have more data than that, e.g. 300 examples or more, use [`BayesianSignatureOptimizer`](/docs/deep-dive/teleprompter/signature-optimizer). +* If you have more data than that, e.g. 300 examples or more, use [`BayesianSignatureOptimizer`](https://dspy-docs.vercel.app/docs/deep-dive/teleprompter/signature-optimizer). * If you have been able to use one of these with a large LM (e.g., 7B parameters or above) and need a very efficient program, compile that down to a small LM with `BootstrapFinetune`. @@ -97,7 +97,7 @@ Here's the general guidance on getting started: At this point, you are either very happy with everything (we've seen quite a few people get it right on first try with DSPy) or, more likely, you've made a lot of progress but you don't like something about the final program or the metric. -At this point, go back to step 1 and revisit the major questions. Did you define your task well? Do you need to collect (or find online) more data for your problem? Do you want to update your metric? And do you want to use a more sophisticated optimizer? Do you need to consider advanced features like [DSPy Assertions](/docs/building-blocks/assertions)? Or, perhaps most importantly, do you want to add some more complexity or steps in your DSPy program itself? Do you want to use multiple optimizers in a sequence? +At this point, go back to step 1 and revisit the major questions. Did you define your task well? Do you need to collect (or find online) more data for your problem? Do you want to update your metric? And do you want to use a more sophisticated optimizer? Do you need to consider advanced features like [DSPy Assertions](https://dspy-docs.vercel.app/docs/building-blocks/assertions)? Or, perhaps most importantly, do you want to add some more complexity or steps in your DSPy program itself? Do you want to use multiple optimizers in a sequence? Iterative development is key. DSPy gives you the pieces to do that incrementally: iterating on your data, your program structure, your assertions, your metric, and your optimization steps. 
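To ground steps 2 and 5 of the guide above, here is a minimal sketch, assuming a toy question-answering task. The `validate_answer` name and the exact-match check are illustrative choices, not part of the DSPy API.

```python
import dspy

# Step 2: almost every task can start as a single chain-of-thought module.
turbo = dspy.OpenAI(model='gpt-3.5-turbo')
dspy.settings.configure(lm=turbo)
program = dspy.ChainOfThought("question -> answer")

# Step 5: a metric is just a Python function over an example and a prediction.
def validate_answer(example, pred, trace=None):
    # Illustrative exact-match check; swap in whatever "good" means for your task.
    return example.answer.lower() == pred.answer.lower()
```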
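Continuing that sketch for step 7, compiling with `BootstrapFewShot` (the optimizer suggested above for very small datasets) might look roughly like this; the two-example `trainset` is a stand-in for your own data.

```python
import dspy
from dspy.teleprompt import BootstrapFewShot

# A stand-in training set; real tasks want at least ~10 examples per the guide.
trainset = [
    dspy.Example(question="What is 1 + 1?", answer="2").with_inputs("question"),
    dspy.Example(question="What is 3 + 4?", answer="7").with_inputs("question"),
]

# Bootstrap few-shot demonstrations for `program` using the metric from above.
teleprompter = BootstrapFewShot(metric=validate_answer, max_bootstrapped_demos=4)
compiled_program = teleprompter.compile(program, trainset=trainset)

print(compiled_program(question="What is 2 + 2?").answer)
```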
diff --git a/docs-page/docs/cheatsheet.md b/docs/docs/cheatsheet.md similarity index 100% rename from docs-page/docs/cheatsheet.md rename to docs/docs/cheatsheet.md diff --git a/docs-page/docs/deep-dive/_category_.json b/docs/docs/deep-dive/_category_.json similarity index 100% rename from docs-page/docs/deep-dive/_category_.json rename to docs/docs/deep-dive/_category_.json diff --git a/docs-page/docs/deep-dive/data-handling/_category_.json b/docs/docs/deep-dive/data-handling/_category_.json similarity index 100% rename from docs-page/docs/deep-dive/data-handling/_category_.json rename to docs/docs/deep-dive/data-handling/_category_.json diff --git a/docs-page/docs/deep-dive/data-handling/built-in-datasets.mdx b/docs/docs/deep-dive/data-handling/built-in-datasets.mdx similarity index 100% rename from docs-page/docs/deep-dive/data-handling/built-in-datasets.mdx rename to docs/docs/deep-dive/data-handling/built-in-datasets.mdx diff --git a/docs-page/docs/deep-dive/data-handling/examples.mdx b/docs/docs/deep-dive/data-handling/examples.mdx similarity index 100% rename from docs-page/docs/deep-dive/data-handling/examples.mdx rename to docs/docs/deep-dive/data-handling/examples.mdx diff --git a/docs-page/docs/deep-dive/data-handling/img/data-loading.png b/docs/docs/deep-dive/data-handling/img/data-loading.png similarity index 100% rename from docs-page/docs/deep-dive/data-handling/img/data-loading.png rename to docs/docs/deep-dive/data-handling/img/data-loading.png diff --git a/docs-page/docs/deep-dive/data-handling/loading-custom-data.mdx b/docs/docs/deep-dive/data-handling/loading-custom-data.mdx similarity index 100% rename from docs-page/docs/deep-dive/data-handling/loading-custom-data.mdx rename to docs/docs/deep-dive/data-handling/loading-custom-data.mdx diff --git a/docs-page/docs/deep-dive/language_model_clients/_category_.json b/docs/docs/deep-dive/language_model_clients/_category_.json similarity index 100% rename from docs-page/docs/deep-dive/language_model_clients/_category_.json rename to docs/docs/deep-dive/language_model_clients/_category_.json diff --git a/docs-page/docs/deep-dive/language_model_clients/custom-lm-client.mdx b/docs/docs/deep-dive/language_model_clients/custom-lm-client.mdx similarity index 100% rename from docs-page/docs/deep-dive/language_model_clients/custom-lm-client.mdx rename to docs/docs/deep-dive/language_model_clients/custom-lm-client.mdx diff --git a/docs-page/docs/deep-dive/language_model_clients/remote_models/Anyscale.mdx b/docs/docs/deep-dive/language_model_clients/remote_models/Anyscale.mdx similarity index 100% rename from docs-page/docs/deep-dive/language_model_clients/remote_models/Anyscale.mdx rename to docs/docs/deep-dive/language_model_clients/remote_models/Anyscale.mdx diff --git a/docs-page/docs/deep-dive/language_model_clients/remote_models/Cohere.mdx b/docs/docs/deep-dive/language_model_clients/remote_models/Cohere.mdx similarity index 100% rename from docs-page/docs/deep-dive/language_model_clients/remote_models/Cohere.mdx rename to docs/docs/deep-dive/language_model_clients/remote_models/Cohere.mdx diff --git a/docs-page/docs/deep-dive/language_model_clients/remote_models/OpenAI.mdx b/docs/docs/deep-dive/language_model_clients/remote_models/OpenAI.mdx similarity index 100% rename from docs-page/docs/deep-dive/language_model_clients/remote_models/OpenAI.mdx rename to docs/docs/deep-dive/language_model_clients/remote_models/OpenAI.mdx diff --git a/docs-page/docs/deep-dive/language_model_clients/remote_models/Together.mdx 
b/docs/docs/deep-dive/language_model_clients/remote_models/Together.mdx similarity index 100% rename from docs-page/docs/deep-dive/language_model_clients/remote_models/Together.mdx rename to docs/docs/deep-dive/language_model_clients/remote_models/Together.mdx diff --git a/docs-page/docs/deep-dive/language_model_clients/remote_models/_category_.json b/docs/docs/deep-dive/language_model_clients/remote_models/_category_.json similarity index 100% rename from docs-page/docs/deep-dive/language_model_clients/remote_models/_category_.json rename to docs/docs/deep-dive/language_model_clients/remote_models/_category_.json diff --git a/docs-page/docs/deep-dive/modules/_category_.json b/docs/docs/deep-dive/modules/_category_.json similarity index 100% rename from docs-page/docs/deep-dive/modules/_category_.json rename to docs/docs/deep-dive/modules/_category_.json diff --git a/docs-page/docs/deep-dive/modules/assertions.mdx b/docs/docs/deep-dive/modules/assertions.mdx similarity index 100% rename from docs-page/docs/deep-dive/modules/assertions.mdx rename to docs/docs/deep-dive/modules/assertions.mdx diff --git a/docs-page/docs/deep-dive/modules/chain-of-thought-with-hint.mdx b/docs/docs/deep-dive/modules/chain-of-thought-with-hint.mdx similarity index 100% rename from docs-page/docs/deep-dive/modules/chain-of-thought-with-hint.mdx rename to docs/docs/deep-dive/modules/chain-of-thought-with-hint.mdx diff --git a/docs-page/docs/deep-dive/modules/guide.mdx b/docs/docs/deep-dive/modules/guide.mdx similarity index 86% rename from docs-page/docs/deep-dive/modules/guide.mdx rename to docs/docs/deep-dive/modules/guide.mdx index db7cb7bac1..29c349e329 100644 --- a/docs-page/docs/deep-dive/modules/guide.mdx +++ b/docs/docs/deep-dive/modules/guide.mdx @@ -14,7 +14,7 @@ Remember that **DSPy program** is just Python code that calls one or more **DSPy A **DSPy module** is a building block for programs that use LMs. -- Each built-in module abstracts a **prompting technique** (like chain of thought or ReAct). Crucially, they are generalized to handle any [DSPy Signature](/docs/building-blocks/2-signatures.md). +- Each built-in module abstracts a **prompting technique** (like chain of thought or ReAct). Crucially, they are generalized to handle any [DSPy Signature](https://dspy-docs.vercel.app/docs/building-blocks/signatures). - A DSPy module has **learnable parameters** (i.e., the little pieces comprising the prompt and the LM weights) and can be invoked (called) to process inputs and return outputs. @@ -22,15 +22,15 @@ A **DSPy module** is a building block for programs that use LMs. ### 2) What DSPy Modules are currently built-in? -1. **[`dspy.Predict`](/api/modules/Predict)**: +1. **[`dspy.Predict`](https://dspy-docs.vercel.app/api/modules/Predict)**: -2. **[`dspy.ChainOfThought`](/api/modules/ChainOfThought)**: +2. **[`dspy.ChainOfThought`](https://dspy-docs.vercel.app/api/modules/ChainOfThought)**: -3. **[`dspy.ProgramOfThought`](/api/modules/ProgramOfThought)**: +3. **[`dspy.ProgramOfThought`](https://dspy-docs.vercel.app/api/modules/ProgramOfThought)**: -4. **[`dspy.ReAct`](/api/modules/ReAct)**: +4. **[`dspy.ReAct`](https://dspy-docs.vercel.app/api/modules/ReAct)**: -5. **[`dspy.MultiChainComparison`](/api/modules/MultiChainComparison)**: +5. **[`dspy.MultiChainComparison`](https://dspy-docs.vercel.app/api/modules/MultiChainComparison)**: We also have some function-style modules: @@ -41,7 +41,7 @@ We also have some function-style modules: Let's start with the most fundamental one, `dspy.Predict`. 
Internally, all of the others are just built using it! -We'll assume you are already at least a little familiar with [DSPy signatures](/docs/building-blocks/2-signatures.md), which are declarative specs for defining the behavior of any module we use in DSPy. +We'll assume you are already at least a little familiar with [DSPy signatures](https://dspy-docs.vercel.app/docs/building-blocks/signatures), which are declarative specs for defining the behavior of any module we use in DSPy. To use a module, we first **declare** it by giving it a signature. Then we **call** the module with the input arguments, and extract the output fields! @@ -127,7 +127,7 @@ True The others are very similar, `dspy.ReAct` and `dspy.ProgramOfThought` etc. They mainly change the internal behavior with which your signature is implemented! -Check out further examples in [each module's respective guide](/api/category/modules/). +Check out further examples in [each module's respective guide](https://dspy-docs.vercel.app/docs/category/modules). ### 5) How do I compose multiple modules into a bigger program? diff --git a/docs-page/docs/deep-dive/modules/program-of-thought.mdx b/docs/docs/deep-dive/modules/program-of-thought.mdx similarity index 100% rename from docs-page/docs/deep-dive/modules/program-of-thought.mdx rename to docs/docs/deep-dive/modules/program-of-thought.mdx diff --git a/docs-page/docs/deep-dive/modules/react.mdx b/docs/docs/deep-dive/modules/react.mdx similarity index 100% rename from docs-page/docs/deep-dive/modules/react.mdx rename to docs/docs/deep-dive/modules/react.mdx diff --git a/docs-page/docs/deep-dive/modules/retrieve.mdx b/docs/docs/deep-dive/modules/retrieve.mdx similarity index 100% rename from docs-page/docs/deep-dive/modules/retrieve.mdx rename to docs/docs/deep-dive/modules/retrieve.mdx diff --git a/docs-page/docs/deep-dive/retrieval_models_clients/Azure.mdx b/docs/docs/deep-dive/retrieval_models_clients/Azure.mdx similarity index 100% rename from docs-page/docs/deep-dive/retrieval_models_clients/Azure.mdx rename to docs/docs/deep-dive/retrieval_models_clients/Azure.mdx diff --git a/docs-page/docs/deep-dive/retrieval_models_clients/ChromadbRM.mdx b/docs/docs/deep-dive/retrieval_models_clients/ChromadbRM.mdx similarity index 100% rename from docs-page/docs/deep-dive/retrieval_models_clients/ChromadbRM.mdx rename to docs/docs/deep-dive/retrieval_models_clients/ChromadbRM.mdx diff --git a/docs-page/docs/deep-dive/retrieval_models_clients/ColBERTv2.mdx b/docs/docs/deep-dive/retrieval_models_clients/ColBERTv2.mdx similarity index 100% rename from docs-page/docs/deep-dive/retrieval_models_clients/ColBERTv2.mdx rename to docs/docs/deep-dive/retrieval_models_clients/ColBERTv2.mdx diff --git a/docs-page/docs/deep-dive/retrieval_models_clients/_category_.json b/docs/docs/deep-dive/retrieval_models_clients/_category_.json similarity index 100% rename from docs-page/docs/deep-dive/retrieval_models_clients/_category_.json rename to docs/docs/deep-dive/retrieval_models_clients/_category_.json diff --git a/docs-page/docs/deep-dive/retrieval_models_clients/custom-rm-client.mdx b/docs/docs/deep-dive/retrieval_models_clients/custom-rm-client.mdx similarity index 100% rename from docs-page/docs/deep-dive/retrieval_models_clients/custom-rm-client.mdx rename to docs/docs/deep-dive/retrieval_models_clients/custom-rm-client.mdx diff --git a/docs-page/docs/deep-dive/retrieval_models_clients/img/io_rm_module.png b/docs/docs/deep-dive/retrieval_models_clients/img/io_rm_module.png similarity index 100% rename from 
docs-page/docs/deep-dive/retrieval_models_clients/img/io_rm_module.png rename to docs/docs/deep-dive/retrieval_models_clients/img/io_rm_module.png diff --git a/docs-page/docs/deep-dive/signature/_category_.json b/docs/docs/deep-dive/signature/_category_.json similarity index 100% rename from docs-page/docs/deep-dive/signature/_category_.json rename to docs/docs/deep-dive/signature/_category_.json diff --git a/docs-page/docs/deep-dive/signature/executing-signatures.mdx b/docs/docs/deep-dive/signature/executing-signatures.mdx similarity index 100% rename from docs-page/docs/deep-dive/signature/executing-signatures.mdx rename to docs/docs/deep-dive/signature/executing-signatures.mdx diff --git a/docs-page/docs/deep-dive/signature/img/class_based_prompt_creation.png b/docs/docs/deep-dive/signature/img/class_based_prompt_creation.png similarity index 100% rename from docs-page/docs/deep-dive/signature/img/class_based_prompt_creation.png rename to docs/docs/deep-dive/signature/img/class_based_prompt_creation.png diff --git a/docs-page/docs/deep-dive/signature/img/dspy_signatures.png b/docs/docs/deep-dive/signature/img/dspy_signatures.png similarity index 100% rename from docs-page/docs/deep-dive/signature/img/dspy_signatures.png rename to docs/docs/deep-dive/signature/img/dspy_signatures.png diff --git a/docs-page/docs/deep-dive/signature/img/prompt_creation.png b/docs/docs/deep-dive/signature/img/prompt_creation.png similarity index 100% rename from docs-page/docs/deep-dive/signature/img/prompt_creation.png rename to docs/docs/deep-dive/signature/img/prompt_creation.png diff --git a/docs-page/docs/deep-dive/signature/understanding-signatures.mdx b/docs/docs/deep-dive/signature/understanding-signatures.mdx similarity index 100% rename from docs-page/docs/deep-dive/signature/understanding-signatures.mdx rename to docs/docs/deep-dive/signature/understanding-signatures.mdx diff --git a/docs-page/docs/deep-dive/teleprompter/_category_.json b/docs/docs/deep-dive/teleprompter/_category_.json similarity index 100% rename from docs-page/docs/deep-dive/teleprompter/_category_.json rename to docs/docs/deep-dive/teleprompter/_category_.json diff --git a/docs-page/docs/deep-dive/teleprompter/bootstrap-fewshot.mdx b/docs/docs/deep-dive/teleprompter/bootstrap-fewshot.mdx similarity index 96% rename from docs-page/docs/deep-dive/teleprompter/bootstrap-fewshot.mdx rename to docs/docs/deep-dive/teleprompter/bootstrap-fewshot.mdx index 5d30cdd094..4d15d67094 100644 --- a/docs-page/docs/deep-dive/teleprompter/bootstrap-fewshot.mdx +++ b/docs/docs/deep-dive/teleprompter/bootstrap-fewshot.mdx @@ -10,7 +10,7 @@ When compiling a DSPy program, we generally invoke a teleprompter, which is an o ## Setting up a Sample Pipeline -We'll be making a basic answer generation pipeline over GSM8K dataset that we saw in the [Minimal Example](/docs/quick-start/minimal-example), we won't be changing anything in it! So let's start by configuring the LM which will be OpenAI LM client with `gpt-3.5-turbo` as the LLM in use. +We'll be making a basic answer generation pipeline over GSM8K dataset that we saw in the [Minimal Example](https://dspy-docs.vercel.app/docs/quick-start/minimal-example), we won't be changing anything in it! So let's start by configuring the LM which will be OpenAI LM client with `gpt-3.5-turbo` as the LLM in use. 
```python import dspy diff --git a/docs-page/docs/deep-dive/teleprompter/img/signature_optimizer.png b/docs/docs/deep-dive/teleprompter/img/signature_optimizer.png similarity index 100% rename from docs-page/docs/deep-dive/teleprompter/img/signature_optimizer.png rename to docs/docs/deep-dive/teleprompter/img/signature_optimizer.png diff --git a/docs-page/docs/deep-dive/teleprompter/img/signature_optimizer_process.png b/docs/docs/deep-dive/teleprompter/img/signature_optimizer_process.png similarity index 100% rename from docs-page/docs/deep-dive/teleprompter/img/signature_optimizer_process.png rename to docs/docs/deep-dive/teleprompter/img/signature_optimizer_process.png diff --git a/docs-page/docs/deep-dive/teleprompter/img/signature_optimizer_process_v2.png b/docs/docs/deep-dive/teleprompter/img/signature_optimizer_process_v2.png similarity index 100% rename from docs-page/docs/deep-dive/teleprompter/img/signature_optimizer_process_v2.png rename to docs/docs/deep-dive/teleprompter/img/signature_optimizer_process_v2.png diff --git a/docs-page/docs/deep-dive/teleprompter/img/signature_optimizer_process_v3.png b/docs/docs/deep-dive/teleprompter/img/signature_optimizer_process_v3.png similarity index 100% rename from docs-page/docs/deep-dive/teleprompter/img/signature_optimizer_process_v3.png rename to docs/docs/deep-dive/teleprompter/img/signature_optimizer_process_v3.png diff --git a/docs-page/docs/deep-dive/teleprompter/img/signature_optimizer_process_v4.png b/docs/docs/deep-dive/teleprompter/img/signature_optimizer_process_v4.png similarity index 100% rename from docs-page/docs/deep-dive/teleprompter/img/signature_optimizer_process_v4.png rename to docs/docs/deep-dive/teleprompter/img/signature_optimizer_process_v4.png diff --git a/docs-page/docs/deep-dive/teleprompter/signature-optimizer.mdx b/docs/docs/deep-dive/teleprompter/signature-optimizer.mdx similarity index 100% rename from docs-page/docs/deep-dive/teleprompter/signature-optimizer.mdx rename to docs/docs/deep-dive/teleprompter/signature-optimizer.mdx diff --git a/docs-page/docs/faqs.md b/docs/docs/faqs.md similarity index 87% rename from docs-page/docs/faqs.md rename to docs/docs/faqs.md index b65b2e517c..3308ba5a53 100644 --- a/docs-page/docs/faqs.md +++ b/docs/docs/faqs.md @@ -16,7 +16,7 @@ The **DSPy** philosophy and abstraction differ significantly from other librarie ## Basic Usage -**How should I use DSPy for my task?** We wrote a [seven-step guide](/docs/building-blocks/solving_your_task.md) on this. In short, using DSPy is an iterative process. You first define your task and the metrics you want to maximize, and prepare a few example inputs — typically without labels (or only with labels for the final outputs, if your metric requires them). Then, you build your pipeline by selecting built-in layers (`modules`) to use, giving each layer a `signature` (input/output spec), and then calling your modules freely in your Python code. Lastly, you use a DSPy `optimizer` to compile your code into high-quality instructions, automatic few-shot examples, or updated LM weights for your LM. +**How should I use DSPy for my task?** We wrote an [eight-step guide](https://dspy-docs.vercel.app/docs/building-blocks/solving_your_task) on this. In short, using DSPy is an iterative process. You first define your task and the metrics you want to maximize, and prepare a few example inputs — typically without labels (or only with labels for the final outputs, if your metric requires them). 
Then, you build your pipeline by selecting built-in layers (`modules`) to use, giving each layer a `signature` (input/output spec), and then calling your modules freely in your Python code. Lastly, you use a DSPy `optimizer` to compile your code into high-quality instructions, automatic few-shot examples, or updated LM weights for your LM. **How do I convert my complex prompt into a DSPy pipeline?** See the same answer above. @@ -34,11 +34,11 @@ You can specify the generation of long responses as a `dspy.OutputField`. To ens - **How do I define my own metrics? Can metrics return a float?** -You can define metrics as simply Python functions that process model generations and evaluate them based on user-defined requirements. Metrics can compare existent data (e.g. gold labels) to model predictions or they can be used to assess various components of an output using validation feedback from LMs (e.g. LLMs-as-Judges). Metrics can return `bool`, `int`, and `float` types scores. Check out the official [Metrics docs](/docs/building-blocks/5-metrics.md) to learn more about defining custom metrics and advanced evaluations using AI feedback and/or DSPy programs. +You can define metrics as simply Python functions that process model generations and evaluate them based on user-defined requirements. Metrics can compare existent data (e.g. gold labels) to model predictions or they can be used to assess various components of an output using validation feedback from LMs (e.g. LLMs-as-Judges). Metrics can return `bool`, `int`, and `float` types scores. Check out the official [Metrics docs](https://dspy-docs.vercel.app/docs/building-blocks/metrics) to learn more about defining custom metrics and advanced evaluations using AI feedback and/or DSPy programs. - **How expensive or slow is compiling??** -To reflect compiling metrics, we highlight an experiment for reference, compiling the [`SimplifiedBaleen`](/docs/tutorials/simplified-baleen.md) using the `dspy.BootstrapFewShotWithRandomSearch` optimizer on the `gpt-3.5-turbo-1106` model over 7 candidate programs and 10 threads. We report that compiling this program takes around 6 minutes with 3200 API calls, 2.7 million input tokens and 156,000 output tokens, reporting a total cost of $3 USD (at the current pricing of the OpenAI model). +To reflect compiling metrics, we highlight an experiment for reference, compiling the [`SimplifiedBaleen`](https://dspy-docs.vercel.app/docs/tutorials/simplified-baleen) using the [`dspy.BootstrapFewShotWithRandomSearch`](https://dspy-docs.vercel.app/docs/deep-dive/teleprompter/bootstrap-fewshot) optimizer on the `gpt-3.5-turbo-1106` model over 7 candidate programs and 10 threads. We report that compiling this program takes around 6 minutes with 3200 API calls, 2.7 million input tokens and 156,000 output tokens, reporting a total cost of $3 USD (at the current pricing of the OpenAI model). Compiling DSPy `optimizers` naturally will incur additional LM calls, but we substantiate this overhead with minimalistic executions with the goal of maximizing performance. This invites avenues to enhance performance of smaller models by compiling DSPy programs with larger models to learn enhanced behavior during compile-time and propagate such behavior to the tested smaller model during inference-time. @@ -88,7 +88,7 @@ Modules can be frozen by setting their `._compiled` attribute to be True, indica You can specify JSON-type descriptions in the `desc` field of the long-form signature `dspy.OutputField` (e.g. 
`output = dspy.OutputField(desc='key-value pairs')`). -If you notice outputs are still not conforming to JSON formatting, try Asserting this constraint! Check out [Assertions](/docs/building-blocks/7-assertions.md) (or the next question!) +If you notice outputs are still not conforming to JSON formatting, try Asserting this constraint! Check out [Assertions](https://dspy-docs.vercel.app/docs/building-blocks/assertions) (or the next question!) - **How do I use DSPy assertions?** @@ -127,4 +127,4 @@ If all variables seem stable, you may be experiencing timeouts or backoff errors **How can I add my favorite LM or vector store?** -Check out these walkthroughs on setting up a [Custom LM client](/docs/deep-dive/language_model_clients/custom-lm-client.mdx) and [Custom RM client](/docs/deep-dive/retrieval_models_clients/custom-rm-client.mdx). +Check out these walkthroughs on setting up a [Custom LM client](https://dspy-docs.vercel.app/docs/deep-dive/language_model_clients/custom-lm-client) and [Custom RM client](https://dspy-docs.vercel.app/docs/deep-dive/retrieval_models_clients/custom-rm-client). diff --git a/docs-page/docs/intro.md b/docs/docs/intro.md similarity index 100% rename from docs-page/docs/intro.md rename to docs/docs/intro.md diff --git a/docs-page/docs/quick-start/_category_.json b/docs/docs/quick-start/_category_.json similarity index 100% rename from docs-page/docs/quick-start/_category_.json rename to docs/docs/quick-start/_category_.json diff --git a/docs-page/docs/quick-start/installation.mdx b/docs/docs/quick-start/installation.mdx similarity index 100% rename from docs-page/docs/quick-start/installation.mdx rename to docs/docs/quick-start/installation.mdx diff --git a/docs-page/docs/quick-start/minimal-example.mdx b/docs/docs/quick-start/minimal-example.mdx similarity index 100% rename from docs-page/docs/quick-start/minimal-example.mdx rename to docs/docs/quick-start/minimal-example.mdx diff --git a/docs-page/docs/tutorials/_category_.json b/docs/docs/tutorials/_category_.json similarity index 100% rename from docs-page/docs/tutorials/_category_.json rename to docs/docs/tutorials/_category_.json diff --git a/docs-page/docs/tutorials/other_tutorial.md b/docs/docs/tutorials/other_tutorial.md similarity index 89% rename from docs-page/docs/tutorials/other_tutorial.md rename to docs/docs/tutorials/other_tutorial.md index 4545eea7df..6f678da9ca 100644 --- a/docs-page/docs/tutorials/other_tutorial.md +++ b/docs/docs/tutorials/other_tutorial.md @@ -9,8 +9,8 @@ sidebar_position: 99999 | **Level** | **Tutorial** | **Run in Colab** | **Description** | | --- | ------------- | ------------- | ------------- | | Beginner | [**Getting Started**](https://github.com/stanfordnlp/dspy/blob/main/intro.ipynb) | [](https://colab.research.google.com/github/stanfordnlp/dspy/blob/main/intro.ipynb) | Introduces the basic building blocks in DSPy. Tackles the task of complex question answering with HotPotQA. | -| Beginner | [**Minimal Working Example**](/docs/quick-start/minimal-example) | N/A | Builds and optimizes a very simple chain-of-thought program in DSPy for math question answering. Very short. | -| Beginner | [**Compiling for Tricky Tasks**](https://github.com/stanfordnlp/dspy/blob/main/examples/nli/scone/scone.ipynb) | N/A | Teaches LMs to reason about logical statements and negation. Uses GPT-4 to bootstrap few-shot CoT demonstations for GPT-3.5. Establishes a state-of-the-art result on [ScoNe](https://arxiv.org/abs/2305.19426). 
Contributed by [Chris Potts](https://twitter.com/ChrisGPotts/status/1740033519446057077). | +| Beginner | [**Minimal Working Example**](https://dspy-docs.vercel.app/docs/quick-start/minimal-example) | N/A | Builds and optimizes a very simple chain-of-thought program in DSPy for math question answering. Very short. | +| Beginner | [**Compiling for Tricky Tasks**](https://github.com/stanfordnlp/dspy/blob/main/examples/nli/scone/scone.ipynb) | N/A | Teaches LMs to reason about logical statements and negation. Uses GPT-4 to bootstrap few-shot CoT demonstrations for GPT-3.5. Establishes a state-of-the-art result on [ScoNe](https://arxiv.org/abs/2305.19426). Contributed by [Chris Potts](https://twitter.com/ChrisGPotts/status/1740033519446057077). | | Beginner | [**Local Models & Custom Datasets**](https://github.com/stanfordnlp/dspy/blob/main/skycamp2023.ipynb) | [](https://colab.research.google.com/github/stanfordnlp/dspy/blob/main/skycamp2023.ipynb) | Illustrates two different things together: how to use local models (Llama-2-13B in particular) and how to use your own data examples for training and development. | Intermediate | [**The DSPy Paper**](https://arxiv.org/abs/2310.03714) | N/A | Sections 3, 5, 6, and 7 of the DSPy paper can be consumed as a tutorial. They include explained code snippets, results, and discussions of the abstractions and API. | Intermediate | [**DSPy Assertions**](https://arxiv.org/abs/2312.13382) | [](https://colab.research.google.com/github/stanfordnlp/dspy/blob/main/examples/longformqa/longformqa_assertions.ipynb) | Introduces example of applying DSPy Assertions while generating long-form responses to questions with citations. Presents comparative evaluation in both zero-shot and compiled settings. diff --git a/docs-page/docs/tutorials/rag.md b/docs/docs/tutorials/rag.md similarity index 92% rename from docs-page/docs/tutorials/rag.md rename to docs/docs/tutorials/rag.md index 0560d10b53..fe45e8b308 100644 --- a/docs-page/docs/tutorials/rag.md +++ b/docs/docs/tutorials/rag.md @@ -10,7 +10,7 @@ RAG ensures LLMs can dynamically utilize real-time knowledge even if not origina ## Configuring LM and RM -We'll start by setting up the language model (LM) and retrieval model (RM), which **DSPy** supports through multiple [LM](/docs/category/remote-language-model-clients) and [RM](/docs/category/retrieval-model-clients) APIs and [local models hosting](/docs/category/local-language-model-clients). +We'll start by setting up the language model (LM) and retrieval model (RM), which **DSPy** supports through multiple [LM](https://dspy-docs.vercel.app/docs/category/language-model-clients) and [RM](https://dspy-docs.vercel.app/docs/category/retrieval-model-clients) APIs and [local models hosting](https://dspy-docs.vercel.app/docs/category/local-language-model-clients). In this notebook, we'll work with GPT-3.5 (`gpt-3.5-turbo`) and the `ColBERTv2` retriever (a free server hosting a Wikipedia 2017 "abstracts" search index containing the first paragraph of each article from this [2017 dump](https://hotpotqa.github.io/wiki-readme.html)). We configure the LM and RM within DSPy, allowing DSPy to internally call the respective module when needed for generation or retrieval. @@ -47,7 +47,7 @@ len(trainset), len(devset) ## Building Signatures -Now that we have the data loaded, let's start defining the [signatures](/docs/building-blocks/signatures) for the sub-tasks of our pipeline. 
+Now that we have the data loaded, let's start defining the [signatures](https://dspy-docs.vercel.app/docs/building-blocks/signatures) for the sub-tasks of our pipeline. We can identify our simple input `question` and output `answer`, but since we are building out a RAG pipeline, we wish to utilize some contextual information from our ColBERT corpus. So let's define our signature: `context, question --> answer`. @@ -64,7 +64,7 @@ We include small descriptions for the `context` and `answer` fields to define mo ## Building the Pipeline -We will build our RAG pipeline as a [DSPy module](/docs/building-blocks/modules) which will require two methods: +We will build our RAG pipeline as a [DSPy module](https://dspy-docs.vercel.app/docs/building-blocks/modules) which will require two methods: * The `__init__` method will simply declare the sub-modules it needs: `dspy.Retrieve` and `dspy.ChainOfThought`. The latter is defined to implement our `GenerateAnswer` signature. * The `forward` method will describe the control flow of answering the question using the modules we have: Given a question, we'll search for the top-3 relevant passages and then feed them as context for answer generation. @@ -88,7 +88,7 @@ class RAG(dspy.Module): ##### Compiling the RAG program -Having defined this program, let's now **compile** it. [Compiling a program](/docs/building-blocks/optimizers) will update the parameters stored in each module. In our setting, this is primarily in the form of collecting and selecting good demonstrations for inclusion within the prompt(s). +Having defined this program, let's now **compile** it. [Compiling a program](https://dspy-docs.vercel.app/docs/building-blocks/optimizers) will update the parameters stored in each module. In our setting, this is primarily in the form of collecting and selecting good demonstrations for inclusion within the prompt(s). Compiling depends on three things: diff --git a/docs-page/docs/tutorials/simplified-baleen.md b/docs/docs/tutorials/simplified-baleen.md similarity index 97% rename from docs-page/docs/tutorials/simplified-baleen.md rename to docs/docs/tutorials/simplified-baleen.md index 2f9f2e6d68..6a7e94ca2b 100644 --- a/docs-page/docs/tutorials/simplified-baleen.md +++ b/docs/docs/tutorials/simplified-baleen.md @@ -10,7 +10,7 @@ The standard approach for this challenge in retrieval-augmented NLP literature i ## Configuring LM and RM -We'll start by setting up the language model (LM) and retrieval model (RM), which **DSPy** supports through multiple [LM](/docs/category/remote-language-model-clients) and [RM](/docs/category/retrieval-model-clients) APIs and [local models hosting](/docs/category/local-language-model-clients). +We'll start by setting up the language model (LM) and retrieval model (RM), which **DSPy** supports through multiple [LM](https://dspy-docs.vercel.app/docs/category/language-model-clients) and [RM](https://dspy-docs.vercel.app/docs/category/retrieval-model-clients) APIs and [local models hosting](https://dspy-docs.vercel.app/docs/category/local-language-model-clients). In this notebook, we'll work with GPT-3.5 (`gpt-3.5-turbo`) and the `ColBERTv2` retriever (a free server hosting a Wikipedia 2017 "abstracts" search index containing the first paragraph of each article from this [2017 dump](https://hotpotqa.github.io/wiki-readme.html)). We configure the LM and RM within DSPy, allowing DSPy to internally call the respective module when needed for generation or retrieval. 
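For readers following along, here is a minimal configuration sketch matching the setup described in these tutorials (the ColBERTv2 URL is the public demo endpoint used in the DSPy intro notebook and is an assumption that may change):

```python
import dspy

# GPT-3.5 as the language model, per the tutorials above.
turbo = dspy.OpenAI(model='gpt-3.5-turbo')

# Free ColBERTv2 server over the Wikipedia 2017 "abstracts" index.
# The URL is the demo endpoint from the DSPy intro notebook; adjust if it moves.
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')

# Register both so modules like dspy.Retrieve and dspy.ChainOfThought
# can call them implicitly during generation and retrieval.
dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts)
```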
diff --git a/docs/docs_requirements.txt b/docs/docs_requirements.txt deleted file mode 100644 index 9837430cf5..0000000000 --- a/docs/docs_requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -mkdocs -mkdocs-gen-files -mkdocs-material -mkdocs-material-extensions -mkdocstrings-python diff --git a/docs-page/docusaurus.config.ts b/docs/docusaurus.config.ts similarity index 100% rename from docs-page/docusaurus.config.ts rename to docs/docusaurus.config.ts diff --git a/docs/guides/README.md b/docs/guides/README.md deleted file mode 100644 index 4f699d07e4..0000000000 --- a/docs/guides/README.md +++ /dev/null @@ -1,3 +0,0 @@ -For the guides, please visit: - -**https://dspy-docs.vercel.app/docs/category/dspy-building-blocks** diff --git a/docs/guides/assertions.ipynb b/docs/guides/assertions.ipynb deleted file mode 100644 index 711e6e3d69..0000000000 --- a/docs/guides/assertions.ipynb +++ /dev/null @@ -1,77 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%load_ext autoreload\n", - "%autoreload 2\n", - "import sys; sys.path.append('/future/u/okhattab/repos/public/tmp/dspy')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\"DSPy7\n", - "\n", - "## Guide: **DSPy Assertions**\n", - "\n", - "[](https://colab.research.google.com/github/stanfordnlp/dspy/blob/main/docs/guides/signatures.ipynb)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Quick Recap\n", - "\n", - "This guide assumes you followed the [intro tutorial]() to build your first few DSPy programs.\n", - "\n", - "Remember that a **DSPy program** is just Python code that calls one or more DSPy modules, like `dspy.Predict` or `dspy.ChainOfThought`, to use LMs." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 1) What is a DSPy Assertion?\n", - "\n", - "While we prepare this guide, please [read the DSPy assertions paper](https://arxiv.org/abs/2312.13382) and follow the examples in it." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Install `dspy-ai` if needed. Then set up a default language model.\n", - "# TODO: Add a graceful line for OPENAI_API_KEY.\n", - "\n", - "try: import dspy\n", - "except ImportError:\n", - " %pip install dspy-ai\n", - " import dspy\n", - "\n", - "dspy.configure(lm=dspy.OpenAI(model='gpt-3.5-turbo-1106'))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "language_info": { - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/guides/language_model_details/launching_mlc.md b/docs/guides/language_model_details/launching_mlc.md deleted file mode 100644 index 87bf65d0d0..0000000000 --- a/docs/guides/language_model_details/launching_mlc.md +++ /dev/null @@ -1,48 +0,0 @@ -## Setting up an MLC language model - -### Prerequisites - -Install the required packages using the following commands: - -```shell -pip install --no-deps --pre --force-reinstall mlc-ai-nightly-cu118 mlc-chat-nightly-cu118 -f https://mlc.ai/wheels -pip install transformers -git lfs install -``` - -Adjust the pip wheels according to your OS/platform by referring to the provided commands in [MLC packages](https://mlc.ai/package/). - - -### Running MLC Llama-2 models - -1. Create a directory for prebuilt models: - -```shell -mkdir -p dist/prebuilt -``` - -2. 
Clone the necessary libraries from the repository: - -```shell -git clone https://github.com/mlc-ai/binary-mlc-llm-libs.git dist/prebuilt/lib -cd dist/prebuilt -``` - -3. Choose a Llama-2 model from [MLC LLMs](https://huggingface.co/mlc-ai) and clone the model repository: - -```shell -git clone https://huggingface.co/mlc-ai/mlc-chat-Llama-2-7b-chat-hf-q4f16_1 -``` - -### Sending requests to the server - -Initialize the `ChatModuleClient` within your program with the desired parameters. Here's an example call: - -```python -model = 'dist/prebuilt/mlc-chat-Llama-2-7b-chat-hf-q4f16_1' -model_path = 'dist/prebuilt/lib/Llama-2-7b-chat-hf-q4f16_1-cuda.so' - -llama = dspy.ChatModuleClient(model=model, model_path=model_path) -``` - -Please refer to the [official MLC repository](https://github.com/mlc-ai/mlc-llm) for more detailed [docs](https://mlc.ai/mlc-llm/docs/get_started/try_out.html). diff --git a/docs/guides/language_model_details/launching_ollama.md b/docs/guides/language_model_details/launching_ollama.md deleted file mode 100644 index 242fc6ec71..0000000000 --- a/docs/guides/language_model_details/launching_ollama.md +++ /dev/null @@ -1,41 +0,0 @@ -## Setting up an Ollama language model - -Ollama is a good software tool that allows you to run LLMs locally, such as Mistral, Llama2, and Phi. -The following are the instructions to install and run Ollama. - -### Prerequisites - -Install Ollama by following the instructions from this page: - -- https://ollama.ai - -Download model: `ollama pull` - -Download a model by running the `ollama pull` command. You can download Mistral, Llama2, and Phi. - -```bash -# download mistral -ollama pull mistral -``` - -Here is the list of other models you can download: -- https://ollama.ai/library - -### Running Ollama model - -Run model: `ollama run` - -You can test a model by running the model with the `ollama run` command. - -```bash -# run mistral -ollama run mistral -``` - -### Sending requests to the server - -Here is the code to load a model through Ollama: - -```python -lm = dspy.OllamaLocal(model='mistral') -``` diff --git a/docs/guides/language_model_details/launching_tgi.md b/docs/guides/language_model_details/launching_tgi.md deleted file mode 100644 index d45c2dea51..0000000000 --- a/docs/guides/language_model_details/launching_tgi.md +++ /dev/null @@ -1,60 +0,0 @@ -## Launching a Text Generation Inference (TGI) Server - -### Prerequisites - -- Docker must be installed on your system. If you don't have Docker installed, you can get it from [here](https://docs.docker.com/get-docker/). - -### Setting up the Text-Generation-Inference Server - -1. Clone the Text-Generation-Inference repository from GitHub by executing the following command: - -```bash -git clone https://github.com/huggingface/text-generation-inference.git -``` - -2. Change into the cloned repository directory: - -```bash -cd text-generation-inference -``` - -3. Execute the Docker command under the "Get Started" section to run the server: - -```bash -model=meta-llama/Llama-2-7b-hf # set to the specific Hugging Face model ID you wish to use. -num_shard=1 # set to the number of shards you wish to use. -volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run - -docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id $model --num-shard $num_shard -``` - -This command will start the server and make it accessible at `http://localhost:8080`. 
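Before wiring the server into DSPy, it can help to verify that the endpoint responds. A minimal smoke test, assuming TGI's standard `/generate` route and payload shape:

```python
import requests

# Quick sanity check against the TGI server launched above.
resp = requests.post(
    "http://localhost:8080/generate",
    json={"inputs": "The capital of France is", "parameters": {"max_new_tokens": 16}},
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["generated_text"])
```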
- -If you want to connect to [Meta Llama 2 models](https://huggingface.co/meta-llama), make sure to use version 9.3 (or higher) of the docker image (ghcr.io/huggingface/text-generation-inference:0.9.3) and pass in your huggingface token as an environment variable. - -```bash -docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data -e HUGGING_FACE_HUB_TOKEN={your_token} ghcr.io/huggingface/text-generation-inference:latest --model-id $model --num-shard $num_shard -``` - -### Sending requests to the server - -After setting up the text-generation-inference server and ensuring that it displays "Connected" when it's running, you can interact with it using the `HFClientTGI`. - -Initialize the `HFClientTGI` within your program with the desired parameters. Here is an example call: - - ```python - lm = dspy.HFClientTGI(model="meta-llama/Llama-2-7b-hf", port=8080, url="http://localhost") - ``` - - Customize the `model`, `port`, and `url` according to your requirements. The `model` parameter should be set to the specific Hugging Face model ID you wish to use. - - -### FAQs - -1. If your model doesn't require any shards, you still need to set a value for `num_shard`, but you don't need to include the parameter `--num-shard` on the command line. - -2. If your model runs into any "token exceeded" issues, you can set the following parameters on the command line to adjust the input length and token limit: - - `--max-input-length`: Set the maximum allowed input length for the text. - - `--max-total-tokens`: Set the maximum total tokens allowed for text generation. - -Please refer to the [official TGI repository](https://github.com/huggingface/text-generation-inference) for detailed docs. diff --git a/docs/guides/language_model_details/launching_vllm.md b/docs/guides/language_model_details/launching_vllm.md deleted file mode 100644 index f75d538b0c..0000000000 --- a/docs/guides/language_model_details/launching_vllm.md +++ /dev/null @@ -1,31 +0,0 @@ -## Launching a vLLM Server - -### Setting up the vLLM Server - -Follow these steps to set up the vLLM Server: - -1. Build the server from source by following the instructions provided in the [Build from Source guide](https://vllm.readthedocs.io/en/latest/getting_started/installation.html#build-from-source). - -2. Start the server by running the following command, and specify your desired model, host, and port using the appropriate arguments. The default server address is http://localhost:8000. - -Example command: - -```bash - python -m vllm.entrypoints.openai.api_server --model mosaicml/mpt-7b --port 8000 -``` - -This will launch the vLLM server. - -### Sending requests to the server - -After setting up the vLLM server and ensuring that it displays "Connected" when it's running, you can interact with it using the `HFClientVLLM`. - -Initialize the `HFClientVLLM` within your program with the desired parameters. Here is an example call: - -```python - lm = dspy.HFClientVLLM(model="mosaicml/mpt-7b", port=8000, url="http://localhost") -``` - -Customize the `model`, `port`, `url`, and `max_tokens` according to your requirements. The `model` parameter should be set to the specific Hugging Face model ID you wish to use. - -Please refer to the [official vLLM repository](https://github.com/vllm-project/vllm) for more detailed information and documentation. 
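Whichever local backend you pick (TGI, vLLM, Ollama, or MLC), the resulting client plugs into DSPy the same way. A short sketch, assuming the vLLM server from the example above is running:

```python
import dspy

# Point DSPy at the local vLLM server started above.
lm = dspy.HFClientVLLM(model="mosaicml/mpt-7b", port=8000, url="http://localhost")
dspy.settings.configure(lm=lm)

# DSPy modules now route their generations through the local model.
qa = dspy.Predict('question -> answer')
print(qa(question="What is the capital of France?").answer)
```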
diff --git a/docs/guides/language_models.ipynb b/docs/guides/language_models.ipynb deleted file mode 100644 index 8313bf9143..0000000000 --- a/docs/guides/language_models.ipynb +++ /dev/null @@ -1,257 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "%load_ext autoreload\n", - "%autoreload 2\n", - "import sys; sys.path.append('/future/u/okhattab/repos/public/tmp/dspy')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\"DSPy7\n", - "\n", - "## Guide: **Language Models**\n", - "\n", - "[](https://colab.research.google.com/github/stanfordnlp/dspy/blob/main/docs/guides/signatures.ipynb)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Quick Recap\n", - "\n", - "This guide assumes you followed the [intro tutorial]() to build your first few DSPy programs.\n", - "\n", - "Remember that a **DSPy program** is just Python code that calls one or more DSPy modules, like `dspy.Predict` or `dspy.ChainOfThought`, to use LMs." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 1) Short Intro to LMs in DSPy\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "# Install `dspy-ai` if needed.\n", - "\n", - "try: import dspy\n", - "except ImportError:\n", - " %pip install dspy-ai\n", - " import dspy" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 2) Supported LM clients.\n", - "\n", - "#### Remote LMs.\n", - "\n", - "These models are managed services. You just need to sign up and obtain an API key.\n", - "\n", - "1. `dspy.OpenAI` for GPT-3.5 and GPT-4.\n", - "\n", - "2. `dspy.Cohere`\n", - "\n", - "3. `dspy.Anyscale` for hosted Llama2 models.\n", - "\n", - "4. `dspy.Together` for hosted various open source models.\n", - "\n", - "#### Local LMs.\n", - "\n", - "You need to host these models on your own GPU(s). Below, we include pointers for how to do that.\n", - "\n", - "4. `dspy.HFClientTGI`: for HuggingFace models through the Text Generation Inference (TGI) system. [Tutorial: How do I install and launch the TGI server?](language_model_details/launching_tgi.md)\n", - "\n", - "5. `dspy.HFClientVLLM`: for HuggingFace models through vLLM. [Tutorial: How do I install and launch the vLLM server?](language_model_details/launching_vllm.md)\n", - "\n", - "6. `dspy.HFModel` (experimental)\n", - "\n", - "7. `dspy.Ollama` (experimental) for open source models through [Ollama](https://ollama.com). [Tutorial: How do I install and use Ollama on a local computer?](language_model_details/launching_ollama.md)\n", - "\n", - "\n", - "8. `dspy.ChatModuleClient` (experimental): [How do I install and use MLC?](language_model_details/launching_mlc.md)\n", - "\n", - "\n", - "\n", - "If there are other clients you want added, let us know!" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 3) Setting up the LM client.\n", - "\n", - "You can just call the constructor that connects to the LM. Then, use `dspy.configure` to declare this as the default LM.\n", - "\n", - "For example, for OpenAI, you can do it as follows." 
- ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "# TODO: Add a graceful line for OPENAI_API_KEY.\n", - "\n", - "gpt3_turbo = dspy.OpenAI(model='gpt-3.5-turbo-1106', max_tokens=300)\n", - "gpt4_turbo = dspy.OpenAI(model='gpt-4-1106-preview', max_tokens=300)\n", - "\n", - "# cohere = dspy.Cohere(...)\n", - "# anyscale = dspy.Anyscale(...)\n", - "# together = dspy.Together(...)\n", - "# ollama = dspy.OllamaLocal(...)\n", - "# tgi_llama2 = dspy.HFClientTGI(model=\"meta-llama/Llama-2-7b-hf\", port=8080, url=\"http://localhost\")\n", - "\n", - "dspy.configure(lm=gpt3_turbo)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 4) Using a different LM within a code block.\n", - "\n", - "The default LM above is GPT-3.5, `gpt3_turbo`. What if I want to run a piece of code with, say, GPT-4 or LLama-2?\n", - "\n", - "Instead of changing the default LM, you can just change it inside a block of code.\n", - "\n", - "**Tip:** Using `dspy.configure` and `dspy.context` is thread-safe!" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The castle David Gregory inherited has 7 floors.\n", - "The number of floors in the castle David Gregory inherited cannot be determined with the information provided.\n" - ] - } - ], - "source": [ - "qa = dspy.ChainOfThought('question -> answer')\n", - "\n", - "response = qa(question=\"How many floors are in the castle David Gregory inherited?\")\n", - "print(response.answer)\n", - "\n", - "with dspy.context(lm=gpt4_turbo):\n", - " response = qa(question=\"How many floors are in the castle David Gregory inherited?\")\n", - " print(response.answer)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 5) Tips and Tricks.\n", - "\n", - "In DSPy, all LM calls are cached. If you repeat the same call, you will get the same outputs. (If you change the inputs or configurations, you will get new outputs.)\n", - "\n", - "To generate 5 outputs, you can use `n=5` in the module constructor, or pass `config=dict(n=5)` when invoking the module." - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[\"The specific number of floors in David Gregory's inherited castle is not provided here, so further research would be needed to determine the answer.\",\n", - " 'The castle David Gregory inherited has 4 floors.',\n", - " 'The castle David Gregory inherited has 5 floors.',\n", - " 'David Gregory inherited 10 floors in the castle.',\n", - " 'The castle David Gregory inherited has 5 floors.']" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "qa = dspy.ChainOfThought('question -> answer', n=5)\n", - "\n", - "response = qa(question=\"How many floors are in the castle David Gregory inherited?\")\n", - "response.completions.answer" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If you just call `qa(...)` in a loop with the same input, it will always return the same value! That's by design.\n", - "\n", - "To loop and generate one output at a time with the same input, bypass the cache by making sure each request is (slightly) unique, as below." 
- ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The specific number of floors in David Gregory's inherited castle is not provided here, so further research would be needed to determine the answer.\n", - "It is not possible to determine the exact number of floors in the castle David Gregory inherited without specific information about the castle's layout and history.\n", - "The castle David Gregory inherited has 5 floors.\n", - "We need more information to determine the number of floors in the castle David Gregory inherited.\n", - "The castle David Gregory inherited has a total of 6 floors.\n" - ] - } - ], - "source": [ - "for idx in range(5):\n", - " response = qa(question=\"How many floors are in the castle David Gregory inherited?\", config=dict(temperature=0.7+0.0001*idx))\n", - " print(response.answer)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "py39_nov2023", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.18" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/guides/modules.ipynb b/docs/guides/modules.ipynb deleted file mode 100644 index e755312413..0000000000 --- a/docs/guides/modules.ipynb +++ /dev/null @@ -1,287 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "%load_ext autoreload\n", - "%autoreload 2\n", - "import sys; sys.path.append('/future/u/okhattab/repos/public/tmp/dspy')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\"DSPy7\n", - "\n", - "## Guide: **DSPy Modules**\n", - "\n", - "[](https://colab.research.google.com/github/stanfordnlp/dspy/blob/main/docs/guides/signatures.ipynb)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Quick Recap\n", - "\n", - "This guide assumes you followed the [intro tutorial]() to build your first few DSPy programs.\n", - "\n", - "Remember that **DSPy program** is just Python code that calls one or more **DSPy modules**, like `dspy.Predict` or `dspy.ChainOfThought`, to use LMs." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 1) What is a DSPy Module?\n", - "\n", - "A **DSPy module** is a building block for programs that use LMs.\n", - "\n", - "- Each built-in module abstracts a **prompting technique** (like chain of thought or ReAct). Crucially, they are generalized to handle any [DSPy Signature]().\n", - "\n", - "- A DSPy module has **learnable parameters** (i.e., the little pieces comprising the prompt and the LM weights) and can be invoked (called) to process inputs and return outputs.\n", - "\n", - "- Multiple modules can be composed into bigger modules (programs). DSPy modules are inspired directly by NN modules in PyTorch, but applied to LM programs." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 2) Why should I use a DSPy Module?\n", - "\n", - "TODO. I typically take this as self-evident, but I'll spell it out here." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "# Install `dspy-ai` if needed. 
Then set up a default language model.\n", - "# TODO: Add a graceful line for OPENAI_API_KEY.\n", - "\n", - "try: import dspy\n", - "except ImportError:\n", - " %pip install dspy-ai\n", - " import dspy\n", - "\n", - "dspy.configure(lm=dspy.OpenAI(model='gpt-3.5-turbo-1106'))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 3) What DSPy Modules are currently built-in?\n", - "\n", - "1. **`dspy.Predict`**:\n", - "\n", - "2. **`dspy.ChainOfThought`**: \n", - "\n", - "3. **`dspy.ProgramOfThought`**:\n", - "\n", - "4. **`dspy.ReAct`**:\n", - "\n", - "5. **`dspy.MultiChainComparison`**:\n", - "\n", - "\n", - "We also have some function-style modules:\n", - "\n", - "6. **`dspy.majority`**:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 4) How do I use a built-in module, like `dspy.Predict` or `dspy.ChainOfThought`?\n", - "\n", - "Let's start with the most fundamental one, `dspy.Predict`. Internally, all of the others are just built using it!\n", - "\n", - "We'll assume you are already at least a little familiar with [DSPy signatures](), which are declarative specs for defining the behavior of any module we use in DSPy.\n", - "\n", - "To use a module, we first **declare** it by giving it a signature. Then we **call** the module with the input arguments, and extract the output fields!" - ] - }, - { - "cell_type": "code", - "execution_count": 35, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Positive\n" - ] - } - ], - "source": [ - "sentence = \"it's a charming and often affecting journey.\" # example from the SST-2 dataset.\n", - "\n", - "# 1) Declare with a signature.\n", - "classify = dspy.Predict('sentence -> sentiment')\n", - "\n", - "# 2) Call with input argument(s). \n", - "response = classify(sentence=sentence)\n", - "\n", - "# 3) Access the output.\n", - "print(response.sentiment)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "When we declare a module, we can pass configuration keys to it.\n", - "\n", - "Below, we'll pass `n=5` to request five completions. We can also pass `temperature` or `max_len`, etc.\n", - "\n", - "Let's use `dspy.ChainOfThought`. In many cases, simply swapping `dspy.ChainOfThought` in place of `dspy.Predict` improves quality." 
- ] - }, - { - "cell_type": "code", - "execution_count": 40, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['One great thing about the ColBERT retrieval model is its superior efficiency and effectiveness compared to other models.',\n", - " 'Its ability to efficiently retrieve relevant information from large document collections.',\n", - " 'One great thing about the ColBERT retrieval model is its superior performance compared to other models and its efficient use of pre-trained language models.',\n", - " 'One great thing about the ColBERT retrieval model is its superior efficiency and accuracy compared to other models.',\n", - " 'One great thing about the ColBERT retrieval model is its ability to incorporate user feedback and support complex queries.']" - ] - }, - "execution_count": 40, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "question = \"What's something great about the ColBERT retrieval model?\"\n", - "\n", - "# 1) Declare with a signature, and pass some config.\n", - "classify = dspy.ChainOfThought('question -> answer', n=5)\n", - "\n", - "# 2) Call with input argument.\n", - "response = classify(question=question)\n", - "\n", - "# 3) Access the outputs.\n", - "response.completions.answer" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's discuss the output object here.\n", - "\n", - "The `dspy.ChainOfThought` module will generally inject a `rationale` before the output field(s) of your signature.\n", - "\n", - "Let's inspect the (first) rationale and answer!" - ] - }, - { - "cell_type": "code", - "execution_count": 42, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Rationale: produce the answer. We can consider the fact that ColBERT has shown to outperform other state-of-the-art retrieval models in terms of efficiency and effectiveness. It uses contextualized embeddings and performs document retrieval in a way that is both accurate and scalable.\n", - "Answer: One great thing about the ColBERT retrieval model is its superior efficiency and effectiveness compared to other models.\n" - ] - } - ], - "source": [ - "print(f\"Rationale: {response.rationale}\")\n", - "print(f\"Answer: {response.answer}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This is accessible whether we request one or many completions.\n", - "\n", - "We can also access the different completions as a list of `Prediction`s or as several lists, one for each field." - ] - }, - { - "cell_type": "code", - "execution_count": 45, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "True" - ] - }, - "execution_count": 45, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "response.completions[3].rationale == response.completions.rationale[3]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 5) How do I use more complex built-in modules?\n", - "\n", - "The others are very similar, `dspy.ReAct` and `dspy.ProgramOfThought` etc. They mainly change the internal behavior with which your signature is implemented!\n", - "\n", - "More examples soon!" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 6) How do I compose multiple modules into a bigger program?\n", - "\n", - "DSPy is just Python code that uses modules in any control flow you like. 
(There's some magic internally at `compile` time to trace your LM calls.)\n", - "\n", - "What this means is that, you can just call the modules freely. No weird abstractions for chaining calls.\n", - "\n", - "This is basically PyTorch's design approach for define-by-run / dynamic computation graphs. Refer to the intro tutorials for examples." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "py39_nov2023", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.18" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/guides/optimizers.ipynb b/docs/guides/optimizers.ipynb deleted file mode 100644 index dcdb8c3d00..0000000000 --- a/docs/guides/optimizers.ipynb +++ /dev/null @@ -1,168 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The autoreload extension is already loaded. To reload it, use:\n", - " %reload_ext autoreload\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "UsageError: unrecognized arguments: import sys; sys.path.append('/future/u/okhattab/repos/public/tmp/dspy')\n" - ] - } - ], - "source": [ - "%load_ext autoreload\n", - "%autoreload 2\n", - "import sys; sys.path.append('/future/u/okhattab/repos/public/tmp/dspy')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\"DSPy7\n", - "\n", - "## Guide: **DSPy Optimizers**\n", - "\n", - "Formerly called **DSPy Teleprompters**. We will be making an official name update.\n", - "\n", - "[](https://colab.research.google.com/github/stanfordnlp/dspy/blob/main/docs/guides/signatures.ipynb)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Quick Recap\n", - "\n", - "This guide assumes you followed the [intro tutorial]() to build your first few DSPy programs.\n", - "\n", - "Remember that a **DSPy program** is just Python code that calls one or more DSPy modules, like `dspy.Predict` or `dspy.ChainOfThought`, to use LMs." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 1) What is a DSPy Optimizer?\n", - "\n", - "A **DSPy optimizer** is an algorithm that can tune the parameters of a DSPy program (i.e., the prompts and the LM weights) to maximize the metrics you specify, like accuracy.\n", - "\n", - "There are many built-in optimizers in DSPy. They apply different strategies to tune your programs. A typical DSPy optimizer takes three things:\n", - "\n", - "- Your **DSPy program**. This may be a single module (e.g., `dspy.Predict`) or a complex multi-module program.\n", - "\n", - "- Your **metric**. This is a function that evaluates the output of your program, and assigns it a score (higher is better).\n", - "\n", - "- A few **training inputs**. This may be very small (i.e., only 5 or 10 examples) or incomplete (only inputs to your program, without any labels).\n", - "\n", - "Your training data could also be large or complete. DSPy can leverage having a lot of data, but you can start small." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 2) **What** does a DSPy Optimizer tune? 
**How** does it tune them?\", - "\n", - "Traditional deep neural networks (DNNs) can be optimized with gradient descent, given a loss function and some training data.\n", - "\n", - "DSPy programs consist of multiple calls to LMs, stacked together as [DSPy modules](). Each DSPy module has internal parameters of three kinds: (1) the LM weights, (2) the instructions, and (3) demonstrations of the input/output behavior.\n", - "\n", - "Given a metric, DSPy can optimize all of these three with multi-stage optimization algorithms. These can combine gradient descent (for LM weights) and LM-driven optimization (for the instructions), but primarily rely on discrete optimization for creating and validating demonstrations. DSPy Demonstrations are like few-shot examples, but they're far more powerful. They can be created from scratch, given your program, and their creation and selection can be optimized in many effective ways.\n", - "\n", - "In many cases, we found that compiling leads to better prompts than humans write. Not because DSPy optimizers are more creative than humans, but simply because they can try more things and tune the metrics directly." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Install `dspy-ai` if needed. Then set up a default language model.\n", - "# TODO: Add a graceful line for OPENAI_API_KEY.\n", - "\n", - "try: import dspy\n", - "except ImportError:\n", - " %pip install dspy-ai\n", - " import dspy\n", - "\n", - "dspy.configure(lm=dspy.OpenAI(model='gpt-3.5-turbo-1106'))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 3) What DSPy Optimizers are currently available?\n", - "\n", - "All of these can be accessed via `from dspy.teleprompt import *`.\n", - "\n", - "#### Automatic Few-Shot Learning\n", - "\n", - "1. **`LabeledFewShot`**:\n", - "\n", - "2. **`BootstrapFewShot`**: \n", - "\n", - "3. **`BootstrapFewShotWithRandomSearch`**:\n", - "\n", - "4. **`BootstrapFewShotWithOptuna`**:\n", - "\n", - "\n", - "#### Automatic Instruction Optimization\n", - "\n", - "5. **`SignatureOptimizer`**:\n", - "\n", - "\n", - "#### Automatic Finetuning\n", - "\n", - "6. **`BootstrapFinetune`**:\n", - "\n", - "\n", - "#### Program Transformations\n", - "\n", - "7. **`KNNFewShot`**:\n", - "\n", - "8. 
**`Ensemble`**:\n", - "\n", - "\n", - "#### Which one should I use?\n", - "\n", - "As a rule of thumb, if you don't know where to start, use `BootstrapFewShotWithRandomSearch`.\n", - "\n", - "There are some old docs for:\n", - "\n", - "- [`dspy.teleprompt.LabeledFewShot`](docs/teleprompters.md#telepromptlabeledfewshot)\n", - "- [`dspy.teleprompt.BootstrapFewShot`](docs/teleprompters.md#telepromptbootstrapfewshot)\n", - "- [`dspy.teleprompt.BootstrapFewShotWithRandomSearch`](docs/teleprompters.md#telepromptbootstrapfewshotwithrandomsearch)\n", - "- [`dspy.teleprompt.BootstrapFinetune`](docs/teleprompters.md#telepromptbootstrapfinetune)\n", - "- [`dspy.teleprompt.Ensemble`](docs/teleprompters.md#telepromptensemble)\n", - "- `dspy.teleprompt.kNN`\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [] - } - ], - "metadata": { - "language_info": { - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/guides/signatures.ipynb b/docs/guides/signatures.ipynb deleted file mode 100644 index 7b3d1a82fa..0000000000 --- a/docs/guides/signatures.ipynb +++ /dev/null @@ -1,334 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "%load_ext autoreload\n", - "%autoreload 2\n", - "import sys; sys.path.append('/future/u/okhattab/repos/public/tmp/dspy')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\"DSPy7\n", - "\n", - "## Guide: **DSPy Signatures**\n", - "\n", - "[](https://colab.research.google.com/github/stanfordnlp/dspy/blob/main/docs/guides/signatures.ipynb)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Quick Recap\n", - "\n", - "This guide assumes you followed the [intro tutorial]() to build your first few DSPy programs.\n", - "\n", - "Remember that a **DSPy program** is just Python code that calls one or more **DSPy modules**, like `dspy.Predict` or `dspy.ChainOfThought`, to use LMs." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 1) What is a DSPy Signature?\n", - "\n", - "When we assign tasks to LMs in DSPy, we specify the behavior we need as a Signature.\n", - "\n", - "**A signature is a declarative specification of input/output behavior of a DSPy module.**\n", - "\n", - "You're probably familiar with function signatures. The differences are that:\n", - "\n", - "- While typical function signatures just _describe_ things, DSPy Signatures _define and control the behavior_ of modules.\n", - "\n", - "- The field names matter in DSPy Signatures. You express semantic roles in plain English: a `question` is different from an `answer`, a `sql_query` is different from `python_code`." - ] - }, - { - "cell_type": "code", - "execution_count": 36, - "metadata": {}, - "outputs": [], - "source": [ - "# Install `dspy-ai` if needed. Then set up a default language model.\n", - "# TODO: Add a graceful line for OPENAI_API_KEY.\n", - "\n", - "try: import dspy\n", - "except ImportError:\n", - " %pip install dspy-ai\n", - " import dspy\n", - "\n", - "dspy.configure(lm=dspy.OpenAI(model='gpt-3.5-turbo-1106', max_tokens=300))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 2) Why should I use a DSPy Signature?\n", - "\n", - "**tl;dr** For modular and clean code, in which LM calls can be optimized into high-quality prompts (or automatic finetunes).\n", - "\n", - "**Long Answer:** Most people coerce LMs to do tasks by hacking long, brittle prompts. 
Or by collecting/generating data for fine-tuning.\n", - "\n", - "Writing signatures is far more modular, adaptive, and reproducible than hacking at prompts or finetunes. The DSPy compiler will figure out how to build a highly-optimized prompt for your LM (or finetune your small LM) for your signature, on your data, and within your pipeline. In many cases, we found that compiling leads to better prompts than humans write. Not because DSPy optimizers are more creative than humans, but simply because they can try more things and tune the metrics directly." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 3) **Short** DSPy Signatures\n", - "\n", - "Signatures can be defined as a short string, with argument names that define semantic roles for inputs/outputs.\n", - "\n", - "1. Question Answering: `\"question -> answer\"`\n", - "\n", - "2. Sentiment Classification: `\"sentence -> sentiment\"`\n", - "\n", - "3. Summarization: `\"document -> summary\"`\n", - "\n", - "Your signatures can also have multiple input/output fields.\n", - "\n", - "4. Retrieval-Augmented Question Answering: `\"context, question -> answer\"`\n", - "\n", - "5. Multiple-Choice Question Answering with Reasoning: `\"question, choices -> reasoning, selection\"`\n", - "\n", - "\n", - "**Tip:** For fields, any valid variable names work! Field names should be semantically meaningful, but start simple and don't prematurely optimize keywords! Leave that kind of hacking to the DSPy compiler. For example, for summarization, it's probably fine to say `\"document -> summary\"`, `\"text -> gist\"`, or `\"long_context -> tldr\"`." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 4) Example 1: Sentiment Classification" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'Positive'" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "sentence = \"it's a charming and often affecting journey.\" # example from the SST-2 dataset.\n", - "\n", - "classify = dspy.Predict('sentence -> sentiment')\n", - "classify(sentence=sentence).sentiment" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Above, we covered a simple example with `dspy.Predict`.\n", - "\n", - "Below, let's use `dspy.ChainOfThought`." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 5) Example 2: Summarization" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The 21-year-old Lee made seven appearances and scored one goal for West Ham last season. He had loan spells in League One with Blackpool and Colchester United, scoring twice for the latter. He has now signed a contract with Barnsley, but the length of the contract has not been revealed.\n" - ] - } - ], - "source": [ - "# Example from the XSum dataset.\n", - "document = \"\"\"The 21-year-old made seven appearances for the Hammers and netted his only goal for them in a Europa League qualification round match against Andorran side FC Lustrains last season. Lee had two loan spells in League One last term, with Blackpool and then Colchester United. He scored twice for the U's but was unable to save them from relegation. The length of Lee's contract with the promoted Tykes has not been revealed. 
Find all the latest football transfers on our dedicated page.\"\"\"\n", - "\n", - "summarize = dspy.ChainOfThought('document -> summary')\n", - "response = summarize(document=document)\n", - "\n", - "print(response.summary)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Many DSPy modules (except `dspy.Predict`) return auxiliary information by expanding your signature under the hood.\n", - "\n", - "For example, `dspy.ChainOfThought` also adds a `rationale` field that includes the LM's reasoning before it generates the output `summary`." - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Rationale: produce the summary. We need to highlight the key points about Lee's performance for West Ham, his loan spells in League One, and his new contract with Barnsley. We also need to mention that his contract length has not been disclosed.\n" - ] - } - ], - "source": [ - "print(\"Rationale:\", response.rationale)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 6) Examples of _long_ DSPy Signatures\n", - "\n", - "For some advanced tasks, you need more verbose signatures. This is typically to:\n", - "\n", - "1. Clarify something about the nature of the task (expressed below as a `docstring`).\n", - "\n", - "2. Supply hints on the nature of an input field, expressed as a `desc` keyword argument for `dspy.InputField`.\n", - "\n", - "2. Supply constraints on an output field, expressed as a `desc` keyword argument for `dspy.OutputField." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 7) Example C: Classification\n", - "\n", - "Notice how the docstring contains (minimal) instructions, which in this case are necessary to have a fully-defined task.\n", - "\n", - "Some optimizers in DSPy, like `SignatureOptimizer`, can take this simple docstring and then generate more effective variants if needed." - ] - }, - { - "cell_type": "code", - "execution_count": 37, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Prediction(\n", - " sentiment='Fear'\n", - ")" - ] - }, - "execution_count": 37, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "class Emotion(dspy.Signature):\n", - " \"\"\"Classify emotion among sadness, joy, love, anger, fear, surprise.\"\"\"\n", - " \n", - " sentence = dspy.InputField()\n", - " sentiment = dspy.OutputField()\n", - "\n", - "sentence = \"i started feeling a little vulnerable when the giant spotlight started blinding me\" # from dair-ai/emotion\n", - "\n", - "classify = dspy.Predict(Emotion)\n", - "classify(sentence=sentence)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 8) Example D: A metric that evaluates faithfulness to citations" - ] - }, - { - "cell_type": "code", - "execution_count": 40, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Prediction(\n", - " rationale=\"produce the faithfulness. We know that Lee had two loan spells in League One last term, with Blackpool and then Colchester United. He scored twice for the U's but was unable to save them from relegation. 
However, there is no mention of him scoring three goals for Colchester United.\",\n", - " faithfulness='False'\n", - ")" - ] - }, - "execution_count": 40, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "class CheckCitationFaithfulness(dspy.Signature):\n", - " \"\"\"Verify that the text is based on the provided context.\"\"\"\n", - "\n", - " context = dspy.InputField(desc=\"facts here are assumed to be true\")\n", - " text = dspy.InputField()\n", - " faithfulness = dspy.OutputField(desc=\"True/False indicating if text is faithful to context\")\n", - "\n", - "context = \"The 21-year-old made seven appearances for the Hammers and netted his only goal for them in a Europa League qualification round match against Andorran side FC Lustrains last season. Lee had two loan spells in League One last term, with Blackpool and then Colchester United. He scored twice for the U's but was unable to save them from relegation. The length of Lee's contract with the promoted Tykes has not been revealed. Find all the latest football transfers on our dedicated page.\"\n", - "\n", - "text = \"Lee scored 3 goals for Colchester United.\"\n", - "\n", - "faithfulness = dspy.ChainOfThought(CheckCitationFaithfulness)\n", - "faithfulness(context=context, text=text)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 9) Building modules & compiling them\n", - "\n", - "While signatures are covenient for prototyping with structured inputs/outputs, that's not the main reason to use them!\n", - "\n", - "You should compose multiple signatures into bigger [DSPy modules]() and [compile]() these modules into optimized prompts and finetunes." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "py39_nov2023", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.18" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/index.md b/docs/index.md deleted file mode 100644 index c72e6c223a..0000000000 --- a/docs/index.md +++ /dev/null @@ -1,58 +0,0 @@ -# 🌟👋 Welcome to DSPy -- The framework for programming—not prompting—foundation models 🌐🚀 - - - - - - - - - -
- - - - -## 🎯 The Vision Behind DSPy - -**DSPy** is a framework for developing **high-quality systems** with LMs. While prompting LMs can quickly build (brittle) demos, the best LM systems generally break down problems into steps and tune the prompts or LM weights of each step well. As a bonus, these systems use small LMs to save costs. - -This is hard as we usually don't have data to tune each of these steps. **DSPy** treats prompts and LM weights as parameters to be optimized in LM pipelines, given the metrics you want to maximize. - -To make this possible: - -- [x] **DSPy** provides **composable and declarative modules** for instructing LMs in a familiar Pythonic syntax. It upgrades "prompting techniques" like chain-of-thought and self-reflection from hand-adapted _string manipulation tricks_ into truly modular _generalized operations that learn to adapt to your task_. - -- [x] **DSPy** introduces an **automatic compiler that teaches LMs** how to conduct the declarative steps in your program. Specifically, the **DSPy compiler** will internally _trace_ your program and then **craft high-quality prompts for large LMs (or train automatic finetunes for small LMs)** to teach them the steps of your task. - -- [x] **DSPy** has many modules and optimizers built-in and we want you to add more. Think of this like PyTorch but for LM pipelines, not DNNs. The **DSPy compiler** _bootstraps_ prompts and finetunes from minimal data **without needing manual labels for the intermediate steps** in your program. Instead of brittle "prompt engineering" with hacky string manipulation, you can explore a systematic space of modular and trainable pieces. - -- [x] For complex tasks, **DSPy** can routinely teach powerful models like `GPT-3.5` and local models like `T5-base` or `Llama2-13b` to be much more reliable at tasks. **DSPy** will compile the _same program_ into different few-shot prompts and/or finetunes for each LM. - -## 🚀 Analogy to Neural Networks - -When we build neural networks, we don't write manual _for-loops_ over lists of _hand-tuned_ floats. Instead, you might use a framework like [PyTorch](https://pytorch.org/) to compose declarative layers (e.g., `Convolution` or `Dropout`) and then use optimizers (e.g., SGD or Adam) to learn the parameters of the network. - -Ditto! **DSPy** gives you the right general-purpose modules (e.g., `ChainOfThought`, `Retrieve`, etc.) and takes care of optimizing their prompts _for your program_ and your metric, whatever they aim to do. Whenever you modify your code, your data, or your validation constraints, you can _compile_ your program again and **DSPy** will create new effective prompts that fit your changes. - -**Welcome to the future of LLM programming! 🌟🌐** diff --git a/docs/language_models_client.md b/docs/language_models_client.md deleted file mode 100644 index 55a28d1cfb..0000000000 --- a/docs/language_models_client.md +++ /dev/null @@ -1,313 +0,0 @@ -# LM Modules Documentation - -This documentation provides an overview of the DSPy Language Model Clients. 
- -### Quickstart - -```python -import dspy - -lm = dspy.OpenAI(model='gpt-3.5-turbo') - -prompt = "Translate the following English text to Spanish: 'Hi, how are you?'" -completions = lm(prompt, n=5, return_sorted=False) -for i, completion in enumerate(completions): - print(f"Completion {i+1}: {completion}") -``` - -## Supported LM Clients - -| LM Client | Jump To | -| --- | --- | -| OpenAI | [OpenAI Section](#openai) | -| AzureOpenAI | [Azure OpenAI Section](#azureopenai) | -| Cohere | [Cohere Section](#cohere) | -| TGI | [TGI Section](#tgi) | -| VLLM | [VLLM Section](#vllm) | -| Anyscale | [Anyscale Section](#anyscale) | -| Together | [Together Section](#together) | - -## OpenAI - -### Usage - -```python -lm = dspy.OpenAI(model='gpt-3.5-turbo') -``` - -### Constructor - -The constructor initializes the base class `LM` and verifies the provided arguments like the `api_provider`, `api_key`, and `api_base` to set up OpenAI request retrieval. The `kwargs` attribute is initialized with default values for relevant text generation parameters needed for communicating with the GPT API, such as `temperature`, `max_tokens`, `top_p`, `frequency_penalty`, `presence_penalty`, and `n`. - -```python -class OpenAI(LM): - def __init__( - self, - model: str = "text-davinci-002", - api_key: Optional[str] = None, - api_provider: Literal["openai"] = "openai", - model_type: Literal["chat", "text"] = None, - **kwargs, - ): -``` - - - -**Parameters:** -- `api_key` (_Optional[str]_, _optional_): API provider authentication token. Defaults to None. -- `api_provider` (_Literal["openai"]_, _optional_): API provider to use. Defaults to "openai". -- `model_type` (_Literal["chat", "text"]_): Specified model type to use. -- `**kwargs`: Additional language model arguments to pass to the API provider. - -### Methods - -#### `__call__(self, prompt: str, only_completed: bool = True, return_sorted: bool = False, **kwargs) -> List[Dict[str, Any]]` - -Retrieves completions from OpenAI by calling `request`. - -Internally, the method handles the specifics of preparing the request prompt and corresponding payload to obtain the response. - -After generation, the completions are post-processed based on the `model_type` parameter. If the parameter is set to 'chat', the generated content look like `choice["message"]["content"]`. Otherwise, the generated text will be `choice["text"]`. - -**Parameters:** -- `prompt` (_str_): Prompt to send to OpenAI. -- `only_completed` (_bool_, _optional_): Flag to return only completed responses and ignore completion due to length. Defaults to True. -- `return_sorted` (_bool_, _optional_): Flag to sort the completion choices using the returned averaged log-probabilities. Defaults to False. -- `**kwargs`: Additional keyword arguments for completion request. - -**Returns:** -- `List[Dict[str, Any]]`: List of completion choices. - -## AzureOpenAI - -### Usage - -```python -lm = dspy.AzureOpenAI(api_base='...', api_version='2023-12-01-preview', model='gpt-3.5-turbo') -``` - -### Constructor - -The constructor initializes the base class `LM` and verifies the provided arguments like the `api_provider`, `api_key`, and `api_base` to set up OpenAI request retrieval through Azure. The `kwargs` attribute is initialized with default values for relevant text generation parameters needed for communicating with the GPT API, such as `temperature`, `max_tokens`, `top_p`, `frequency_penalty`, `presence_penalty`, and `n`. 
- -```python -class AzureOpenAI(LM): - def __init__( - self, - api_base: str, - api_version: str, - model: str = "gpt-3.5-turbo-instruct", - api_key: Optional[str] = None, - model_type: Literal["chat", "text"] = None, - **kwargs, - ): -``` - - - -**Parameters:** -- `api_base` (str): Azure Base URL. -- `api_version` (str): Version identifier for Azure OpenAI API. -- `model` (_str_): Model to use. Defaults to `"gpt-3.5-turbo-instruct"`. -- `api_key` (_Optional[str]_, _optional_): API provider authentication token. Retrieves from `AZURE_OPENAI_KEY` environment variable if None. -- `model_type` (_Literal["chat", "text"]_): Specified model type to use, defaults to 'chat'. -- `**kwargs`: Additional language model arguments to pass to the API provider. - -### Methods - -#### `__call__(self, prompt: str, only_completed: bool = True, return_sorted: bool = False, **kwargs) -> List[Dict[str, Any]]` - -Retrieves completions from Azure OpenAI Endpoints by calling `request`. - -Internally, the method handles the specifics of preparing the request prompt and corresponding payload to obtain the response. - -After generation, the completions are post-processed based on the `model_type` parameter. If the parameter is set to 'chat', the generated content looks like `choice["message"]["content"]`. Otherwise, the generated text will be `choice["text"]`. - -**Parameters:** -- `prompt` (_str_): Prompt to send to Azure OpenAI. -- `only_completed` (_bool_, _optional_): Flag to return only completed responses and ignore completions truncated due to length. Defaults to True. -- `return_sorted` (_bool_, _optional_): Flag to sort the completion choices using the returned averaged log-probabilities. Defaults to False. -- `**kwargs`: Additional keyword arguments for completion request. - -**Returns:** -- `List[Dict[str, Any]]`: List of completion choices. - -## Cohere - -### Usage - -```python -lm = dsp.Cohere(model='command-nightly') -``` - -### Constructor - -The constructor initializes the base class `LM` and verifies the `api_key` to set up Cohere request retrieval. - -```python -class Cohere(LM): - def __init__( - self, - model: str = "command-nightly", - api_key: Optional[str] = None, - stop_sequences: List[str] = [], - ): -``` - -**Parameters:** -- `model` (_str_): Cohere pretrained model. Defaults to `command-nightly`. -- `api_key` (_Optional[str]_, _optional_): API authentication token for Cohere. Defaults to None. -- `stop_sequences` (_List[str]_, _optional_): List of stopping tokens to end generation. - -### Methods - -Refer to [`dspy.OpenAI`](#openai) documentation. - -## TGI - -### Usage - -```python -lm = dspy.HFClientTGI(model="meta-llama/Llama-2-7b-hf", port=8080, url="http://localhost") -``` - -### Prerequisites - -Refer to the [Text Generation-Inference Server](https://github.com/stanfordnlp/dspy/blob/local_models_docs/docs/using_local_models.md#text-generation-inference-server) section of the `Using Local Models` documentation. - -### Constructor - -The constructor initializes the `HFModel` base class and configures the client for communicating with the TGI server. It requires a `model` instance, communication `port` for the server, and the `url` for the server to host generate requests. Additional configuration can be provided via keyword arguments in `**kwargs`. - -```python -class HFClientTGI(HFModel): - def __init__(self, model, port, url="http://future-hgx-1", **kwargs): -``` - -**Parameters:** -- `model` (_HFModel_): Instance of Hugging Face model connected to the TGI server. -- `port` (_int_): Port for TGI server. -- `url` (_str_): Base URL where the TGI server is hosted.
-- `**kwargs`: Additional keyword arguments to configure the client. - -### Methods - -Refer to [`dspy.OpenAI`](#openai) documentation. - -## VLLM - -### Usage - -```python -lm = dspy.HFClientVLLM(model="meta-llama/Llama-2-7b-hf", port=8080, url="http://localhost") -``` - -### Prerequisites - -Refer to the [vLLM Server](https://github.com/stanfordnlp/dspy/blob/local_models_docs/docs/using_local_models.md#vllm-server) section of the `Using Local Models` documentation. - -### Constructor - -Refer to the [`dspy.HFClientTGI`](#tgi) documentation, replacing `HFClientTGI` with `HFClientVLLM`. - -### Methods - -Refer to [`dspy.OpenAI`](#openai) documentation. - -## Anyscale - -### Usage - -```python -lm = dspy.Anyscale(model="mistralai/Mistral-7B-Instruct-v0.1") -``` - -### Constructor - -The constructor initializes the base class `LM` and verifies the `api_key` for using the Anyscale API. -We expect the following environment variables to be set: -- `ANYSCALE_API_KEY`: API key for Anyscale. -- `ANYSCALE_API_BASE`: API base URL for Anyscale. - - -```python -class Anyscale(HFModel): - def __init__(self, model, **kwargs): -``` - -**Parameters:** -- `model` (_str_): model hosted on Anyscale. - -### Methods - -Refer to [`dspy.OpenAI`](#openai) documentation. - - -## Together - -### Usage - -```python -lm = dspy.Together(model="mistralai/Mistral-7B-v0.1") -``` - -### Constructor - -The constructor initializes the base class `LM` and verifies the `api_key` for using the Together API. -We expect the following environment variables to be set: -- `TOGETHER_API_KEY`: API key for Together. -- `TOGETHER_API_BASE`: API base URL for Together. - - -```python -class Together(HFModel): - def __init__(self, model, **kwargs): -``` - -**Parameters:** -- `model` (_str_): model hosted on Together. -- `stop` (_List[str]_, _optional_): List of stopping tokens to end generation. - -### Methods - -Refer to [`dspy.OpenAI`](#openai) documentation. - - -## Databricks (Model Serving Endpoints) - -### Usage -```python -lm = dspy.Databricks(model="databricks-mpt-30b-instruct") -``` - -### Constructor - -The constructor inherits from the `GPT3` class and verifies the Databricks authentication credentials for using the Databricks Model Serving API through the OpenAI SDK. -We expect the following environment variables to be set: -- `openai.api_key`: Databricks API key. -- `openai.base_url`: Databricks model endpoint URL. - -The `kwargs` attribute is initialized with default values for relevant text generation parameters needed for communicating with the Databricks OpenAI SDK, such as `temperature`, `max_tokens`, `top_p`, and `n`. However, it removes the `frequency_penalty` and `presence_penalty` arguments as these are not currently supported by the Databricks API. - -```python -class Databricks(GPT3): - def __init__( - self, - model: str, - api_key: Optional[str] = None, - api_base: Optional[str] = None, - model_type: Literal["chat", "text"] = None, - **kwargs, - ): -``` - -**Parameters:** -- `model` (_str_): model hosted on Databricks. - -`stop` (_List[str]_, _optional_): List of stopping tokens to end generation. -- `api_key` (_Optional[str]_): Databricks API key. Defaults to None. -- `api_base` (_Optional[str]_): Databricks model endpoint URL. Defaults to None. -- `model_type` (_Literal["chat", "text", "embeddings"]_): Specified model type to use. -- `**kwargs`: Additional language model arguments to pass to the API provider. - -### Methods - -Refer to [`dspy.OpenAI`](#openai) documentation.
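All of the clients above share the same calling convention, so once constructed they can be registered as the default LM for a DSPy program. A minimal sketch, assuming a Databricks serving endpoint named `databricks-mpt-30b-instruct` is deployed and the authentication environment variables described above are set:

```python
import dspy

# Illustrative sketch: the endpoint name below is an assumption,
# not a value guaranteed by this documentation.
lm = dspy.Databricks(model="databricks-mpt-30b-instruct")

# Register the client as the default LM for all DSPy modules.
dspy.settings.configure(lm=lm)

# Clients are directly callable and return a list of completions.
completions = lm("Translate the following English text to Spanish: 'Hi, how are you?'")
print(completions[0])
```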
\ No newline at end of file diff --git a/docs/modules.md b/docs/modules.md deleted file mode 100644 index 2c68cb441e..0000000000 --- a/docs/modules.md +++ /dev/null @@ -1,431 +0,0 @@ -# dspy.Modules Documentation - -This documentation provides an overview of the DSPy Modules. - -## DSPy Modules - -| Module | Jump To | -| --- | --- | -| Predict | [Predict Section](#dspypredict) | -| Retrieve | [Retrieve Section](#dspyretrieve) | -| ChainOfThought | [ChainOfThought Section](#dspychainofthought) | -| ChainOfThoughtWithHint | [ChainOfThoughtWithHint Section](#dspychainofthoughtwithhint) | -| MultiChainComparison | [MultiChainComparison Section](#dspymultichaincomparison) | -| ReAct | [ReAct Section](#dspyreact) | - -## dspy.Predict - -### Constructor - -The constructor initializes the `Predict` class and sets up its attributes, taking in the `signature` and additional config options. If the `signature` is a string, it processes the input and output fields, generates instructions, and creates a template for the specified `signature` type. - -```python -class Predict(Parameter): - def __init__(self, signature, **config): - self.stage = random.randbytes(8).hex() - self.signature = signature - self.config = config - self.reset() - - if isinstance(signature, str): - inputs, outputs = signature.split("->") - inputs, outputs = inputs.split(","), outputs.split(",") - inputs, outputs = [field.strip() for field in inputs], [field.strip() for field in outputs] - - assert all(len(field.split()) == 1 for field in (inputs + outputs)) - - inputs_ = ', '.join([f"`{field}`" for field in inputs]) - outputs_ = ', '.join([f"`{field}`" for field in outputs]) - - instructions = f"""Given the fields {inputs_}, produce the fields {outputs_}.""" - - inputs = {k: InputField() for k in inputs} - outputs = {k: OutputField() for k in outputs} - - for k, v in inputs.items(): - v.finalize(k, infer_prefix(k)) - - for k, v in outputs.items(): - v.finalize(k, infer_prefix(k)) - - self.signature = dsp.Template(instructions, **inputs, **outputs) -``` - -**Parameters:** -- `signature` (_Any_): Signature of predictive model. -- `**config` (_dict_): Additional configuration parameters for model. - -### Method - -#### `__call__(self, **kwargs)` - -This method serves as a wrapper for the `forward` method. It allows making predictions using the `Predict` class by providing keyword arguments. - -**Parameters:** -- `**kwargs`: Keyword arguments required for prediction. - -**Returns:** -- The result of the `forward` method. - -### Examples - -```python -#Define a simple signature for basic question answering -class BasicQA(dspy.Signature): - """Answer questions with short factoid answers.""" - question = dspy.InputField() - answer = dspy.OutputField(desc="often between 1 and 5 words") - -#Pass signature to Predict module -generate_answer = dspy.Predict(BasicQA) - -# Call the predictor on a particular input. -question='What is the color of the sky?' -pred = generate_answer(question=question) - -print(f"Question: {question}") -print(f"Predicted Answer: {pred.answer}") -``` - - -## dspy.Retrieve - -### Constructor - -The constructor initializes the `Retrieve` class and sets up its attributes, taking in `k` number of retrieval passages to return for a query.
- -```python -class Retrieve(Parameter): - def __init__(self, k=3): - self.stage = random.randbytes(8).hex() - self.k = k -``` - -**Parameters:** -- `k` (_Any_): Number of retrieval responses - -### Method - -#### `__call__(self, *args, **kwargs):` - -This method serves as a wrapper for the `forward` method. It allows making retrievals on an input query using the `Retrieve` class. - -**Parameters:** -- `**args`: Arguments required for retrieval. -- `**kwargs`: Keyword arguments required for retrieval. - -**Returns:** -- The result of the `forward` method. - -### Examples - -```python -query='When was the first FIFA World Cup held?' - -# Call the retriever on a particular query. -retrieve = dspy.Retrieve(k=3) -topK_passages = retrieve(query).passages - -print(f"Top {retrieve.k} passages for question: {query} \n", '-' * 30, '\n') - -for idx, passage in enumerate(topK_passages): - print(f'{idx+1}]', passage, '\n') -``` - -## dspy.ChainOfThought - -### Constructor - -The constructor initializes the `ChainOfThought` class and sets up its attributes. It inherits from the `Predict` class and adds specific functionality for chain of thought processing. - -Internally, the class initializes the `activated` attribute to indicate if chain of thought processing has been selected. It extends the `signature` to include additional reasoning steps and an updated `rationale_type` when chain of thought processing is activated. - -```python -class ChainOfThought(Predict): - def __init__(self, signature, rationale_type=None, activated=True, **config): - super().__init__(signature, **config) - - self.activated = activated - - signature = self.signature - *keys, last_key = signature.kwargs.keys() - - DEFAULT_RATIONALE_TYPE = dsp.Type(prefix="Reasoning: Let's think step by step in order to", - desc="${produce the " + last_key + "}. We ...") - - rationale_type = rationale_type or DEFAULT_RATIONALE_TYPE - - extended_kwargs = {key: signature.kwargs[key] for key in keys} - extended_kwargs.update({'rationale': rationale_type, last_key: signature.kwargs[last_key]}) - - self.extended_signature = dsp.Template(signature.instructions, **extended_kwargs) -``` - -**Parameters:** -- `signature` (_Any_): Signature of predictive model. -- `rationale_type` (_dsp.Type_, _optional_): Rationale type for reasoning steps. Defaults to `None`. -- `activated` (_bool_, _optional_): Flag for activated chain of thought processing. Defaults to `True`. -- `**config` (_dict_): Additional configuration parameters for model. - -### Method - -#### `forward(self, **kwargs)` - -This method extends the parent `Predict` class' forward pass while updating the signature when chain of thought reasoning is activated or if the language model is a GPT3 model. - -**Parameters:** -- `**kwargs`: Keyword arguments required for prediction. - -**Returns:** -- The result of the `forward` method. - -### Examples - -```python -#Define a simple signature for basic question answering -class BasicQA(dspy.Signature): - """Answer questions with short factoid answers.""" - question = dspy.InputField() - answer = dspy.OutputField(desc="often between 1 and 5 words") - -#Pass signature to ChainOfThought module -generate_answer = dspy.ChainOfThought(BasicQA) - -# Call the predictor on a particular input. -question='What is the color of the sky?'
-pred = generate_answer(question=question) - -print(f"Question: {question}") -print(f"Predicted Answer: {pred.answer}") -``` - -## dspy.ChainOfThoughtWithHint - -### Constructor - -The constructor initializes the `ChainOfThoughtWithHint` class and sets up its attributes, inheriting from the `Predict` class. This class enhances the `ChainOfThought` class by offering an additional option to provide hints for reasoning. Two distinct signature templates are created internally depending on the presence of the hint. - -```python -class ChainOfThoughtWithHint(Predict): - def __init__(self, signature, rationale_type=None, activated=True, **config): - super().__init__(signature, **config) - - self.activated = activated - - signature = self.signature - *keys, last_key = signature.kwargs.keys() - - DEFAULT_HINT_TYPE = dsp.Type(prefix="Hint:", desc="${hint}") - - DEFAULT_RATIONALE_TYPE = dsp.Type(prefix="Reasoning: Let's think step by step in order to", - desc="${produce the " + last_key + "}. We ...") - - rationale_type = rationale_type or DEFAULT_RATIONALE_TYPE - - extended_kwargs1 = {key: signature.kwargs[key] for key in keys} - extended_kwargs1.update({'rationale': rationale_type, last_key: signature.kwargs[last_key]}) - - extended_kwargs2 = {key: signature.kwargs[key] for key in keys} - extended_kwargs2.update({'hint': DEFAULT_HINT_TYPE, 'rationale': rationale_type, last_key: signature.kwargs[last_key]}) - - self.extended_signature1 = dsp.Template(signature.instructions, **extended_kwargs1) - self.extended_signature2 = dsp.Template(signature.instructions, **extended_kwargs2) -``` - -**Parameters:** -- `signature` (_Any_): Signature of predictive model. -- `rationale_type` (_dsp.Type_, _optional_): Rationale type for reasoning steps. Defaults to `None`. -- `activated` (_bool_, _optional_): Flag for activated chain of thought processing. Defaults to `True`. -- `**config` (_dict_): Additional configuration parameters for model. - -### Method - -#### `forward(self, **kwargs)` - -This method extends the parent `Predict` class's forward pass, updating the signature dynamically based on the presence of `hint` in the keyword arguments and the `activated` attribute. - -**Parameters:** -- `**kwargs`: Keyword arguments required for prediction. - -**Returns:** -- The result of the `forward` method in the parent `Predict` class. - -### Examples - -```python -#Define a simple signature for basic question answering -class BasicQA(dspy.Signature): - """Answer questions with short factoid answers.""" - question = dspy.InputField() - answer = dspy.OutputField(desc="often between 1 and 5 words") - -#Pass signature to ChainOfThought module -generate_answer = dspy.ChainOfThoughtWithHint(BasicQA) - -# Call the predictor on a particular input alongside a hint. -question='What is the color of the sky?' -hint = "It's what you often see during a sunny day." -pred = generate_answer(question=question, hint=hint) - -print(f"Question: {question}") -print(f"Predicted Answer: {pred.answer}") -``` - - -## dspy.MultiChainComparison - -### Constructor - -The constructor initializes the `MultiChainComparison` class and sets up its attributes. It inherits from the `Predict` class and adds specific functionality for multiple chain comparisons. - -The class incorporates multiple student attempt reasonings and concludes with the selected best reasoning path out of the available attempts. 
- -```python -from .predict import Predict -from ..primitives.program import Module - -import dsp - -class MultiChainComparison(Module): - def __init__(self, signature, M=3, temperature=0.7, **config): - super().__init__() - - self.M = M - signature = Predict(signature).signature - *keys, last_key = signature.kwargs.keys() - - extended_kwargs = {key: signature.kwargs[key] for key in keys} - - for idx in range(M): - candidate_type = dsp.Type(prefix=f"Student Attempt #{idx+1}:", desc="${reasoning attempt}") - extended_kwargs.update({f'reasoning_attempt_{idx+1}': candidate_type}) - - rationale_type = dsp.Type(prefix="Accurate Reasoning: Thank you everyone. Let's now holistically", desc="${corrected reasoning}") - extended_kwargs.update({'rationale': rationale_type, last_key: signature.kwargs[last_key]}) - - signature = dsp.Template(signature.instructions, **extended_kwargs) - self.predict = Predict(signature, temperature=temperature, **config) - self.last_key = last_key -``` - -**Parameters:** -- `signature` (_Any_): Signature of predictive model. -- `M` (_int_, _optional_): Number of student reasoning attempts. Defaults to `3`. -- `temperature` (_float_, _optional_): Temperature parameter for prediction. Defaults to `0.7`. -- `**config` (_dict_): Additional configuration parameters for model. - -### Method - -#### `forward(self, completions, **kwargs)` - -This method aggregates all the student reasoning attempts and calls the predict method with extended signatures to get the best reasoning. - -**Parameters:** -- `completions`: List of completion objects which include student reasoning attempts. -- `**kwargs`: Additional keyword arguments. - -**Returns:** -- The result of the `predict` method for the best reasoning. - -### Examples - -```python -class BasicQA(dspy.Signature): - """Answer questions with short factoid answers.""" - question = dspy.InputField() - answer = dspy.OutputField(desc="often between 1 and 5 words") - -# Example completions generated by a model for reference -completions = [ - dspy.Prediction(rationale="I recall that during clear days, the sky often appears this color.", answer="blue"), - dspy.Prediction(rationale="Based on common knowledge, I believe the sky is typically seen as this color.", answer="green"), - dspy.Prediction(rationale="From images and depictions in media, the sky is frequently represented with this hue.", answer="blue"), -] - -# Pass signature to MultiChainComparison module -compare_answers = dspy.MultiChainComparison(BasicQA) - -# Call the MultiChainComparison on the completions -question = 'What is the color of the sky?' -final_pred = compare_answers(completions, question=question) - -print(f"Question: {question}") -print(f"Final Predicted Answer (after comparison): {final_pred.answer}") -print(f"Final Rationale: {final_pred.rationale}") -``` - -## dspy.ReAct - -### Constructor - -The constructor initializes the `ReAct` class and sets up its attributes. It is specifically designed to compose the interleaved steps of Thought, Action, and Observation. - -Internally, the class follows a sequential process: Thoughts (or reasoning) lead to Actions (such as queries or activities). These Actions then result in Observations (like results or responses), which subsequently feedback into the next Thought. This cycle is maintained for a predefined number of iterations. 
- -```python -import dsp -import dspy -from ..primitives.program import Module -from .predict import Predict - -class ReAct(Module): - def __init__(self, signature, max_iters=5, num_results=3, tools=None): - ... -``` - -**Parameters:** -- `signature` (_Any_): Signature of the predictive model. -- `max_iters` (_int_, _optional_): Maximum number of iterations for the Thought-Action-Observation cycle. Defaults to `5`. -- `num_results` (_int_, _optional_): Number of results to retrieve in the action step. Defaults to `3`. -- `tools` (_List[dspy.Tool]_, _optional_): List of tools available for actions. If none is provided, a default `Retrieve` tool with `num_results` is used. - -### Methods - -#### `_generate_signature(self, iters)` - -Generates a signature for the Thought-Action-Observation cycle based on the number of iterations. - -**Parameters:** -- `iters` (_int_): Number of iterations. - -**Returns:** -- A dictionary representation of the signature. - -#### `act(self, output, hop)` - -Processes an action and returns the observation or final answer. - -**Parameters:** -- `output` (_dict_): Current output from the Thought. -- `hop` (_int_): Current iteration number. - -**Returns:** -- A string representing the final answer or `None`. - -#### `forward(self, **kwargs)` - -Main method to execute the Thought-Action-Observation cycle for a given set of input fields. - -**Parameters:** -- `**kwargs`: Keyword arguments corresponding to input fields. - -**Returns:** -- A `dspy.Prediction` object containing the result of the ReAct process. - -### Examples - -```python -# Define a simple signature for basic question answering -class BasicQA(dspy.Signature): - """Answer questions with short factoid answers.""" - question = dspy.InputField() - answer = dspy.OutputField(desc="often between 1 and 5 words") - -# Pass signature to ReAct module -react_module = dspy.ReAct(BasicQA) - -# Call the ReAct module on a particular input -question = 'What is the color of the sky?' -result = react_module(question=question) - -print(f"Question: {question}") -print(f"Final Predicted Answer (after ReAct process): {result.answer}") -``` \ No newline at end of file diff --git a/docs-page/package-lock.json b/docs/package-lock.json similarity index 100% rename from docs-page/package-lock.json rename to docs/package-lock.json diff --git a/docs-page/package.json b/docs/package.json similarity index 100% rename from docs-page/package.json rename to docs/package.json diff --git a/docs/repo/contributing.md b/docs/repo/contributing.md deleted file mode 100644 index a7e952769f..0000000000 --- a/docs/repo/contributing.md +++ /dev/null @@ -1,71 +0,0 @@ -# ⚙️ Setting up a working environment - -## 💻 Env Setup - -``` -conda create --name dspy python=3.11 -``` - -or - -``` -python3 -m venv dspy -``` - -## 🚀 Pre-commit hook - -Before using the pre-commit hook you need to install it in your Python environment. - -``` -conda install -c conda-forge pre-commit -``` - -Go to the root folder and then activate it as follows (it will first download all required dependencies): - -``` -pre-commit install -``` - -> Pre-commit hooks will attempt to fix all your files, so you will need to (add + commit) them again once the fixes are done! - -!!!
info "Optionally" - - Generally the pre-commit will run automatically before each of your commit, - but you can also manually trigger it, as follows: - - ```python - pre-commit run --all-files - ``` - -## 📝 Commit with Style - -Use standarized commit message: - -`{LABEL}(ACRONYM): {message}` - -This is very important for the automatic releases and a clean history on the `main` branch. - -!!! Labels-types - - | Label | Usage | - | ----- | ----- | - | break| `break` is used to identify changes related to old compatibility or functionality that breaks the current usage (major) | - | feat | `feat` is used to identify changes related to new backward-compatible abilities or functionality (minor) | - | init | `init` is used to indentify the starting related to the project (minor) | - | enh | `enh` is used to indentify changes related to amelioration of abilities or functionality (patch) | - | build | `build` (also known as `chore`) is used to identify **development** changes related to the build system (involving scripts, configurations, or tools) and package dependencies (patch) | - | ci | `ci` is used to identify **development** changes related to the continuous integration and deployment system - involving scripts, configurations, or tools (minor) | - | docs | `docs` is used to identify documentation changes related to the project; whether intended externally for the end-users or internally for the developers (patch) | - | perf | `perf` is used to identify changes related to backward-compatible **performance improvements** (patch) | - | refactor | `refactor` is used to identify changes related to modifying the codebase, which neither adds a feature nor fixes a bug - such as removing redundant code, simplifying the code, renaming variables, etc.
i.e. handy for your wip ; ) (patch) | - | style | `style` is used to identify **development** changes related to styling the codebase, regardless of the meaning - such as indentations, semi-colons, quotes, trailing commas, and so on (patch) | - | test | `test` is used to identify **development** changes related to tests - such as refactoring existing tests or adding new tests. (minor) | - | fix | `fix` is used to identify changes related to backward-compatible bug fixes. (patch) | - | ops | `ops` is used to identify changes related to deployment files like `values.yml`, `gateway.yml,` or `Jenkinsfile` in the **ops** directory. (minor) | - | hotfix | `hotfix` is used to identify **production** changes related to backward-compatible bug fixes (patch) | - | revert | `revert` is used to identify backward changes (patch) | - | maint | `maint` is used to identify **maintenance** changes related to the project (patch) | diff --git a/docs/repo/documentation.md b/docs/repo/documentation.md deleted file mode 100644 index 685bf7dfc8..0000000000 --- a/docs/repo/documentation.md +++ /dev/null @@ -1,82 +0,0 @@ -# 📃 Documentation - -We are using MkDocs to build the documentation; you can find more info about all its possibilities here: [Examples](https://squidfunk.github.io/mkdocs-material/reference/#setting-the-page-title). It is basically a combination of Markdown and Mermaid for graphs. - -!!! info -You can read more about [Mermaid](https://github.com/mermaid-js/mermaid) with all its possibilities! -If you would like to test your Mermaid flowchart online without having to install any required libraries, you can refer to the [Online Schema Editor](https://mermaid-js.github.io/mermaid-live-editor) - -## ➕ Extending the documentation - -- [x] start from the `dev` branch and create a new branch with the correct naming convention, see: [how to contribute](contributing.md) - -- [x] add additional '.md' files to the documentation directory: `/docs` - -- [x] add the new entry into the navigation bar and connect it with your `md` file. This can be done in: [`root/mkdocs.yml`](mkdocs.yml) - -- [x] You can interactively test the documentation locally by using the following command: `mkdocs serve` - - > You will need to have all local docs-related requirements installed (see: [tool.poetry.group.doc.dependencies]): - - ```toml - mkdocs = ">=1.5.3" - mkdocs-material = ">=9.0.6" - mkdocs-material-extensions = ">=1.3.1" - mkdocs-gen-files = "^0.5.0" - mkdocstrings-python = "^1.7.5" - mkdocstrings = {extras = ["python"], version = ">=0.20.0"} - mike = ">=2.0.0" - ``` - -- [x] Once you are done, create a new Merge Request to the `dev` branch. - -- [x] When your MR gets approved, merge it into `dev` following the well-known conventions, see: [how to contribute](contributing.md) - -- [x] New documentation will be automatically deployed once your MR gets merged! - -!!! warning -In some cases you may need to deploy the new doc to GitHub Pages immediately; this can be done using the following command: `mkdocs gh-deploy` (while being in the right venv) - -## 🔍 Documenting code - -Documenting code is done using dedicated docstrings, which are then automatically parsed and converted into the documentation.
- -In order to document your code, you need to use the following syntax: - -```python -# 🗺️ PARAGRAPH NAME - -::: dspy.predict.predict.Predict - handler: python - options: - show_root_heading: true - show_source: true -``` - -and the Predict class documentation needs to follow the Google Style Guide, see: [Google Style Guide](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings) - -!!! example - - ```python - """Send a static HTML report along with a message to the Slack channel. - - Args: - project_name (str): The name of the project for which the job was submitted. - html_file_path (str): The file path of the HTML report to be sent. - message (str): The message to send along with the HTML report. - - Example: - ```python - from theparrot.notify import SlackBot - - bot = SlackBot() - bot.send_html_report( - html_file_path="path/to/your/report.html", - message="Check out this report!", - ) - ``` - """ - ``` - -This approach allows documentation to be handled directly from the code, which keeps it up to date. -It also allows the documentation to be versioned, making it easy to track changes and handle multiple versions of the package. diff --git a/docs/repo/getting_started.md b/docs/repo/getting_started.md deleted file mode 100644 index 0a0bfd7304..0000000000 --- a/docs/repo/getting_started.md +++ /dev/null @@ -1,42 +0,0 @@ -## 💻 Installation - -To install the Python package, you need: - -```python -pip install dspy-ai -``` - -Or open our intro notebook in Google Colab: [](https://colab.research.google.com/github/stanfordnlp/dspy/blob/main/intro.ipynb) - -!!! info -By default, DSPy depends on `openai==0.28`. However, if you install `openai>=1.0`, the library will use that just fine. Both are supported. - -For the optional Pinecone, Qdrant, [chromadb](https://github.com/chroma-core/chroma), or [marqo](https://github.com/marqo-ai/marqo) retrieval integration(s), include the extra(s) below: - -```python -pip install dspy-ai[pinecone] # or [qdrant] or [chromadb] or [marqo] or [mongodb] -``` - -## ℹ️ Examples - -The DSPy team believes complexity has to be justified. We take this seriously: we never release a complex tutorial (above) or example (below) _unless we can demonstrate empirically that this complexity has generally led to improved quality or cost._ This kind of rule is rarely enforced by other frameworks or docs, but you can count on it in DSPy examples. - -There's a bunch of examples in the `examples/` directory and in the top-level directory. We welcome contributions! - -You can find other examples tweeted by [@lateinteraction](https://twitter.com/lateinteraction) on Twitter/X. - -## 🔍 Detailed Tutorials - -If you're new to DSPy, it's probably best to go in sequential order. You will probably refer to these guides frequently after that, e.g. to copy/paste snippets that you can edit for your own DSPy programs. - -1. **[DSPy Signatures](docs/guides/signatures.ipynb)** - -2. **[Language Models](docs/guides/language_models.ipynb)** and **[Retrieval Models](docs/guides/retrieval_models.ipynb)** - -3. **[DSPy Modules](docs/guides/modules.ipynb)** - -4. **[DSPy Optimizers](docs/guides/optimizers.ipynb)** - -5. **[DSPy Metrics](docs/guides/metrics.ipynb)** - -6.
**[DSPy Assertions](docs/guides/assertions.ipynb)** diff --git a/docs/retrieval_models_client.md b/docs/retrieval_models_client.md deleted file mode 100644 index 8f31e0bbde..0000000000 --- a/docs/retrieval_models_client.md +++ /dev/null @@ -1,217 +0,0 @@ -# Retriever Modules Documentation - -This documentation provides an overview of the DSPy Retrieval Model Clients. - -## Supported RM Clients - -| RM Client | Jump To | -| --- | --- | -| ColBERTv2 | [ColBERTv2 Section](#ColBERTv2) | -| AzureCognitiveSearch | [AzureCognitiveSearch Section](#AzureCognitiveSearch) | -| ChromadbRM | [ChromadbRM Section](#ChromadbRM) | -| Faiss | [Faiss Section](#Faiss) | - -## ColBERTv2 - -### Quickstart - -```python -import dspy - -colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') - -retrieval_response = colbertv2_wiki17_abstracts('When was the first FIFA World Cup held?', k=5) - -for result in retrieval_response: - print("Text:", result['text'], "\n") -``` - - -### Constructor - -The constructor initializes the `ColBERTv2` class instance and sets up the request parameters for interacting with the ColBERTv2 server. - -```python -class ColBERTv2: - def __init__( - self, - url: str = "http://0.0.0.0", - port: Optional[Union[str, int]] = None, - post_requests: bool = False, - ): -``` - -**Parameters:** -- `url` (_str_): URL for ColBERTv2 server. -- `port` (_Union[str, int]_, _Optional_): Port endpoint for ColBERTv2 server. Defaults to `None`. -- `post_requests` (_bool_, _Optional_): Flag for using HTTP POST requests. Defaults to `False`. - -### Methods - -#### `__call__(self, query: str, k: int = 10, simplify: bool = False) -> Union[list[str], list[dotdict]]` - -Enables making queries to the ColBERTv2 server for retrieval. Internally, the method handles the specifics of preparing the request prompt and corresponding payload to obtain the response. The function handles the retrieval of the top-k passages based on the provided query. - -**Parameters:** -- `query` (_str_): Query string used for retrieval. -- `k` (_int_, _optional_): Number of passages to retrieve. Defaults to 10. -- `simplify` (_bool_, _optional_): Flag for simplifying output to a list of strings. Defaults to False. - -**Returns:** -- `Union[list[str], list[dotdict]]`: Depending on `simplify` flag, either a list of strings representing the passage content (`True`) or a list of `dotdict` instances containing passage details (`False`). - -## AzureCognitiveSearch - -### Quickstart - -#TODO - -### Constructor - -The constructor initializes an instance of the `AzureCognitiveSearch` class and sets up parameters for sending queries and retrieving results from the Azure Cognitive Search server. - -```python -class AzureCognitiveSearch: - def __init__( - self, - search_service_name: str, - search_api_key: str, - search_index_name: str, - field_text: str, - field_score: str, # required field to map with "score" field in dsp framework - ): -``` - -**Parameters:** -- `search_service_name` (_str_): Name of Azure Cognitive Search server. -- `search_api_key` (_str_): API Authentication token for accessing Azure Cognitive Search server. -- `search_index_name` (_str_): Name of search index in the Azure Cognitive Search server. -- `field_text` (_str_): Field name that maps to DSP "content" field. -- `field_score` (_str_): Field name that maps to DSP "score" field. - -### Methods - -Refer to [ColBERTv2](#ColBERTv2) documentation. Keep in mind there is no `simplify` flag for AzureCognitiveSearch.
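Since the quickstart above is still marked TODO, here is a minimal, hedged sketch of how an instance could be constructed from the documented parameters. Every value below (service name, key, index, and field names), as well as the import path, is an illustrative assumption rather than something confirmed by this documentation:

```python
# The import path below is an assumption for illustration purposes.
from dsp.modules.azure_cognitive_search import AzureCognitiveSearch

# Every argument here is a placeholder; substitute your own service details.
retriever = AzureCognitiveSearch(
    search_service_name="my-search-service",
    search_api_key="<AZURE_SEARCH_API_KEY>",
    search_index_name="my-index",
    field_text="content",  # index field mapped to the DSP "content" field
    field_score="score",   # index field mapped to the DSP "score" field
)
```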
- -AzureCognitiveSearch supports sending queries and processing the received results, mapping content and scores to a correct format for the Azure Cognitive Search server. - -## ChromadbRM - -### Quickstart with OpenAI Embeddings - -ChromadbRM offers the flexibility of a variety of embedding functions, as outlined in the [chromadb embeddings documentation](https://docs.trychroma.com/embeddings). While different options are available, this example demonstrates how to utilize OpenAI embeddings specifically. - -```python -from dspy.retrieve import ChromadbRM -import os -import openai -from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction - -embedding_function = OpenAIEmbeddingFunction( - api_key=os.environ.get('OPENAI_API_KEY'), - model_name="text-embedding-ada-002" -) - -retriever_model = ChromadbRM( - 'your_collection_name', - '/path/to/your/db', - embedding_function=embedding_function, - k=5 -) - -results = retriever_model("Explore the significance of quantum computing", k=5) - -for result in results: - print("Document:", result.long_text, "\n") -``` - -### Constructor - -Initialize an instance of the `ChromadbRM` class, with the option to use OpenAI's embeddings or any alternative supported by chromadb, as detailed in the official [chromadb embeddings documentation](https://docs.trychroma.com/embeddings). - -```python -ChromadbRM( - collection_name: str, - persist_directory: str, - embedding_function: Optional[EmbeddingFunction[Embeddable]] = OpenAIEmbeddingFunction(), - k: int = 7, -) -``` - -**Parameters:** -- `collection_name` (_str_): The name of the chromadb collection. -- `persist_directory` (_str_): Path to the directory where chromadb data is persisted. -- `embedding_function` (_Optional[EmbeddingFunction[Embeddable]]_, _optional_): The function used for embedding documents and queries. Defaults to `OpenAIEmbeddingFunction()` if not specified, as shown in the signature above. -- `k` (_int_, _optional_): The number of top passages to retrieve. Defaults to 7. - -### Methods - -#### `forward(self, query_or_queries: Union[str, List[str]], k: Optional[int] = None) -> dspy.Prediction` - -Search the chromadb collection for the top `k` passages matching the given query or queries, using embeddings generated via the specified `embedding_function`. - -**Parameters:** -- `query_or_queries` (_Union[str, List[str]]_): The query or list of queries to search for. - -`k` (_Optional[int]_, _optional_): The number of results to retrieve. If not specified, defaults to the value set during initialization. - -**Returns:** -- `dspy.Prediction`: Contains the retrieved passages, each represented as a `dotdict` with a `long_text` attribute. - -## Faiss - -### Quickstart with the default vectorizer - -The **FaissRM** module provides a retriever that uses an in-memory Faiss vector database. This module does not include a vectorizer; instead it supports any subclass of **dsp.modules.sentence_vectorizer.BaseSentenceVectorizer**. If a vectorizer is not provided, an instance of **dsp.modules.sentence_vectorizer.SentenceTransformersVectorizer** is created and used by **FaissRM**.
Note that the default embedding model for **SentenceTransformersVectorizer** is **all-MiniLM-L6-v2**. - - -```python -import dspy -from dspy.retrieve import faiss_rm - -document_chunks = [ - "The Super Bowl this year was played between the San Francisco 49ers and the Kansas City Chiefs", - "Pop corn is often served in a bowl", - "The Rice Bowl is a Chinese Restaurant located in the city of Tucson, Arizona", - "Mars is the fourth planet in the Solar System", - "An aquarium is a place where children can learn about marine life", - "The capital of the United States is Washington, D.C.", - "Rock and Roll musicians are honored by being inducted in the Rock and Roll Hall of Fame", - "Music albums were published on Long Play Records in the 70s and 80s", - "Sichuan cuisine is a spicy cuisine from central China", - "The interest rates for mortgages are considered to be very high in 2024", -] - -frm = faiss_rm.FaissRM(document_chunks) -turbo = dspy.OpenAI(model="gpt-3.5-turbo") -dspy.settings.configure(lm=turbo, rm=frm) -print(frm(["I am in the mood for Chinese food"])) -``` - -### Constructor - -Initialize an instance of FaissRM by providing it with a vectorizer and a list of strings: - -```python -FaissRM( - document_chunks: List[str], - vectorizer: dsp.modules.sentence_vectorizer.BaseSentenceVectorizer, - k: int = 3 -) -``` - -**Parameters:** -- `document_chunks` (_List[str]_): a list of strings that comprises the corpus to search. You cannot add/insert/upsert to this list after creating this FaissRM object. -- `vectorizer` (_dsp.modules.sentence_vectorizer.BaseSentenceVectorizer_, _optional_): If not provided, a dsp.modules.sentence_vectorizer.SentenceTransformersVectorizer object is created and used. -- `k` (_int_, _optional_): The number of top passages to retrieve. Defaults to 3. - -### Methods - -#### `forward(self, query_or_queries: Union[str, List[str]]) -> dspy.Prediction` - -Search the FaissRM vector database for the top `k` passages matching the given query or queries, using embeddings generated via the vectorizer specified at FaissRM construction time. - -**Parameters:** -- `query_or_queries` (_Union[str, List[str]]_): The query or list of queries to search for. - -**Returns:** -- `dspy.Prediction`: Contains the retrieved passages, each represented as a `dotdict` with a `long_text` attribute and an `index` attribute. The `index` attribute is the index in the document_chunks array provided to this FaissRM object at construction time.
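Because the quickstart above registers `frm` through `dspy.settings`, it also composes with the generic `dspy.Retrieve` module. A small sketch, reusing the `frm` object and `document_chunks` from the quickstart (the query string is illustrative):

```python
import dspy

# Assumes `frm` was constructed from `document_chunks` as in the quickstart
# above; register it as the default retrieval model.
dspy.settings.configure(rm=frm)

retrieve = dspy.Retrieve(k=3)
topK_passages = retrieve("I am in the mood for Chinese food").passages

for idx, passage in enumerate(topK_passages):
    print(f"{idx+1}]", passage)
```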
diff --git a/docs-page/sidebars.ts b/docs/sidebars.ts similarity index 100% rename from docs-page/sidebars.ts rename to docs/sidebars.ts diff --git a/docs-page/src/components/AuthorDetails/index.tsx b/docs/src/components/AuthorDetails/index.tsx similarity index 100% rename from docs-page/src/components/AuthorDetails/index.tsx rename to docs/src/components/AuthorDetails/index.tsx diff --git a/docs-page/src/components/AuthorDetails/styles.module.css b/docs/src/components/AuthorDetails/styles.module.css similarity index 100% rename from docs-page/src/components/AuthorDetails/styles.module.css rename to docs/src/components/AuthorDetails/styles.module.css diff --git a/docs-page/src/components/HomepageFeatures/index.tsx b/docs/src/components/HomepageFeatures/index.tsx similarity index 100% rename from docs-page/src/components/HomepageFeatures/index.tsx rename to docs/src/components/HomepageFeatures/index.tsx diff --git a/docs-page/src/components/HomepageFeatures/styles.module.css b/docs/src/components/HomepageFeatures/styles.module.css similarity index 100% rename from docs-page/src/components/HomepageFeatures/styles.module.css rename to docs/src/components/HomepageFeatures/styles.module.css diff --git a/docs-page/src/css/custom.css b/docs/src/css/custom.css similarity index 100% rename from docs-page/src/css/custom.css rename to docs/src/css/custom.css diff --git a/docs-page/src/pages/index.module.css b/docs/src/pages/index.module.css similarity index 100% rename from docs-page/src/pages/index.module.css rename to docs/src/pages/index.module.css diff --git a/docs-page/src/pages/index.tsx b/docs/src/pages/index.tsx similarity index 100% rename from docs-page/src/pages/index.tsx rename to docs/src/pages/index.tsx diff --git a/docs-page/src/pages/markdown-page.md b/docs/src/pages/markdown-page.md similarity index 100% rename from docs-page/src/pages/markdown-page.md rename to docs/src/pages/markdown-page.md diff --git a/docs-page/static/.nojekyll b/docs/static/.nojekyll similarity index 100% rename from docs-page/static/.nojekyll rename to docs/static/.nojekyll diff --git a/docs-page/static/img/dspy_logo.png b/docs/static/img/dspy_logo.png similarity index 100% rename from docs-page/static/img/dspy_logo.png rename to docs/static/img/dspy_logo.png diff --git a/docs-page/static/img/logo.png b/docs/static/img/logo.png similarity index 100% rename from docs-page/static/img/logo.png rename to docs/static/img/logo.png diff --git a/docs-page/static/img/modular.png b/docs/static/img/modular.png similarity index 100% rename from docs-page/static/img/modular.png rename to docs/static/img/modular.png diff --git a/docs-page/static/img/optimize.png b/docs/static/img/optimize.png similarity index 100% rename from docs-page/static/img/optimize.png rename to docs/static/img/optimize.png diff --git a/docs-page/static/img/undraw_docusaurus_mountain.svg b/docs/static/img/undraw_docusaurus_mountain.svg similarity index 100% rename from docs-page/static/img/undraw_docusaurus_mountain.svg rename to docs/static/img/undraw_docusaurus_mountain.svg diff --git a/docs-page/static/img/undraw_docusaurus_react.svg b/docs/static/img/undraw_docusaurus_react.svg similarity index 100% rename from docs-page/static/img/undraw_docusaurus_react.svg rename to docs/static/img/undraw_docusaurus_react.svg diff --git a/docs-page/static/img/undraw_docusaurus_tree.svg b/docs/static/img/undraw_docusaurus_tree.svg similarity index 100% rename from docs-page/static/img/undraw_docusaurus_tree.svg rename to 
docs/static/img/undraw_docusaurus_tree.svg diff --git a/docs-page/static/img/universal_compatibility.png b/docs/static/img/universal_compatibility.png similarity index 100% rename from docs-page/static/img/universal_compatibility.png rename to docs/static/img/universal_compatibility.png diff --git a/docs/teleprompters.md b/docs/teleprompters.md deleted file mode 100644 index d60ea7f9d7..0000000000 --- a/docs/teleprompters.md +++ /dev/null @@ -1,283 +0,0 @@ -# Teleprompters Documentation - -Teleprompters are powerful optimizers (included in DSPy) that can learn to bootstrap and select effective prompts for the modules of any program. (The "tele-" in the name means "at a distance", i.e., automatic prompting at a distance.) - -This documentation provides an overview of the DSPy Teleprompters. - -## Teleprompters - -| Module | Jump To | -| --- | --- | -| LabeledFewShot | [LabeledFewShot Section](#telepromptlabeledfewshot) | -| BootstrapFewShot | [BootstrapFewShot Section](#telepromptbootstrapfewshot) | -| Ensemble | [Ensemble Section](#telepromptensemble) | -| BootstrapFewShotWithRandomSearch | [BootstrapFewShotWithRandomSearch Section](#telepromptbootstrapfewshotwithrandomsearch) | -| BootstrapFinetune | [BootstrapFinetune Section](#telepromptbootstrapfinetune) | - -## teleprompt.LabeledFewShot - -### Constructor - -The constructor initializes the `LabeledFewShot` class and sets up its attributes, particularly defining `k` number of samples to be used by the predictor. - -```python -class LabeledFewShot(Teleprompter): - def __init__(self, k=16): - self.k = k -``` - -**Parameters:** -- `k` (_int_): Number of samples to be used for each predictor. Defaults to 16. - -### Method - -#### `compile(self, student, *, trainset)` - -This method compiles the `LabeledFewShot` instance by configuring the `student` predictor. It assigns subsets of the `trainset` in each student's predictor's `demos` attribute. If the `trainset` is empty, the method returns the original `student`. - -**Parameters:** -- `student` (_Teleprompter_): Student predictor to be compiled. -- `trainset` (_list_): Training dataset for compiling with student predictor. - -**Returns:** -- The compiled `student` predictor with assigned training samples for each predictor or the original `student` if the `trainset` is empty. - -### Example - -```python -import dspy - -#Assume defined trainset -class RAG(dspy.Module): - def __init__(self, num_passages=3): - super().__init__() - - #declare retrieval and predictor modules - self.retrieve = dspy.Retrieve(k=num_passages) - self.generate_answer = dspy.ChainOfThought(GenerateAnswer) - - #flow for answering questions using predictor and retrieval modules - def forward(self, question): - context = self.retrieve(question).passages - prediction = self.generate_answer(context=context, question=question) - return dspy.Prediction(context=context, answer=prediction.answer) - -#Define teleprompter -teleprompter = LabeledFewShot() - -# Compile! -compiled_rag = teleprompter.compile(student=RAG(), trainset=trainset) -``` - -## teleprompt.BootstrapFewShot - -### Constructor - -The constructor initializes the `BootstrapFewShot` class and sets up parameters for bootstrapping. 
- -```python -class BootstrapFewShot(Teleprompter): - def __init__(self, metric=None, teacher_settings={}, max_bootstrapped_demos=4, max_labeled_demos=16, max_rounds=1): - self.metric = metric - self.teacher_settings = teacher_settings - - self.max_bootstrapped_demos = max_bootstrapped_demos - self.max_labeled_demos = max_labeled_demos - self.max_rounds = max_rounds -``` - -**Parameters:** -- `metric` (_callable_, _optional_): Metric function to evaluate examples during bootstrapping. Defaults to `None`. -- `teacher_settings` (_dict_, _optional_): Settings for teacher predictor. Defaults to empty dictionary. -- `max_bootstrapped_demos` (_int_, _optional_): Maximum number of bootstrapped demonstrations per predictor. Defaults to 4. -- `max_labeled_demos` (_int_, _optional_): Maximum number of labeled demonstrations per predictor. Defaults to 16. -- `max_rounds` (_int_, _optional_): Maximum number of bootstrapping rounds. Defaults to 1. - -### Method - -#### `compile(self, student, *, teacher=None, trainset, valset=None)` - -This method compiles the BootstrapFewShot instance by performing bootstrapping to refine the student predictor. - -This process includes preparing the student and teacher predictors, which involves creating predictor copies, verifying the student predictor is uncompiled, and compiling the teacher predictor with labeled demonstrations via LabeledFewShot if the teacher predictor hasn't been compiled. - -The next stage involves preparing predictor mappings by validating that both the student and teacher predictors have the same program structure and the same signatures but are different objects. - -The final stage is performing the bootstrapping iterations. - -**Parameters:** -- `student` (_Teleprompter_): Student predictor to be compiled. -- `teacher` (_Teleprompter_, _optional_): Teacher predictor used for bootstrapping. Defaults to `None`. -- `trainset` (_list_): Training dataset used in bootstrapping. -- `valset` (_list_, _optional_): Validation dataset used in compilation. Defaults to `None`. - -**Returns:** -- The compiled `student` predictor after bootstrapping with refined demonstrations. - -### Example - -```python -#Assume defined trainset -#Assume defined RAG class -... - -#Define teleprompter and include teacher -teacher = dspy.OpenAI(model='gpt-3.5-turbo', api_key = openai.api_key, api_provider = "openai", model_type = "chat") -teleprompter = BootstrapFewShot(teacher_settings=dict({'lm': teacher})) - -# Compile! -compiled_rag = teleprompter.compile(student=RAG(), trainset=trainset) -``` - -## teleprompt.Ensemble - -### Constructor - -The constructor initializes the `Ensemble` class and sets up its attributes. This teleprompter is designed to create ensembled versions of multiple programs, reducing various outputs from different programs into a single output. - -```python -class Ensemble(Teleprompter): - def __init__(self, *, reduce_fn=None, size=None, deterministic=False): -``` - -**Parameters:** -- `reduce_fn` (_callable_, _optional_): Function used to reduce multiple outputs from different programs into a single output. A common choice is `dspy.majority`. Defaults to `None`. -- `size` (_int_, _optional_): Number of programs to randomly select for ensembling. If not specified, all programs will be used. Defaults to `None`. -- `deterministic` (_bool_, _optional_): Specifies whether ensemble should operate deterministically. Currently, setting this to `True` will raise an error as this feature is pending implementation. Defaults to `False`. 
- -### Method - -#### `compile(self, programs)` - -This method compiles an ensemble of programs into a single program that when run, can either randomly sample a subset of the given programs to produce outputs or use all of them. The multiple outputs can then be reduced into a single output using the `reduce_fn`. - -**Parameters:** -- `programs` (_list_): List of programs to be ensembled. - -**Returns:** -- `EnsembledProgram` (_Module_): An ensembled version of the input programs. - -### Example - -```python -import dspy -from dspy.teleprompt import Ensemble - -# Assume a list of programs -programs = [program1, program2, program3, ...] - -# Define Ensemble teleprompter -teleprompter = Ensemble(reduce_fn=dspy.majority, size=2) - -# Compile to get the EnsembledProgram -ensembled_program = teleprompter.compile(programs) -``` - -## teleprompt.BootstrapFewShotWithRandomSearch - -### Constructor - -The constructor initializes the `BootstrapFewShotWithRandomSearch` class and sets up its attributes. It inherits from the `BootstrapFewShot` class and introduces additional attributes for the random search process. - -```python -class BootstrapFewShotWithRandomSearch(BootstrapFewShot): - def __init__(self, metric, teacher_settings={}, max_bootstrapped_demos=4, max_labeled_demos=16, max_rounds=1, num_candidate_programs=16, num_threads=6): - self.metric = metric - self.teacher_settings = teacher_settings - self.max_rounds = max_rounds - - self.num_threads = num_threads - - self.min_num_samples = 1 - self.max_num_samples = max_bootstrapped_demos - self.num_candidate_sets = num_candidate_programs - self.max_num_traces = 1 + int(max_bootstrapped_demos / 2.0 * self.num_candidate_sets) - - self.max_bootstrapped_demos = self.max_num_traces - self.max_labeled_demos = max_labeled_demos - - print("Going to sample between", self.min_num_samples, "and", self.max_num_samples, "traces per predictor.") - print("Going to sample", self.max_num_traces, "traces in total.") - print("Will attempt to train", self.num_candidate_sets, "candidate sets.") -``` - -**Parameters:** -- `metric` (_callable_, _optional_): Metric function to evaluate examples during bootstrapping. Defaults to `None`. -- `teacher_settings` (_dict_, _optional_): Settings for teacher predictor. Defaults to empty dictionary. -- `max_bootstrapped_demos` (_int_, _optional_): Maximum number of bootstrapped demonstrations per predictor. Defaults to 4. -- `max_labeled_demos` (_int_, _optional_): Maximum number of labeled demonstrations per predictor. Defaults to 16. -- `max_rounds` (_int_, _optional_): Maximum number of bootstrapping rounds. Defaults to 1. -- `num_candidate_programs` (_int_): Number of candidate programs to generate during random search. -- `num_threads` (_int_): Number of threads used for evaluation during random search. - -### Method - -Refer to [teleprompt.BootstrapFewShot](#telepromptbootstrapfewshot) documentation. - -## Example - -```python -#Assume defined trainset -#Assume defined RAG class -... - -#Define teleprompter and include teacher -teacher = dspy.OpenAI(model='gpt-3.5-turbo', api_key = openai.api_key, api_provider = "openai", model_type = "chat") -teleprompter = BootstrapFewShotWithRandomSearch(teacher_settings=dict({'lm': teacher})) - -# Compile! 
-compiled_rag = teleprompter.compile(student=RAG(), trainset=trainset) -``` - -## teleprompt.BootstrapFinetune - -### Constructor - -### `__init__(self, metric=None, teacher_settings={}, multitask=True)` - -The constructor initializes a `BootstrapFinetune` instance and sets up its attributes. It defines the teleprompter as a `BootstrapFewShot` instance for the finetuning compilation. - -```python -class BootstrapFinetune(Teleprompter): - def __init__(self, metric=None, teacher_settings={}, multitask=True): -``` - -**Parameters:** -- `metric` (_callable_, _optional_): Metric function to evaluate examples during bootstrapping. Defaults to `None`. -- `teacher_settings` (_dict_, _optional_): Settings for teacher predictor. Defaults to empty dictionary. -- `multitask` (_bool_, _optional_): Enable multitask fine-tuning. Defaults to `True`. - -### Method - -#### `compile(self, student, *, teacher=None, trainset, valset=None, target='t5-large', bsize=12, accumsteps=1, lr=5e-5, epochs=1, bf16=False)` - -This method first compiles for bootstrapping with the `BootstrapFewShot` teleprompter. It then prepares fine-tuning data by generating prompt-completion pairs for training and performs finetuning. After compilation, the LMs are set to the finetuned models and the method returns a compiled and fine-tuned predictor. - -**Parameters:** -- `student` (_Predict_): Student predictor to be fine-tuned. - -`teacher` (_Predict_, _optional_): Teacher predictor to help with fine-tuning. Defaults to `None`. -- `trainset` (_list_): Training dataset for fine-tuning. -- `valset` (_list_, _optional_): Validation dataset for fine-tuning. Defaults to `None`. -- `target` (_str_, _optional_): Target model for fine-tuning. Defaults to `'t5-large'`. -- `bsize` (_int_, _optional_): Batch size for training. Defaults to `12`. -- `accumsteps` (_int_, _optional_): Gradient accumulation steps. Defaults to `1`. -- `lr` (_float_, _optional_): Learning rate for fine-tuning. Defaults to `5e-5`. -- `epochs` (_int_, _optional_): Number of training epochs. Defaults to `1`. -- `bf16` (_bool_, _optional_): Enable mixed-precision training with BF16. Defaults to `False`. - -**Returns:** -- `compiled2` (_Predict_): A compiled and fine-tuned `Predict` instance. - -### Example - -```python -#Assume defined trainset -#Assume defined RAG class -... - -#Define teleprompter -teleprompter = BootstrapFinetune(teacher_settings=dict({'lm': teacher})) - -# Compile! -compiled_rag = teleprompter.compile(student=RAG(), trainset=trainset, target='google/flan-t5-base') -``` \ No newline at end of file diff --git a/docs-page/tsconfig.json b/docs/tsconfig.json similarity index 100% rename from docs-page/tsconfig.json rename to docs/tsconfig.json diff --git a/docs/using_local_models.md b/docs/using_local_models.md deleted file mode 100644 index 37c3391ab8..0000000000 --- a/docs/using_local_models.md +++ /dev/null @@ -1,198 +0,0 @@ -# Using local models within DSPy - -DSPy supports various methods including `built-in wrappers`, `server integration`, and `external package integration` for model loading. This documentation provides a concise introduction on how to load models within DSPy, extending these capabilities for your specific needs.
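Whichever loader you pick from the table below, the workflow is the same: construct the client and register it as the default LM with `dspy.settings`. A minimal sketch, assuming the Llama-2 weights used throughout this page are available locally or via the Hugging Face Hub:

```python
import dspy

# A sketch: the model ID below matches the examples later on this page.
llama = dspy.HFModel(model="meta-llama/Llama-2-7b-hf")

# Register the local model as the default LM for all DSPy modules.
dspy.settings.configure(lm=llama)
```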
- -## Local Model Loaders - -| Loaders | Jump To | -| --- | --- | -| HFModel | [HFModel Section](#hfmodel) | -| Cohere | [Cohere Section](#cohere) | -| TGI | [TGI Section](#tgi) | -| VLLM | [VLLM Section](#vllm) | -| MLC LLM | [MLC LLM Section](#mlc-llm) | -| Ollama | [Ollama Section](#ollama) | - - -# HFModel - -Initialize `HFModel` within your program with the desired model to load in. Here's an example call: - - ```python - llama = dspy.HFModel(model = 'meta-llama/Llama-2-7b-hf') - ``` - -# Text-Generation-Inference Server - -## Prerequisites - -- Docker must be installed on your system. If you don't have Docker installed, you can get it from [here](https://docs.docker.com/get-docker/). - -## Setting up the Text-Generation-Inference Server - -1. Clone the Text-Generation-Inference repository from GitHub by executing the following command: - - ``` - git clone https://github.com/huggingface/text-generation-inference.git - ``` - -2. Change into the cloned repository directory: - - ``` - cd text-generation-inference - ``` - -3. Execute the Docker command under the "Get Started" section to run the server: - - ``` - model=meta-llama/Llama-2-7b-hf # set to the specific Hugging Face model ID you wish to use. - num_shard=2 # set to the number of shards you wish to use. - volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run - - docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:0.9 --model-id $model --num-shard $num_shard - ``` - - This command will start the server and make it accessible at `http://localhost:8080`. - -If you want to connect to [Meta Llama 2 models](https://huggingface.co/meta-llama), make sure to use version 9.3 (or higher) of the docker image (ghcr.io/huggingface/text-generation-inference:0.9.3) and pass in your huggingface token as an environment variable. - - docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data -e HUGGING_FACE_HUB_TOKEN={your_token} ghcr.io/huggingface/text-generation-inference:0.9.3 --model-id $model --num-shard $num_shard - -## Sending requests to the server - -After setting up the text-generation-inference server and ensuring that it displays "Connected" when it's running, you can interact with it using the `HFClientTGI`. - -Initialize the `HFClientTGI` within your program with the desired parameters. Here is an example call: - - ```python - lm = dspy.HFClientTGI(model="meta-llama/Llama-2-7b-hf", port=8080, url="http://localhost") - ``` - - Customize the `model`, `port`, and `url` according to your requirements. The `model` parameter should be set to the specific Hugging Face model ID you wish to use. - - -### FAQs - -1. If your model doesn't require any shards, you still need to set a value for `num_shard`, but you don't need to include the parameter `--num-shard` on the command line. - -2. If your model runs into any "token exceeded" issues, you can set the following parameters on the command line to adjust the input length and token limit: - - `--max-input-length`: Set the maximum allowed input length for the text. - - `--max-total-tokens`: Set the maximum total tokens allowed for text generation. - -Please refer to the [official Text-Generation-Inference repository](https://github.com/huggingface/text-generation-inference) for more detailed information and documentation. - - -# vLLM Server - -## Setting up the vLLM Server - -Follow these steps to set up the vLLM Server: - -1. 
Build the server from source by following the instructions provided in the [Build from Source guide](https://vllm.readthedocs.io/en/latest/getting_started/installation.html#build-from-source). - -2. Start the server by running the following command, and specify your desired model, host, and port using the appropriate arguments. The default server address is http://localhost:8000. - - Example command: - ``` - python -m vllm.entrypoints.openai.api_server --model mosaicml/mpt-7b --port 8000 - ``` - -This will launch the vLLM server. - -## Sending requests to the vLLM server - -After setting up the vLLM server and ensuring that it displays "Connected" when it's running, you can interact with it using the `HFClientVLLM`. - -Initialize the `HFClientVLLM` within your program with the desired parameters. Here is an example call: - - ```python - lm = dspy.HFClientVLLM(model="mosaicml/mpt-7b", port=8000, url="http://localhost") - ``` - - Customize the `model`, `port`, `url`, and `max_tokens` according to your requirements. The `model` parameter should be set to the specific Hugging Face model ID you wish to use. - -Please refer to the [official vLLM repository](https://github.com/vllm-project/vllm) for more detailed information and documentation. - -# MLC LLM - -## Prerequisites - -1. Install the required packages using the following commands: - - ```shell - pip install --no-deps --pre --force-reinstall mlc-ai-nightly-cu118 mlc-chat-nightly-cu118 -f https://mlc.ai/wheels - pip install transformers - git lfs install - ``` - - Adjust the pip wheels according to your OS/platform by referring to the provided commands in [MLC packages](https://mlc.ai/package/). - -## Running MLC Llama-2 models - -1. Create a directory for prebuilt models: - - ```shell - mkdir -p dist/prebuilt - ``` - -2. Clone the necessary libraries from the repository: - - ```shell - git clone https://github.com/mlc-ai/binary-mlc-llm-libs.git dist/prebuilt/lib - cd dist/prebuilt - ``` - -3. Choose a Llama-2 model from [MLC LLMs](https://huggingface.co/mlc-ai) and clone the model repository: - - ```shell - git clone https://huggingface.co/mlc-ai/mlc-chat-Llama-2-7b-chat-hf-q4f16_1 - ``` - -4. Initialize the `ChatModuleClient` within your program with the desired parameters. Here's an example call: - - ```python - llama = dspy.ChatModuleClient(model='dist/prebuilt/mlc-chat-Llama-2-7b-chat-hf-q4f16_1', model_path='dist/prebuilt/lib/Llama-2-7b-chat-hf-q4f16_1-cuda.so') - ``` -Please refer to the [official MLC repository](https://github.com/mlc-ai/mlc-llm) for more detailed information and [documentation](https://mlc.ai/mlc-llm/docs/get_started/try_out.html). - -# Ollama - -Ollama is a good software tool that allows you to run LLMs locally, such as Mistral, Llama2, and Phi. -The following are the instructions to install and run Ollama. - -## Prerequisites - -Install Ollama by following the instructions from this page: - -- https://ollama.ai - -Download model: `ollama pull` - -Download a model by running the `ollama pull` command. You can download Mistral, Llama2, and Phi. - -```bash -# download mistral -ollama pull mistral -``` - -Here is the list of other models you can download: -- https://ollama.ai/library - -## Running Ollama model - -Run model: `ollama run` - -You can test a model by running the model with the `ollama run` command. 
- -```bash -# run mistral -ollama run mistral -``` - -## Sending requests to the server - -Here is the code to load a model through Ollama: - -```python -lm = dspy.OllamaLocal(model='mistral') -``` \ No newline at end of file diff --git a/examples/longformqa/longformqa_assertions.ipynb b/examples/longformqa/longformqa_assertions.ipynb index d3059b1cbc..ae40d72bd0 100644 --- a/examples/longformqa/longformqa_assertions.ipynb +++ b/examples/longformqa/longformqa_assertions.ipynb @@ -6,6 +6,7 @@ "source": [ "\"DSPy7\n", "\n", + "\n", "## **DSPy Assertions**: Asserting Computational Constraints on Foundation \n", "\n", "### **LongFormQA**: Generating long-form length responses to answer questions" diff --git a/examples/quiz/quiz_assertions.ipynb b/examples/quiz/quiz_assertions.ipynb index 8cdbdbcc9d..6fa21df532 100644 --- a/examples/quiz/quiz_assertions.ipynb +++ b/examples/quiz/quiz_assertions.ipynb @@ -18,10 +18,10 @@ "[](https://colab.research.google.com/github/stanfordnlp/dspy/blob/main/examples/quiz/quiz_assertions.ipynb)\n", "\n", "\n", - "This notebook highlights an example of [**DSPy Assertions**](../../docs/assertions.md), allowing for declaration of computational constraints within DSPy programs. \n", + "This notebook highlights an example of [**DSPy Assertions**](https://dspy-docs.vercel.app/docs/building-blocks/assertions), allowing for declaration of computational constraints within DSPy programs. \n", "\n", "\n", - "This notebook builds upon the foundational concepts of the **DSPy** framework. Prerequisites of following this notebook is having gone through the [DSPy tutorial](../../intro.ipynb), the [**DSPy Assertions documentation**](../../docs/assertions.md) and the introductory DSPy Assertions [tutorial on LongFormQA](../longformqa/longformqa_assertions.ipynb).\n" + "This notebook builds upon the foundational concepts of the **DSPy** framework. Prerequisites of following this notebook is having gone through the [DSPy tutorial](../../intro.ipynb), the [**DSPy Assertions documentation**](https://dspy-docs.vercel.app/docs/building-blocks/assertions) and the introductory DSPy Assertions [tutorial on LongFormQA](../longformqa/longformqa_assertions.ipynb).\n" ] }, { diff --git a/examples/tweets/tweets_assertions.ipynb b/examples/tweets/tweets_assertions.ipynb index 22c2db296a..dc699dc135 100644 --- a/examples/tweets/tweets_assertions.ipynb +++ b/examples/tweets/tweets_assertions.ipynb @@ -18,10 +18,10 @@ "[](https://colab.research.google.com/github/stanfordnlp/dspy/blob/main/examples/tweets/tweets_assertions.ipynb)\n", "\n", "\n", - "This notebook highlights an example of [**DSPy Assertions**](../../docs/assertions.md), allowing for declaration of computational constraints within DSPy programs. \n", + "This notebook highlights an example of [**DSPy Assertions**](https://dspy-docs.vercel.app/docs/building-blocks/assertions), allowing for declaration of computational constraints within DSPy programs. \n", "\n", "\n", - "This notebook builds upon the foundational concepts of the **DSPy** framework. Prerequisites of following this notebook is having gone through the [DSPy tutorial](../../intro.ipynb), the [**DSPy Assertions documentation**](../../docs/assertions.md) and the introductory DSPy Assertions [tutorial on LongFormQA](../longformqa/longformqa_assertions.ipynb).\n" + "This notebook builds upon the foundational concepts of the **DSPy** framework. 
Prerequisites of following this notebook is having gone through the [DSPy tutorial](../../intro.ipynb), the [**DSPy Assertions documentation**](https://dspy-docs.vercel.app/docs/building-blocks/assertions) and the introductory DSPy Assertions [tutorial on LongFormQA](../longformqa/longformqa_assertions.ipynb).\n" ] }, { From a81f9a32a79f96f44c18516e793396f4dd28177a Mon Sep 17 00:00:00 2001 From: ragul-kachiappan Date: Thu, 29 Feb 2024 14:15:28 +0530 Subject: [PATCH 006/243] fix: use generic embedding function as default in ChromadbRM --- dspy/retrieve/chromadb_rm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dspy/retrieve/chromadb_rm.py b/dspy/retrieve/chromadb_rm.py index b55fec8fff..e41be84277 100644 --- a/dspy/retrieve/chromadb_rm.py +++ b/dspy/retrieve/chromadb_rm.py @@ -70,11 +70,11 @@ def __init__( persist_directory: str, embedding_function: Optional[ EmbeddingFunction[Embeddable] - ] = None, + ] = ef.DefaultEmbeddingFunction(), k: int = 7, ): self._init_chromadb(collection_name, persist_directory) - self.ef = embedding_function or self._chromadb_collection._embedding_function + self.ef = embedding_function super().__init__(k=k) From e62f7fabcb63368f610645e248beca36c8081360 Mon Sep 17 00:00:00 2001 From: Jean Nshuti Date: Thu, 29 Feb 2024 10:58:53 +0200 Subject: [PATCH 007/243] Update language_models.ipynb Colab link The Colab button is being redirected to signatures.ipynb instead of language_models.ipynb --- docs/guides/language_models.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/language_models.ipynb b/docs/guides/language_models.ipynb index 8313bf9143..be7f373180 100644 --- a/docs/guides/language_models.ipynb +++ b/docs/guides/language_models.ipynb @@ -19,7 +19,7 @@ "\n", "## Guide: **Language Models**\n", "\n", - "[](https://colab.research.google.com/github/stanfordnlp/dspy/blob/main/docs/guides/signatures.ipynb)" + "[](https://colab.research.google.com/github/stanfordnlp/dspy/blob/main/docs/guides/language_models.ipynb)" ] }, { From 58c4f366667d0b910b4ecf5b4a2d3cd3106dd5d9 Mon Sep 17 00:00:00 2001 From: drawal1 <114010652+drawal1@users.noreply.github.com> Date: Thu, 29 Feb 2024 11:33:29 -0600 Subject: [PATCH 008/243] Update template_v2.py Fixed all code that could cause issue #428 - key error with the key 'augmented' --- dsp/templates/template_v2.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/dsp/templates/template_v2.py b/dsp/templates/template_v2.py index f61085dd05..656f490748 100644 --- a/dsp/templates/template_v2.py +++ b/dsp/templates/template_v2.py @@ -94,7 +94,7 @@ def query(self, example: Example, is_demo: bool = False) -> str: def format_handler(x): assert type(x) == str, f"Need format_handler for {field.input_variable} of type {type(x)}" return " ".join(x.split()) - + formatted_value = format_handler(example[field.input_variable]) separator = '\n' if field.separator == ' ' and '\n' in formatted_value else field.separator @@ -102,7 +102,7 @@ def format_handler(x): f"{field.name}{separator}{formatted_value}" ) - if self._has_augmented_guidelines() and ("augmented" in example and example.augmented): + if self._has_augmented_guidelines() and (example.get('augmented', False)): return "\n\n".join([r for r in result if r]) return "\n".join([r for r in result if r]) @@ -207,7 +207,7 @@ def __call__(self, example, show_guidelines=True) -> str: self.query(demo, is_demo=True) for demo in example.demos if ( - ("augmented" not in demo or not demo.augmented) + (not 
demo.get('augmented', False)) and ( # validate that the training example has the same primitive input var as the template self.fields[-1].input_variable in demo and demo[self.fields[-1].input_variable] is not None @@ -218,7 +218,7 @@ def __call__(self, example, show_guidelines=True) -> str: ademos = [ self.query(demo, is_demo=True) for demo in example.demos - if "augmented" in demo and demo.augmented + if demo.get('augmented', False) ] # Move the rdemos to ademos if rdemo has all the fields filled in @@ -238,7 +238,7 @@ def __call__(self, example, show_guidelines=True) -> str: ademos.append(rdemo) else: rdemos_.append(rdemo) - + ademos = new_ademos + ademos rdemos = rdemos_ @@ -254,12 +254,12 @@ def __call__(self, example, show_guidelines=True) -> str: if len(query.split('\n')) > len(self.fields): long_query = True - if "augmented" not in example or not example.augmented: + if not example.get('augmented', False): example["augmented"] = True query = self.query(example) rdemos = "\n\n".join(rdemos) - + if len(rdemos) >= 1 and len(ademos) == 0 and not long_query: rdemos_and_query = "\n\n".join([rdemos, query]) parts = [ From c430c161b289a097407c50652b373fd25be1b39e Mon Sep 17 00:00:00 2001 From: Herumb Shandilya Date: Thu, 29 Feb 2024 23:47:17 +0530 Subject: [PATCH 009/243] update synthesis logic --- dspy/datasets/synthesizer.py | 132 ++++++++++++++++++++++++----------- 1 file changed, 91 insertions(+), 41 deletions(-) diff --git a/dspy/datasets/synthesizer.py b/dspy/datasets/synthesizer.py index 65e3be5a9f..0ba9392a82 100644 --- a/dspy/datasets/synthesizer.py +++ b/dspy/datasets/synthesizer.py @@ -1,29 +1,29 @@ import dspy from typing import List +from datasets import DatasetDict -class ExplainTask(dspy.Signature): - """Imagine you're a detective in a game where your mission is to unlock the mystery of a hidden treasure. The treasure map, in this case, is the task description. Your first step is to study the map carefully to understand where the treasure is hidden and how to get there. This means figuring out what the task is all about—like decoding clues. Once you've got a good grasp, your job is to explain your plan to your team in a way that's super easy to understand, as if you're telling a friend how to find the treasure without using the map. 
You won't be using the clues directly in your explanation but rather your understanding of them to guide your team clearly and simply to the treasure.""" +def format_examples(examples: List[dspy.Example]): + if isinstance(examples, str): + return examples - @staticmethod - def format_examples(examples: List[dspy.Example]): - if isinstance(examples, str): - return examples + formatted_example = "" - formatted_example = "" + for example in examples: + input_keys = example.inputs().keys() + label_keys = example.labels().keys() - for example in examples: - input_keys = example.inputs().keys() - label_keys = example.labels().keys() + formatted_example += f"Input:\n" + for key in input_keys: + formatted_example += f"{key}: {example[key]}\n" - formatted_example += f"Input:\n" - for key in input_keys: - formatted_example += f"{key}: {example[key]}\n" + formatted_example += f"Output:\n" + for key in label_keys: + formatted_example += f"{key}: {example[key]}\n" - formatted_example += f"Output:\n" - for key in label_keys: - formatted_example += f"{key}: {example[key]}\n" + return formatted_example - return formatted_example +class ExplainTask(dspy.Signature): + """Imagine you're a detective in a game where your mission is to unlock the mystery of a hidden treasure. The treasure map, in this case, is the task description. Your first step is to study the map carefully to understand where the treasure is hidden and how to get there. This means figuring out what the task is all about—like decoding clues. Once you've got a good grasp, your job is to explain your plan to your team in a way that's super easy to understand, as if you're telling a friend how to find the treasure without using the map. You won't be using the clues directly in your explanation but rather your understanding of them to guide your team clearly and simply to the treasure.""" examples = dspy.InputField( prefix="Few Shot Examples:-", @@ -53,7 +53,11 @@ class GenerateFieldDescription(dspy.Signature): class GenerateInputFieldsData(dspy.Signature): """You are an expert data generator with 30 years of experience in generating synthetic data. We want you to put these skills at work, I'll be providing you with some input fields that are columns of the csv file and the explanation of the task thse fields would be an input to. 
Your task is to generate synthetic for these fields.""" - pass + + task_description = dspy.InputField( + prefix="Task Description:", + desc="Description of the task the field is an input to.", + ) class GenerateOutputFieldsData(dspy.Signature): pass @@ -66,40 +70,86 @@ def __init__(self): self.generate_input_data = GenerateInputFieldsData() self.generate_output_data = GenerateOutputFieldsData() - def _prepare_synthetic_data_signature(self, signature: dspy.Signature): - signature + def _prepare_synthetic_data_signatures(self, input_keys: List[str], output_keys: List[str], task_description: str): + for key in input_keys: + field_details = self.generate_field_description( + task_description=task_description, + field_name=key, + ) - def generate(self, examples: List[dspy.Example], num_data: int) -> List[dspy.Example]: - input_keys = examples[0].keys() + field_name = field_details.field_name + field_description = field_details.field_description + + output_field = dspy.OutputField( + prefix=f"{field_name}:", + desc=field_description, + ) + setattr(self.generate_input_data, field_name, output_field) + + input_field = dspy.InputField( + prefix=f"{field_name}:", + desc=field_description, + ) + setattr(self.generate_output_data, field_name, input_field) + + for key in output_keys: + field_details = self.generate_field_description( + task_description=task_description, + field_name=key, + ) + + field_name = field_details.field_name + field_description = field_details.field_description + + output_field = dspy.OutputField( + prefix=f"{field_name}:", + desc=field_description, + ) + setattr(self.generate_output_data, field_name, output_field) + + return dspy.Predict(self.generate_input_data), dspy.Predict(self.generate_output_data) + def generate(self, examples: List[dspy.Example], num_data: int) -> List[dspy.Example]: task_description = self.explain_task(examples=examples) - self.generate_output_data.__doc__ = task_description + self.generate_output_data.__doc__ = task_description.explanation + + input_keys = [key for key in examples[0].inputs()] + output_keys = [key for key in examples[0].labels()] - self._prepare_synthetic_data_signature() + self.input_predictor, self.output_predictor = self._prepare_synthetic_data_signatures( + input_keys=input_keys, + output_keys=output_keys, + task_description=task_description, + ) data = [] for _ in range(num_data): - synthetic_data = {field: self.generate_synthetic_data() for field in fields} - data.append(synthetic_data) - + inputs = self.input_predictor(task_description=task_description) - def export(self): - pass + input_kwargs = { + key: inputs[key] + for key in input_keys + } - def _to_csv(self): - pass + outputs = self.output_predictor(**input_kwargs) - def _to_jsonl(self): - pass + output_kwargs = { + key: outputs[key] + for key in output_keys + } - def _to_pickle(self): - pass + data.append(dspy.Example(**input_kwargs, **output_kwargs).with_inputs(*input_keys)) - def _to_sql(self): - pass + return data + - def _to_parquet(self): - pass + def export(self, data: List[dspy.Example], path: str, mode: str = None): + extention = mode or path.split(".")[-1] + dataset_dict = DatasetDict( + [example.toDict() for example in data] + ) - def _to_arrow(self): - pass \ No newline at end of file + dataset_dict.save_to_disk( + path=path, + extention=extention, + ) \ No newline at end of file From c584be89ae907debfa2282a8f22f65cb4e88fc17 Mon Sep 17 00:00:00 2001 From: Herumb Shandilya Date: Thu, 29 Feb 2024 23:48:46 +0530 Subject: [PATCH 010/243] update input key updation 
logic in dataloaders --- dspy/datasets/dataloader.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/dspy/datasets/dataloader.py b/dspy/datasets/dataloader.py index b4593bd8af..2ac6358c4e 100644 --- a/dspy/datasets/dataloader.py +++ b/dspy/datasets/dataloader.py @@ -32,16 +32,16 @@ def from_huggingface( returned_split = {} for split_name in dataset.keys(): if fields: - returned_split[split_name] = [dspy.Example({field:row[field] for field in fields}).with_inputs(input_keys) for row in dataset[split_name]] + returned_split[split_name] = [dspy.Example({field:row[field] for field in fields}).with_inputs(*input_keys) for row in dataset[split_name]] else: - returned_split[split_name] = [dspy.Example({field:row[field] for field in row.keys()}).with_inputs(input_keys) for row in dataset[split_name]] + returned_split[split_name] = [dspy.Example({field:row[field] for field in row.keys()}).with_inputs(*input_keys) for row in dataset[split_name]] return returned_split except AttributeError: if fields: - return [dspy.Example({field:row[field] for field in fields}).with_inputs(input_keys) for row in dataset] + return [dspy.Example({field:row[field] for field in fields}).with_inputs(*input_keys) for row in dataset] else: - return [dspy.Example({field:row[field] for field in row.keys()}).with_inputs(input_keys) for row in dataset] + return [dspy.Example({field:row[field] for field in row.keys()}).with_inputs(*input_keys) for row in dataset] def from_csv(self, file_path:str, fields: List[str] = None, input_keys: Tuple[str] = ()) -> List[dspy.Example]: dataset = load_dataset("csv", data_files=file_path)["train"] @@ -49,7 +49,7 @@ def from_csv(self, file_path:str, fields: List[str] = None, input_keys: Tuple[st if not fields: fields = list(dataset.features) - return [dspy.Example({field:row[field] for field in fields}).with_inputs(input_keys) for row in dataset] + return [dspy.Example({field:row[field] for field in fields}).with_inputs(*input_keys) for row in dataset] def from_json(self, file_path:str, fields: List[str] = None, input_keys: Tuple[str] = ()) -> List[dspy.Example]: dataset = load_dataset("json", data_files=file_path)["train"] @@ -57,7 +57,7 @@ def from_json(self, file_path:str, fields: List[str] = None, input_keys: Tuple[s if not fields: fields = list(dataset.features) - return [dspy.Example({field:row[field] for field in fields}).with_inputs(input_keys) for row in dataset] + return [dspy.Example({field:row[field] for field in fields}).with_inputs(*input_keys) for row in dataset] def sample( From 449fbcc4b3210c034beccfcd0942230e066ee032 Mon Sep 17 00:00:00 2001 From: Herumb Shandilya Date: Thu, 29 Feb 2024 23:48:46 +0530 Subject: [PATCH 011/243] update input key updation logic in dataloaders --- dspy/datasets/dataloader.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/dspy/datasets/dataloader.py b/dspy/datasets/dataloader.py index b4593bd8af..2ac6358c4e 100644 --- a/dspy/datasets/dataloader.py +++ b/dspy/datasets/dataloader.py @@ -32,16 +32,16 @@ def from_huggingface( returned_split = {} for split_name in dataset.keys(): if fields: - returned_split[split_name] = [dspy.Example({field:row[field] for field in fields}).with_inputs(input_keys) for row in dataset[split_name]] + returned_split[split_name] = [dspy.Example({field:row[field] for field in fields}).with_inputs(*input_keys) for row in dataset[split_name]] else: - returned_split[split_name] = [dspy.Example({field:row[field] for field in row.keys()}).with_inputs(input_keys) for 
row in dataset[split_name]] + returned_split[split_name] = [dspy.Example({field:row[field] for field in row.keys()}).with_inputs(*input_keys) for row in dataset[split_name]] return returned_split except AttributeError: if fields: - return [dspy.Example({field:row[field] for field in fields}).with_inputs(input_keys) for row in dataset] + return [dspy.Example({field:row[field] for field in fields}).with_inputs(*input_keys) for row in dataset] else: - return [dspy.Example({field:row[field] for field in row.keys()}).with_inputs(input_keys) for row in dataset] + return [dspy.Example({field:row[field] for field in row.keys()}).with_inputs(*input_keys) for row in dataset] def from_csv(self, file_path:str, fields: List[str] = None, input_keys: Tuple[str] = ()) -> List[dspy.Example]: dataset = load_dataset("csv", data_files=file_path)["train"] @@ -49,7 +49,7 @@ def from_csv(self, file_path:str, fields: List[str] = None, input_keys: Tuple[st if not fields: fields = list(dataset.features) - return [dspy.Example({field:row[field] for field in fields}).with_inputs(input_keys) for row in dataset] + return [dspy.Example({field:row[field] for field in fields}).with_inputs(*input_keys) for row in dataset] def from_json(self, file_path:str, fields: List[str] = None, input_keys: Tuple[str] = ()) -> List[dspy.Example]: dataset = load_dataset("json", data_files=file_path)["train"] @@ -57,7 +57,7 @@ def from_json(self, file_path:str, fields: List[str] = None, input_keys: Tuple[s if not fields: fields = list(dataset.features) - return [dspy.Example({field:row[field] for field in fields}).with_inputs(input_keys) for row in dataset] + return [dspy.Example({field:row[field] for field in fields}).with_inputs(*input_keys) for row in dataset] def sample( From 81a83cc06436193059b5fbd8123415c33c3c9643 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Wed, 28 Feb 2024 15:25:26 -0800 Subject: [PATCH 012/243] Fixed error with generics, Fixed tests for python 3.9 --- dspy/functional/functional.py | 4 +-- dspy/signatures/signature.py | 23 ++++++++---- tests/functional/test_functional.py | 54 +++++++++++++++++++++++------ 3 files changed, 61 insertions(+), 20 deletions(-) diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py index 6fe3423330..c1dcec9a7b 100644 --- a/dspy/functional/functional.py +++ b/dspy/functional/functional.py @@ -103,9 +103,7 @@ def _prepare_signature(self): else: # Anything else we wrap in a pydantic object unwrap = lambda x: x - if not inspect.isclass(type_) or not issubclass( - type_, pydantic.BaseModel - ): + if not (inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel)): type_ = pydantic.create_model( "Output", value=(type_, ...), __base__=pydantic.BaseModel ) diff --git a/dspy/signatures/signature.py b/dspy/signatures/signature.py index c0a56070c0..ac9a334ce8 100644 --- a/dspy/signatures/signature.py +++ b/dspy/signatures/signature.py @@ -1,4 +1,5 @@ from copy import deepcopy +import typing import dsp from pydantic import BaseModel, Field, create_model from pydantic.fields import FieldInfo @@ -111,7 +112,7 @@ def insert(cls, index: int, name: str, field, type_: Type = None): # It's posisble to set the type as annotation=type in pydantic.Field(...) 
# But this may be annoying for users, so we allow them to pass the type if type_ is None: - type_ = field.annotation + type_ = field.annotation if type_ is None: type_ = str @@ -170,26 +171,34 @@ def __call__( fields = cls._parse_signature(signature) else: fields = signature - + # Validate the fields, this is important because we sometimes forget the # slightly unintuitive syntax with tuples of (type, Field) fixed_fields = {} for name, type_field in fields.items(): - assert isinstance(name, str), f"Field names must be strings, not {type(name)}" + assert isinstance( + name, str + ), f"Field names must be strings, not {type(name)}" if isinstance(type_field, FieldInfo): type_ = type_field.annotation field = type_field else: - assert isinstance(type_field, tuple), f"Field values must be tuples, not {type(type_field)}" + assert isinstance( + type_field, tuple + ), f"Field values must be tuples, not {type(type_field)}" type_, field = type_field # It might be better to be explicit about the type, but it currently would break # program of thought and teleprompters, so we just silently default to string. if type_ is None: type_ = str - assert isinstance(type_, type), f"Field types must be types, not {type(type_)}" - assert isinstance(field, FieldInfo), f"Field values must be Field instances, not {type(field)}" + assert isinstance(type_, type) or isinstance( + typing.get_origin(type_), type + ), f"Field types must be types, not {type(type_)}" + assert isinstance( + field, FieldInfo + ), f"Field values must be Field instances, not {type(field)}" fixed_fields[name] = (type_, field) - + # Fixing the fields shouldn't change the order assert list(fixed_fields.keys()) == list(fields.keys()) diff --git a/tests/functional/test_functional.py b/tests/functional/test_functional.py index 4d77c7e265..6cb005812b 100644 --- a/tests/functional/test_functional.py +++ b/tests/functional/test_functional.py @@ -1,10 +1,10 @@ import datetime -import json import textwrap import pydantic from pydantic import Field, BaseModel, field_validator from typing import Annotated import warnings +from typing import List import pytest @@ -30,6 +30,40 @@ def hard_question(topic: str) -> str: assert question == expected +def test_list_output(): + @predictor + def hard_question(topic: str) -> List[str]: + """Think of a hard factual question about a topic.""" + + expected = ["What is the speed of light?", "What is the speed of sound?"] + lm = DummyLM( + ['{"value": ["What is the speed of light?", "What is the speed of sound?"]}'] + ) + dspy.settings.configure(lm=lm) + + question = hard_question(topic="Physics") + lm.inspect_history(n=2) + + assert question == expected + + +def test_list_output(): + @predictor + def hard_question(topic: str) -> List[str]: + """Think of a hard factual question about a topic.""" + + expected = ["What is the speed of light?", "What is the speed of sound?"] + lm = DummyLM( + ['{"value": ["What is the speed of light?", "What is the speed of sound?"]}'] + ) + dspy.settings.configure(lm=lm) + + question = hard_question(topic="Physics") + lm.inspect_history(n=2) + + assert question == expected + + def test_simple_type(): class Question(pydantic.BaseModel): value: str @@ -72,7 +106,7 @@ def test_simple_class(): class Answer(pydantic.BaseModel): value: float certainty: float - comments: list[str] = pydantic.Field( + comments: List[str] = pydantic.Field( description="At least two comments about the answer" ) @@ -173,7 +207,10 @@ def answer(self, question: str) -> str: named_predictors = list(qa.named_predictors()) 
assert len(named_predictors) == 2 names, _ = zip(*qa.named_predictors()) - assert set(names) == {"hard_question.predictor.predictor", "answer.predictor.predictor"} + assert set(names) == { + "hard_question.predictor.predictor", + "answer.predictor.predictor", + } def test_bootstrap_effectiveness(): @@ -204,18 +241,15 @@ def simple_metric(example, prediction, trace=None): # This test verifies if the bootstrapping process improves the student's predictions student = SimpleModule() teacher = SimpleModule() - assert student.output.predictor.signature.equals( - teacher.output.predictor.signature) + assert student.output.predictor.signature.equals(teacher.output.predictor.signature) - lm = DummyLM(["blue", "Ring-ding-ding-ding-dingeringeding!"], - follow_examples=True) + lm = DummyLM(["blue", "Ring-ding-ding-ding-dingeringeding!"], follow_examples=True) dspy.settings.configure(lm=lm, trace=[]) bootstrap = BootstrapFewShot( metric=simple_metric, max_bootstrapped_demos=1, max_labeled_demos=1 ) - compiled_student = bootstrap.compile( - student, teacher=teacher, trainset=trainset) + compiled_student = bootstrap.compile(student, teacher=teacher, trainset=trainset) lm.inspect_history(n=2) @@ -388,7 +422,7 @@ def get_user_details() -> UserDetails: ] * 10 ) - dspy.settings.configure(lm=lm) + dspy.settings.configure(lm=lm) with pytest.raises(ValueError): get_user_details() From ed10b7408515470af44bbb63dd2951f8d67ba037 Mon Sep 17 00:00:00 2001 From: Connor Shorten Date: Thu, 29 Feb 2024 15:27:00 -0500 Subject: [PATCH 013/243] [DX] Increase default timeout on Ollama from 15s to 120s --- dsp/modules/ollama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dsp/modules/ollama.py b/dsp/modules/ollama.py index 31bb7e10fb..faa2c41005 100644 --- a/dsp/modules/ollama.py +++ b/dsp/modules/ollama.py @@ -33,7 +33,7 @@ def __init__( model: str = "llama2", model_type: Literal["chat", "text"] = "text", base_url: str = "http://localhost:11434", - timeout_s: float = 15, + timeout_s: float = 120, temperature: float = 0.0, max_tokens: int = 150, top_p: int = 1, From c323c8d01da57d71c53727ce5e3df5b5caac05d6 Mon Sep 17 00:00:00 2001 From: Herumb Shandilya Date: Fri, 1 Mar 2024 03:00:33 +0530 Subject: [PATCH 014/243] finalize flow and signature --- dspy/datasets/synthesizer.py | 70 +++++++++++++++++++++--------------- 1 file changed, 41 insertions(+), 29 deletions(-) diff --git a/dspy/datasets/synthesizer.py b/dspy/datasets/synthesizer.py index 0ba9392a82..cc5b34e95e 100644 --- a/dspy/datasets/synthesizer.py +++ b/dspy/datasets/synthesizer.py @@ -1,5 +1,6 @@ import dspy from typing import List +from tqdm import tqdm, trange from datasets import DatasetDict def format_examples(examples: List[dspy.Example]): @@ -12,31 +13,31 @@ def format_examples(examples: List[dspy.Example]): input_keys = example.inputs().keys() label_keys = example.labels().keys() - formatted_example += f"Input:\n" + formatted_example += f"Inputs:\n" for key in input_keys: formatted_example += f"{key}: {example[key]}\n" - formatted_example += f"Output:\n" + formatted_example += f"Outputs:\n" for key in label_keys: formatted_example += f"{key}: {example[key]}\n" return formatted_example class ExplainTask(dspy.Signature): - """Imagine you're a detective in a game where your mission is to unlock the mystery of a hidden treasure. The treasure map, in this case, is the task description. Your first step is to study the map carefully to understand where the treasure is hidden and how to get there. 
This means figuring out what the task is all about—like decoding clues. Once you've got a good grasp, your job is to explain your plan to your team in a way that's super easy to understand, as if you're telling a friend how to find the treasure without using the map. You won't be using the clues directly in your explanation but rather your understanding of them to guide your team clearly and simply to the treasure.""" + """Analyze the provided set of datapoints carefully, and prepare a concise, comprehensible summary that captures the essence and purpose of the task these datapoints aim to address. Your summary should illuminate the general objective and the type of problem being solved, offering a clear picture of what the task entails at a high level. Avoid getting into the nuances of individual datapoints, specifics about models, examples, algorithms, or any intricate technicalities. Your explanation should serve to clarify the task's overall goal and its basic premise, without touching on methodologies or solutions.""" examples = dspy.InputField( - prefix="Few Shot Examples:-", - desc="List of examples to analyze and explain the task.", + prefix="Examples Datapoints:-", + desc="List of datapoints to analyze and explain the task.", format=format_examples, ) explanation = dspy.OutputField( - prefix="Explanation:", + prefix="Task Description:", desc="Explanation of the task.", ) class GenerateFieldDescription(dspy.Signature): - """I'll be providing you with the name of the field and the task description. Your task is to generate a description for the field. The description should be such that it is easy to understand and gives a clear idea of what the field is about.""" + """Generate a concise and informative description for a given field based on the provided name and task description. This description should be no longer than 10 words and should be in simple english.""" task_description = dspy.InputField( prefix="Task Description:", @@ -52,8 +53,6 @@ class GenerateFieldDescription(dspy.Signature): ) class GenerateInputFieldsData(dspy.Signature): - """You are an expert data generator with 30 years of experience in generating synthetic data. We want you to put these skills at work, I'll be providing you with some input fields that are columns of the csv file and the explanation of the task thse fields would be an input to. 
Your task is to generate synthetic for these fields.""" - task_description = dspy.InputField( prefix="Task Description:", desc="Description of the task the field is an input to.", @@ -64,77 +63,90 @@ class GenerateOutputFieldsData(dspy.Signature): class Synthesizer: def __init__(self): - self.explain_task = ExplainTask() - self.generate_field_description = GenerateFieldDescription() + self.explain_task = dspy.Predict(ExplainTask) + self.generate_field_description = dspy.Predict(GenerateFieldDescription) - self.generate_input_data = GenerateInputFieldsData() - self.generate_output_data = GenerateOutputFieldsData() + self.generate_input_data = GenerateInputFieldsData + self.generate_output_data = GenerateOutputFieldsData - def _prepare_synthetic_data_signatures(self, input_keys: List[str], output_keys: List[str], task_description: str): - for key in input_keys: + def _prepare_synthetic_data_predictors(self, input_keys: List[str], output_keys: List[str], task_description: str): + for key in tqdm(input_keys, desc="Preparing Input Fields"): field_details = self.generate_field_description( task_description=task_description, field_name=key, ) - field_name = field_details.field_name + field_name = key field_description = field_details.field_description output_field = dspy.OutputField( prefix=f"{field_name}:", desc=field_description, ) - setattr(self.generate_input_data, field_name, output_field) + self.generate_input_data = self.generate_input_data.insert( + -1, + field_name, + output_field + ) input_field = dspy.InputField( prefix=f"{field_name}:", desc=field_description, ) - setattr(self.generate_output_data, field_name, input_field) + self.generate_output_data = self.generate_output_data.insert( + -1, + field_name, + input_field + ) - for key in output_keys: + for key in tqdm(output_keys, desc="Preparing Output Fields"): field_details = self.generate_field_description( task_description=task_description, field_name=key, ) - field_name = field_details.field_name + field_name = key field_description = field_details.field_description output_field = dspy.OutputField( prefix=f"{field_name}:", desc=field_description, ) - setattr(self.generate_output_data, field_name, output_field) + self.generate_output_data = self.generate_output_data.insert( + -1, + field_name, + output_field + ) return dspy.Predict(self.generate_input_data), dspy.Predict(self.generate_output_data) def generate(self, examples: List[dspy.Example], num_data: int) -> List[dspy.Example]: - task_description = self.explain_task(examples=examples) - self.generate_output_data.__doc__ = task_description.explanation + task_description = self.explain_task(examples=examples).explanation + self.generate_output_data.__doc__ = task_description input_keys = [key for key in examples[0].inputs()] output_keys = [key for key in examples[0].labels()] - self.input_predictor, self.output_predictor = self._prepare_synthetic_data_signatures( + self.input_predictor, self.output_predictor = self._prepare_synthetic_data_predictors( input_keys=input_keys, output_keys=output_keys, task_description=task_description, ) data = [] - for _ in range(num_data): - inputs = self.input_predictor(task_description=task_description) + for idx in trange(num_data, desc="Generating Synthetic Data"): + inputs = self.input_predictor(task_description=task_description, config=dict(temperature=0.7+0.01*idx)) + input_kwargs = { - key: inputs[key] + key: getattr(inputs, key) for key in input_keys } - outputs = self.output_predictor(**input_kwargs) + outputs = 
self.output_predictor(**input_kwargs, config=dict(temperature=0.7+0.01*idx)) output_kwargs = { - key: outputs[key] + key: getattr(outputs, key) for key in output_keys } From 3965f05f4f2fc63a430fe00348f0bc342b41b390 Mon Sep 17 00:00:00 2001 From: Frederick Ros Date: Fri, 1 Mar 2024 18:13:03 +0100 Subject: [PATCH 015/243] Update skycamp2023.ipynb There are 7 train examples. --- skycamp2023.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/skycamp2023.ipynb b/skycamp2023.ipynb index 50d7aff544..28cd3c7607 100644 --- a/skycamp2023.ipynb +++ b/skycamp2023.ipynb @@ -193,7 +193,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now let's compile this using our six `train` examples. We will use the very simple `BootstrapFewShot` in DSPy." + "Now let's compile this using our seven `train` examples. We will use the very simple `BootstrapFewShot` in DSPy." ] }, { From 55955ea75f159cc012015a95ef377ba3c2162ffe Mon Sep 17 00:00:00 2001 From: arnavsinghvi11 <54859892+arnavsinghvi11@users.noreply.github.com> Date: Fri, 1 Mar 2024 09:50:49 -0800 Subject: [PATCH 016/243] Update README.md --- docs/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/README.md b/docs/README.md index 9688ccf22b..7c4caca548 100644 --- a/docs/README.md +++ b/docs/README.md @@ -10,7 +10,7 @@ This guide is for contributors looking to make changes to the documentation in t ```bash #Ensure you are in the top-level dspy/ folder -git subtree pull --prefix=docs +git subtree pull --prefix=docs https://github.com/krypticmouse/dspy-docs master ``` 2. **Push your new changes on a new branch**: Feel free to add or edit existing documentation and open a PR for your changes. Once your PR is reviewed and approved, the changes will be ready to merge into main. @@ -19,5 +19,5 @@ git subtree pull --prefix=docs ```bash #Ensure you are in the top-level dspy/ folder -git subtree push --prefix=docs -``` \ No newline at end of file +git subtree push --prefix=docs https://github.com/krypticmouse/dspy-docs master +``` From b7dca2453050f17b8aa9abe301660c98ee575abb Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Thu, 29 Feb 2024 14:42:21 -0800 Subject: [PATCH 017/243] test output from make_example --- dspy/functional/functional.py | 8 +++++++- tests/functional/test_functional.py | 18 +----------------- 2 files changed, 8 insertions(+), 18 deletions(-) diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py index c1dcec9a7b..15cf41a274 100644 --- a/dspy/functional/functional.py +++ b/dspy/functional/functional.py @@ -72,12 +72,18 @@ def copy(self): @staticmethod def _make_example(type_): # Note: DSPy will cache this call so we only pay the first time TypedPredictor is called. - return dspy.Predict( + json_object = dspy.Predict( dspy.Signature( "json_schema -> json_object", "Make a very succinct json object that validates with the following schema", ) )(json_schema=json.dumps(type_.model_json_schema())).json_object + # We use the model_validate_json method to make sure the example is valid + try: + type_.model_validate_json(_unwrap_json(json_object)) + except (pydantic.ValidationError, ValueError): + return "" # Unable to make an example + return json_object # TODO: Another fun idea is to only (but automatically) do this if the output fails. # We could also have a more general "suggest solution" prompt that tries to fix the output # More directly. 
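The validation guard added above is a small generate-then-check loop: keep the candidate example only if it round-trips through the schema, otherwise fall back to an empty string. A standalone sketch of the same idea, with an illustrative pydantic model that is not taken from the patch:

```python
import pydantic

class Flight(pydantic.BaseModel):
    # Illustrative model for the sketch; the real caller passes whatever
    # output type the TypedPredictor signature declares.
    origin: str
    destination: str

def keep_if_valid(type_: type, candidate: str) -> str:
    # Mirror of the guard above: return the candidate example only if it
    # validates against the schema, otherwise fall back to "".
    try:
        type_.model_validate_json(candidate)
    except (pydantic.ValidationError, ValueError):
        return ""
    return candidate

print(keep_if_valid(Flight, '{"origin": "JFK", "destination": "LAX"}'))  # kept
print(keep_if_valid(Flight, "not even json"))                            # ""
```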
diff --git a/tests/functional/test_functional.py b/tests/functional/test_functional.py index 6cb005812b..438891a552 100644 --- a/tests/functional/test_functional.py +++ b/tests/functional/test_functional.py @@ -5,6 +5,7 @@ from typing import Annotated import warnings from typing import List +from pyparsing import Literal import pytest @@ -47,23 +48,6 @@ def hard_question(topic: str) -> List[str]: assert question == expected -def test_list_output(): - @predictor - def hard_question(topic: str) -> List[str]: - """Think of a hard factual question about a topic.""" - - expected = ["What is the speed of light?", "What is the speed of sound?"] - lm = DummyLM( - ['{"value": ["What is the speed of light?", "What is the speed of sound?"]}'] - ) - dspy.settings.configure(lm=lm) - - question = hard_question(topic="Physics") - lm.inspect_history(n=2) - - assert question == expected - - def test_simple_type(): class Question(pydantic.BaseModel): value: str From ec731f7ee040ef19b1cf024b71689ba28279c3e8 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Thu, 29 Feb 2024 16:22:51 -0800 Subject: [PATCH 018/243] Made functional more robust --- dspy/functional/functional.py | 40 ++++++++++++++++++++++------- tests/functional/test_functional.py | 18 ++++++------- 2 files changed, 40 insertions(+), 18 deletions(-) diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py index 15cf41a274..3003e9eef5 100644 --- a/dspy/functional/functional.py +++ b/dspy/functional/functional.py @@ -46,7 +46,7 @@ def __init__(self): self.__dict__[name] = attr.copy() -def TypedChainOfThought(signature, make_example=False): +def TypedChainOfThought(signature): """ Just like TypedPredictor, but adds a ChainOfThought OutputField. """ signature = ensure_signature(signature) output_keys = ", ".join(signature.output_fields.keys()) @@ -56,18 +56,17 @@ def TypedChainOfThought(signature, make_example=False): prefix="Reasoning: Let's think step by step in order to", desc="${produce the " + output_keys + "}. We ...", ), - ), make_example) + )) class TypedPredictor(dspy.Module): - def __init__(self, signature, make_example=False): + def __init__(self, signature): super().__init__() self.signature = signature self.predictor = dspy.Predict(signature) - self.make_example = make_example def copy(self): - return TypedPredictor(self.signature, self.make_example) + return TypedPredictor(self.signature) @staticmethod def _make_example(type_): @@ -120,7 +119,6 @@ def _prepare_signature(self): + ( f". Respond with a single JSON object using the schema " + json.dumps(type_.model_json_schema()) - + (". 
For example: " + self._make_example(type_) if self.make_example else "") ), format=lambda x: ( x if isinstance(x, str) else x.model_dump_json() @@ -155,11 +153,23 @@ def forward(self, **kwargs): parser = field.json_schema_extra.get("parser", lambda x: x) parsed_results[name] = parser(value) except (pydantic.ValidationError, ValueError) as e: - errors[name] = e + errors[name] = _format_error(e) + # If we can, we add an example to the error message + current_desc = field.json_schema_extra.get("desc", "") + i = current_desc.find("Respond with a single JSON object using the schema") + if i == -1: + continue # Only add examples to JSON objects + suffix, current_desc = current_desc[i:], current_desc[:i] + prefix = "You MUST use this format: " + if try_i + 1 < MAX_RETRIES and prefix not in current_desc: + if example := self._make_example(field.annotation): + signature = signature.with_updated_fields(name, + desc = current_desc + "\n" + prefix + example + "\n" + suffix + ) if errors: # Add new fields for each error for name, error in errors.items(): - modified_kwargs[f"error_{name}_{try_i}"] = str(error) + modified_kwargs[f"error_{name}_{try_i}"] = error signature = signature.append( f"error_{name}_{try_i}", dspy.InputField( @@ -173,7 +183,19 @@ def forward(self, **kwargs): for name, value in parsed_results.items(): setattr(result, name, value) return result - raise ValueError("Too many retries") + raise ValueError("Too many retries trying to get the correct output format. " + + "Try simplifying the requirements.") + + +def _format_error(error: Exception): + if isinstance(error, pydantic.ValidationError): + errors = [] + for e in error.errors(): + fields = ", ".join(e["loc"]) + errors.append(f"{e['msg']}: {fields} (error type: {e['type']})") + return "; ".join(errors) + else: + return str(error) def _func_to_signature(func): diff --git a/tests/functional/test_functional.py b/tests/functional/test_functional.py index 438891a552..216ec33150 100644 --- a/tests/functional/test_functional.py +++ b/tests/functional/test_functional.py @@ -3,9 +3,7 @@ import pydantic from pydantic import Field, BaseModel, field_validator from typing import Annotated -import warnings from typing import List -from pyparsing import Literal import pytest @@ -118,6 +116,7 @@ def forward(self, **kwargs): "What is the speed of light?", "Some bad reasoning, 3e8 m/s.", "3e8", # Bad answer 1 + "{...}", # Model is asked to create an example "Some good reasoning...", expected.model_dump_json(), # Good answer ] @@ -295,6 +294,8 @@ def flight_information(email: str) -> TravelInformation: [ # Example with a bad origin code. 
'{"origin": "JF0", "destination": "LAX", "date": "2022-12-25"}', + # Example to help the model understand + '{...}', # Fixed '{"origin": "JFK", "destination": "LAX", "date": "2022-12-25"}', ] @@ -343,7 +344,9 @@ def flight_information(email: str) -> TravelInformation: [ # First origin is wrong, then destination, then all is good '{"origin": "JF0", "destination": "LAX", "date": "2022-12-25"}', + '{...}', # Example to help the model understand '{"origin": "JFK", "destination": "LA0", "date": "2022-12-25"}', + '{...}', # Example to help the model understand '{"origin": "JFK", "destination": "LAX", "date": "2022-12-25"}', ] ) @@ -352,7 +355,6 @@ def flight_information(email: str) -> TravelInformation: assert flight_information(email="Some email") == TravelInformation( origin="JFK", destination="LAX", date=datetime.date(2022, 12, 25) ) - warnings.warn("This test is dependent on the version of pydantic used.") assert lm.get_convo(-1) == textwrap.dedent( """\ Given the fields `email`, produce the fields `flight_information`. @@ -373,12 +375,11 @@ def flight_information(email: str) -> TravelInformation: Email: Some email - Past Error (flight_information): 1 validation error for TravelInformation origin String should match pattern '^[A-Z]{3}$' [type=string_pattern_mismatch, input_value='JF0', input_type=str] For further information visit https://errors.pydantic.dev/2.5/v/string_pattern_mismatch + Past Error (flight_information): String should match pattern '^[A-Z]{3}$': origin (error type: string_pattern_mismatch) - Past Error (flight_information, 2): 1 validation error for TravelInformation destination String should match pattern '^[A-Z]{3}$' [type=string_pattern_mismatch, input_value='LA0', input_type=str] For further information visit https://errors.pydantic.dev/2.5/v/string_pattern_mismatch + Past Error (flight_information, 2): String should match pattern '^[A-Z]{3}$': destination (error type: string_pattern_mismatch) Flight Information: {"origin": "JFK", "destination": "LAX", "date": "2022-12-25"}""" - # Note: Pydantic version is hardcoded in the url here ) @@ -411,7 +412,6 @@ def get_user_details() -> UserDetails: with pytest.raises(ValueError): get_user_details() - warnings.warn("This test is dependent on the version of pydantic used.") assert lm.get_convo(-1) == textwrap.dedent( """\ Given the fields , produce the fields `get_user_details`. @@ -426,7 +426,7 @@ def get_user_details() -> UserDetails: --- - Past Error (get_user_details): 1 validation error for UserDetails name Value error, Name must be in uppercase. [type=value_error, input_value='lower case name', input_type=str] For further information visit https://errors.pydantic.dev/2.5/v/value_error - Past Error (get_user_details, 2): 1 validation error for UserDetails name Value error, Name must be in uppercase. 
[type=value_error, input_value='lower case name', input_type=str] For further information visit https://errors.pydantic.dev/2.5/v/value_error
+        Past Error (get_user_details): Value error, Name must be in uppercase.: name (error type: value_error)
+        Past Error (get_user_details, 2): Value error, Name must be in uppercase.: name (error type: value_error)
         Get User Details: {"name": "lower case name", "age": 25}"""
     )

From b8d0c64b9d32f8b5c7dd4a0dacecba4979f772b3 Mon Sep 17 00:00:00 2001
From: Thomas D Ahle
Date: Thu, 29 Feb 2024 16:30:50 -0800
Subject: [PATCH 019/243] Removed make_example=True from functional ipynb

---
 examples/functional/functional.ipynb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/functional/functional.ipynb b/examples/functional/functional.ipynb
index 0dcbef5dc5..cf7088a5d9 100644
--- a/examples/functional/functional.ipynb
+++ b/examples/functional/functional.ipynb
@@ -175,7 +175,7 @@
 "    entry_point: str = InputField()\n",
 "    solution: PythonCode = OutputField()\n",
 "\n",
-"predictor = TypedPredictor(CodeSignature, make_example=True)\n",
+"predictor = TypedPredictor(CodeSignature)\n",
 "prediction = predictor(\n",
 "    prompt=PythonCode(code=ds['test'][0]['prompt']),\n",
 "    test=PythonCode(code=ds['test'][0]['test']),\n",

From 702a89c443dea63e4065d529e006cc200ef88efa Mon Sep 17 00:00:00 2001
From: Thomas D Ahle
Date: Fri, 1 Mar 2024 00:56:31 -0800
Subject: [PATCH 020/243] Fixed bug with wrapped output fields in examples

---
 dspy/functional/functional.py       | 87 ++++++++++++-----------------
 dspy/signatures/signature.py        |  4 +-
 tests/functional/test_functional.py | 27 +++++++--
 3 files changed, 61 insertions(+), 57 deletions(-)

diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py
index 3003e9eef5..f349fa2e40 100644
--- a/dspy/functional/functional.py
+++ b/dspy/functional/functional.py
@@ -1,5 +1,5 @@
 import inspect, os, openai, dspy, typing, pydantic
-from typing import Annotated
+from typing import Annotated, List, Tuple
 import typing
 from dsp.templates import passages2text
 import json
@@ -27,7 +27,7 @@ def __init__(self, predictor, output_key):
         super().__init__()
         self.predictor = predictor
         self.output_key = output_key
-
+
     def copy(self):
         return _StripOutput(self.predictor.copy(), self.output_key)

@@ -37,7 +37,8 @@ def forward(self, **kwargs):


 class FunctionalModule(dspy.Module):
-    """ To use the @cot and @predictor decorators, your module needs to inheret form this class. """
+    """To use the @cot and @predictor decorators, your module needs to inherit from this class."""
+
     def __init__(self):
         super().__init__()
         for name in dir(self):
@@ -47,16 +48,18 @@ def __init__(self):


 def TypedChainOfThought(signature):
-    """ Just like TypedPredictor, but adds a ChainOfThought OutputField. """
+    """Just like TypedPredictor, but adds a ChainOfThought OutputField."""
     signature = ensure_signature(signature)
     output_keys = ", ".join(signature.output_fields.keys())
-    return TypedPredictor(signature.prepend(
-        "reasoning",
-        dspy.OutputField(
-            prefix="Reasoning: Let's think step by step in order to",
-            desc="${produce the " + output_keys + "}. We ...",
-        ),
-    ))
+    return TypedPredictor(
+        signature.prepend(
+            "reasoning",
+            dspy.OutputField(
+                prefix="Reasoning: Let's think step by step in order to",
+                desc="${produce the " + output_keys + "}. 
We ...", + ), + ) + ) class TypedPredictor(dspy.Module): @@ -108,28 +111,25 @@ def _prepare_signature(self): else: # Anything else we wrap in a pydantic object unwrap = lambda x: x + wrap = lambda x: x if not (inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel)): - type_ = pydantic.create_model( - "Output", value=(type_, ...), __base__=pydantic.BaseModel - ) + type_ = pydantic.create_model("Output", value=(type_, ...), __base__=pydantic.BaseModel) + wrap = lambda x: type_(value=x) unwrap = lambda x: x.value signature = signature.with_updated_fields( name, desc=field.json_schema_extra.get("desc", "") + ( - f". Respond with a single JSON object using the schema " + f". Respond with a single JSON object. JSON Schema: " + json.dumps(type_.model_json_schema()) ), - format=lambda x: ( - x if isinstance(x, str) else x.model_dump_json() - ), - parser=lambda x: unwrap( - type_.model_validate_json(_unwrap_json(x)) - ), + format=lambda x: (x if isinstance(x, str) else wrap(x).model_dump_json()), + parser=lambda x: unwrap(type_.model_validate_json(_unwrap_json(x))), + type_=type_, ) else: # If input field format = lambda x: x if isinstance(x, str) else str(x) - if type_ in (list[str], tuple[str]): + if type_ in (List[str], list[str], Tuple[str], tuple[str]): format = passages2text elif inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel): format = lambda x: x if isinstance(x, str) else x.model_dump_json() @@ -156,15 +156,15 @@ def forward(self, **kwargs): errors[name] = _format_error(e) # If we can, we add an example to the error message current_desc = field.json_schema_extra.get("desc", "") - i = current_desc.find("Respond with a single JSON object using the schema") + i = current_desc.find("JSON Schema: ") if i == -1: continue # Only add examples to JSON objects suffix, current_desc = current_desc[i:], current_desc[:i] prefix = "You MUST use this format: " if try_i + 1 < MAX_RETRIES and prefix not in current_desc: if example := self._make_example(field.annotation): - signature = signature.with_updated_fields(name, - desc = current_desc + "\n" + prefix + example + "\n" + suffix + signature = signature.with_updated_fields( + name, desc=current_desc + "\n" + prefix + example + "\n" + suffix ) if errors: # Add new fields for each error @@ -173,8 +173,7 @@ def forward(self, **kwargs): signature = signature.append( f"error_{name}_{try_i}", dspy.InputField( - prefix=f"Past Error " - + (f"({name}):" if try_i == 0 else f"({name}, {try_i+1}):"), + prefix=f"Past Error " + (f"({name}):" if try_i == 0 else f"({name}, {try_i+1}):"), desc="An error to avoid in the future", ), ) @@ -183,8 +182,9 @@ def forward(self, **kwargs): for name, value in parsed_results.items(): setattr(result, name, value) return result - raise ValueError("Too many retries trying to get the correct output format. " - + "Try simplifying the requirements.") + raise ValueError( + "Too many retries trying to get the correct output format. 
" + "Try simplifying the requirements.", errors + ) def _format_error(error: Exception): @@ -249,9 +249,7 @@ def main(): class Answer(pydantic.BaseModel): value: float certainty: float - comments: list[str] = pydantic.Field( - description="At least two comments about the answer" - ) + comments: list[str] = pydantic.Field(description="At least two comments about the answer") class QA(dspy.Module): @predictor @@ -290,16 +288,11 @@ def validate_context_and_answer_and_hops(example, pred, trace=None): if not dspy.evaluate.answer_passage_match(example, pred): return False - hops = [example.question] + [ - outputs.query for *_, outputs in trace if "query" in outputs - ] + hops = [example.question] + [outputs.query for *_, outputs in trace if "query" in outputs] if max([len(h) for h in hops]) > 100: return False - if any( - dspy.evaluate.answer_exact_match_str(hops[idx], hops[:idx], frac=0.8) - for idx in range(2, len(hops)) - ): + if any(dspy.evaluate.answer_exact_match_str(hops[idx], hops[:idx], frac=0.8) for idx in range(2, len(hops))): return False return True @@ -307,9 +300,7 @@ def validate_context_and_answer_and_hops(example, pred, trace=None): def gold_passages_retrieved(example, pred, trace=None): gold_titles = set(map(dspy.evaluate.normalize_text, example["gold_titles"])) - found_titles = set( - map(dspy.evaluate.normalize_text, [c.split(" | ")[0] for c in pred.context]) - ) + found_titles = set(map(dspy.evaluate.normalize_text, [c.split(" | ")[0] for c in pred.context])) return gold_titles.issubset(found_titles) @@ -322,9 +313,7 @@ def hotpot(): from dspy.teleprompt.bootstrap import BootstrapFewShot print("Load the dataset.") - dataset = HotPotQA( - train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0 - ) + dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0) trainset = [x.with_inputs("question") for x in dataset.train] devset = [x.with_inputs("question") for x in dataset.dev] print("Done") @@ -361,9 +350,7 @@ def forward(self, question): lm = dspy.OpenAI(model="gpt-3.5-turbo", max_tokens=4000) dspy.settings.configure(lm=lm, rm=rm, trace=[]) - evaluate_on_hotpotqa = Evaluate( - devset=devset, num_threads=10, display_progress=True, display_table=5 - ) + evaluate_on_hotpotqa = Evaluate(devset=devset, num_threads=10, display_progress=True, display_table=5) # uncompiled (i.e., zero-shot) program uncompiled_baleen = SimplifiedBaleen() @@ -373,9 +360,7 @@ def forward(self, question): ) # compiled (i.e., few-shot) program - compiled_baleen = BootstrapFewShot( - metric=validate_context_and_answer_and_hops - ).compile( + compiled_baleen = BootstrapFewShot(metric=validate_context_and_answer_and_hops).compile( SimplifiedBaleen(), teacher=SimplifiedBaleen(passages_per_hop=2), trainset=trainset, diff --git a/dspy/signatures/signature.py b/dspy/signatures/signature.py index ac9a334ce8..a5f6b976a7 100644 --- a/dspy/signatures/signature.py +++ b/dspy/signatures/signature.py @@ -77,7 +77,7 @@ def fields(cls): # Make sure to give input fields before output fields return {**cls.input_fields, **cls.output_fields} - def with_updated_fields(cls, name, **kwargs): + def with_updated_fields(cls, name, type_=None, **kwargs): """Returns a new Signature type with the field, name, updated with fields[name].json_schema_extra[key] = value.""" fields_copy = deepcopy(cls.fields) @@ -85,6 +85,8 @@ def with_updated_fields(cls, name, **kwargs): **fields_copy[name].json_schema_extra, **kwargs, } + if type_ is not None: + fields_copy[name].annotation = type_ return 
Signature(fields_copy, cls.instructions) @property diff --git a/tests/functional/test_functional.py b/tests/functional/test_functional.py index 216ec33150..5d0f230b16 100644 --- a/tests/functional/test_functional.py +++ b/tests/functional/test_functional.py @@ -31,8 +31,8 @@ def hard_question(topic: str) -> str: def test_list_output(): @predictor - def hard_question(topic: str) -> List[str]: - """Think of a hard factual question about a topic.""" + def hard_questions(topics: List[str]) -> List[str]: + pass expected = ["What is the speed of light?", "What is the speed of sound?"] lm = DummyLM( @@ -40,7 +40,7 @@ def hard_question(topic: str) -> List[str]: ) dspy.settings.configure(lm=lm) - question = hard_question(topic="Physics") + question = hard_questions(topics=["Physics", "Music"]) lm.inspect_history(n=2) assert question == expected @@ -369,7 +369,7 @@ def flight_information(email: str) -> TravelInformation: Past Error (flight_information, 2): An error to avoid in the future - Flight Information: ${flight_information}. Respond with a single JSON object using the schema {"properties": {"origin": {"pattern": "^[A-Z]{3}$", "title": "Origin", "type": "string"}, "destination": {"pattern": "^[A-Z]{3}$", "title": "Destination", "type": "string"}, "date": {"format": "date", "title": "Date", "type": "string"}}, "required": ["origin", "destination", "date"], "title": "TravelInformation", "type": "object"} + Flight Information: ${flight_information}. Respond with a single JSON object. JSON Schema: {"properties": {"origin": {"pattern": "^[A-Z]{3}$", "title": "Origin", "type": "string"}, "destination": {"pattern": "^[A-Z]{3}$", "title": "Destination", "type": "string"}, "date": {"format": "date", "title": "Date", "type": "string"}}, "required": ["origin", "destination", "date"], "title": "TravelInformation", "type": "object"} --- @@ -422,7 +422,7 @@ def get_user_details() -> UserDetails: Past Error (get_user_details): An error to avoid in the future Past Error (get_user_details, 2): An error to avoid in the future - Get User Details: ${get_user_details}. Respond with a single JSON object using the schema {"properties": {"name": {"title": "Name", "type": "string"}, "age": {"title": "Age", "type": "integer"}}, "required": ["name", "age"], "title": "UserDetails", "type": "object"} + Get User Details: ${get_user_details}. Respond with a single JSON object. JSON Schema: {"properties": {"name": {"title": "Name", "type": "string"}, "age": {"title": "Age", "type": "integer"}}, "required": ["name", "age"], "title": "UserDetails", "type": "object"} --- @@ -430,3 +430,20 @@ def get_user_details() -> UserDetails: Past Error (get_user_details, 2): Value error, Name must be in uppercase.: name (error type: value_error) Get User Details: {"name": "lower case name", "age": 25}""" ) + + +def test_annotated_field(): + # Since we don't currently validate fields on the main signature, + # the annotated fields are also not validated. + # But at least it should not crash. 
+ + @predictor + def test(input: Annotated[str, Field(description="description")]) -> Annotated[float, Field(gt=0, lt=1)]: + pass + + lm = DummyLM(["0.5"]) + dspy.settings.configure(lm=lm) + + output = test(input="input") + + assert output == 0.5 From f6baa0cb1e8502e36692256dd418c2ca3f048c47 Mon Sep 17 00:00:00 2001 From: Herumb Shandilya Date: Sat, 2 Mar 2024 00:04:25 +0530 Subject: [PATCH 021/243] Example page and content changes --- docs/docs/tutorials/examples.md | 28 +++++++++++++++++++ .../src/components/HomepageFeatures/index.tsx | 2 +- 2 files changed, 29 insertions(+), 1 deletion(-) create mode 100644 docs/docs/tutorials/examples.md diff --git a/docs/docs/tutorials/examples.md b/docs/docs/tutorials/examples.md new file mode 100644 index 0000000000..a0f51e4361 --- /dev/null +++ b/docs/docs/tutorials/examples.md @@ -0,0 +1,28 @@ +--- +sidebar_position: 99998 +--- + +# Community Examples + +The DSPy team believes complexity has to be justified. We take this seriously: we never release a complex tutorial (above) or example (below) _unless we can demonstrate empirically that this complexity has generally led to improved quality or cost._ This kind of rule is rarely enforced by other frameworks or docs, but you can count on it in DSPy examples. + +There's a bunch of examples in the `examples/` directory and in the top-level directory. We welcome contributions! + +You can find other examples tweeted by [@lateinteraction](https://twitter.com/lateinteraction) on Twitter/X. + +**Some other examples (not exhaustive, feel free to add more via PR):** + +- Applying DSPy Assertions + - [Long-form Answer Generation with Citations, by Arnav Singhvi](https://colab.research.google.com/github/stanfordnlp/dspy/blob/main/examples/longformqa/longformqa_assertions.ipynb) + - [Generating Answer Choices for Quiz Questions, by Arnav Singhvi](https://colab.research.google.com/github/stanfordnlp/dspy/blob/main/examples/quiz/quiz_assertions.ipynb) + - [Generating Tweets for QA, by Arnav Singhvi](https://colab.research.google.com/github/stanfordnlp/dspy/blob/main/examples/tweets/tweets_assertions.ipynb) +- [Compiling LCEL runnables from LangChain in DSPy](https://github.com/stanfordnlp/dspy/blob/main/examples/tweets/compiling_langchain.ipynb) +- [AI feedback, or writing LM-based metrics in DSPy](https://github.com/stanfordnlp/dspy/blob/main/examples/tweets/tweet_metric.py) +- [DSPy Optimizers Benchmark on a bunch of different tasks, by Michael Ryan](https://github.com/stanfordnlp/dspy/tree/main/testing/tasks) +- [Indian Languages NLI with gains due to compiling by Saiful Haq](https://github.com/saifulhaq95/DSPy-Indic/blob/main/indicxlni.ipynb) +- [Sophisticated Extreme Multi-Class Classification, IReRa, by Karel D’Oosterlinck](https://github.com/KarelDO/xmc.dspy) +- [DSPy on BIG-Bench Hard Example, by Chris Levy](https://drchrislevy.github.io/posts/dspy/dspy.html) +- [Using Ollama with DSPy for Mistral (quantized) by @jrknox1977](https://gist.github.com/jrknox1977/78c17e492b5a75ee5bbaf9673aee4641) +- [Using DSPy, "The Unreasonable Effectiveness of Eccentric Automatic Prompts" (paper) by VMware's Rick Battle & Teja Gollapudi, and interview at TheRegister](https://www.theregister.com/2024/02/22/prompt_engineering_ai_models/) + +There are also recent cool examples at [Weaviate's DSPy cookbook](https://github.com/weaviate/recipes/tree/main/integrations/dspy) by Connor Shorten. [See tutorial on YouTube](https://www.youtube.com/watch?v=CEuUG4Umfxs). 
\ No newline at end of file diff --git a/docs/src/components/HomepageFeatures/index.tsx b/docs/src/components/HomepageFeatures/index.tsx index a09634beb0..5d7eb942d3 100644 --- a/docs/src/components/HomepageFeatures/index.tsx +++ b/docs/src/components/HomepageFeatures/index.tsx @@ -28,7 +28,7 @@ const FeatureList: FeatureItem[] = [ ), }, { - title: 'Universal Compatibility', + title: 'Cross-LM Compatibility', img: '/img/universal_compatibility.png', description: ( <> From 3069b4ec5da37982cd4da9ba0ff8acc83ce70af9 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Fri, 1 Mar 2024 11:36:16 -0800 Subject: [PATCH 022/243] Support for max_errors in randomsearch --- dspy/teleprompt/random_search.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/dspy/teleprompt/random_search.py b/dspy/teleprompt/random_search.py index 8605cd427a..43d2c5d4c7 100644 --- a/dspy/teleprompt/random_search.py +++ b/dspy/teleprompt/random_search.py @@ -28,7 +28,7 @@ class BootstrapFewShotWithRandomSearch(Teleprompter): - def __init__(self, metric, teacher_settings={}, max_bootstrapped_demos=4, max_labeled_demos=16, max_rounds=1, num_candidate_programs=16, num_threads=6, stop_at_score=None): + def __init__(self, metric, teacher_settings={}, max_bootstrapped_demos=4, max_labeled_demos=16, max_rounds=1, num_candidate_programs=16, num_threads=6, max_errors=10, stop_at_score=None): self.metric = metric self.teacher_settings = teacher_settings self.max_rounds = max_rounds @@ -37,6 +37,7 @@ def __init__(self, metric, teacher_settings={}, max_bootstrapped_demos=4, max_la self.stop_at_score = stop_at_score self.min_num_samples = 1 self.max_num_samples = max_bootstrapped_demos + self.max_erros = max_errors self.num_candidate_sets = num_candidate_programs # self.max_num_traces = 1 + int(max_bootstrapped_demos / 2.0 * self.num_candidate_sets) @@ -93,7 +94,7 @@ def compile(self, student, *, teacher=None, trainset, valset=None, restrict=None program2 = teleprompter.compile(student, teacher=teacher, trainset=trainset2) evaluate = Evaluate(devset=self.valset, metric=self.metric, num_threads=self.num_threads, - display_table=False, display_progress=True) + max_errors=self.max_errors, display_table=False, display_progress=True) score, subscores = evaluate(program2, return_all_scores=True) From 9f726ff588f592d5564eaa11c245866a79013cb7 Mon Sep 17 00:00:00 2001 From: Herumb Shandilya <43719685+krypticmouse@users.noreply.github.com> Date: Sat, 2 Mar 2024 01:19:55 +0530 Subject: [PATCH 023/243] Add local models back --- .../local_models/HFClientTGI.mdx | 90 +++++++++++++++++++ .../local_models/HFClientVLLM.mdx | 82 +++++++++++++++++ 2 files changed, 172 insertions(+) create mode 100644 docs/docs/deep-dive/language_model_clients/local_models/HFClientTGI.mdx create mode 100644 docs/docs/deep-dive/language_model_clients/local_models/HFClientVLLM.mdx diff --git a/docs/docs/deep-dive/language_model_clients/local_models/HFClientTGI.mdx b/docs/docs/deep-dive/language_model_clients/local_models/HFClientTGI.mdx new file mode 100644 index 0000000000..2550ed3c5f --- /dev/null +++ b/docs/docs/deep-dive/language_model_clients/local_models/HFClientTGI.mdx @@ -0,0 +1,90 @@ +import AuthorDetails from '@site/src/components/AuthorDetails'; + +## [HFClient TGI](https://github.com/huggingface/text-generation-inference) + +### Prerequisites - Launching TGI Server locally + +Refer to the [Text Generation-Inference Server API](/api/local_language_model_clients/TGI) for setting up the TGI server locally. 
+
+```bash
+#Example TGI Server Launch
+
+model=meta-llama/Llama-2-7b-hf # set to the specific Hugging Face model ID you wish to use.
+num_shard=1 # set to the number of shards you wish to use.
+volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
+
+docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data -e HUGGING_FACE_HUB_TOKEN={your_token} ghcr.io/huggingface/text-generation-inference:latest --model-id $model --num-shard $num_shard
+```
+
+This command will start the server and make it accessible at `http://localhost:8080`.
+
+
+### Setting up the TGI Client
+
+The constructor initializes the `HFModel` base class to support the handling of prompting HuggingFace models. It configures the client for communicating with the hosted TGI server to generate requests. This requires the following parameters:
+
+- `model` (_str_): ID of Hugging Face model connected to the TGI server.
+- `port` (_int_ or _list_): Port for communicating with the TGI server. This can be a single port number (`8080`) or a list of TGI ports (`[8080, 8081, 8082]`) to route the requests to.
+- `url` (_str_): Base URL of hosted TGI server. This will often be `"http://localhost"`.
+- `http_request_kwargs` (_dict_): Dictionary of additional keyword arguments to pass to the HTTP request function to the TGI server. This is `None` by default.
+- `**kwargs`: Additional keyword arguments to configure the TGI client.
+
+Example of the TGI constructor:
+
+```python
+class HFClientTGI(HFModel):
+    def __init__(self, model, port, url="http://future-hgx-1", http_request_kwargs=None, **kwargs):
+```
+
+### Under the Hood
+
+#### `_generate(self, prompt, **kwargs) -> dict`
+
+**Parameters:**
+- `prompt` (_str_): Prompt to send to the model hosted on the TGI server.
+- `**kwargs`: Additional keyword arguments for the completion request.
+
+**Returns:**
+- `dict`: dictionary with `prompt` and a list of response `choices`.
+
+Internally, the method handles the specifics of preparing the request prompt and corresponding payload to obtain the response.
+
+After generation, the method parses the JSON response received from the server and retrieves the output through `json_response["generated_text"]`. This is then stored in the `completions` list.
+
+If the JSON response includes a `details` field and, within it, `best_of_sequences`, this indicates that multiple sequences were generated. This is usually the case when `best_of > 1` is set in the initialized kwargs. Each of these sequences is accessed through `x["generated_text"]` and added to the `completions` list.
+
+Lastly, the method constructs the response dictionary with two keys: the original request `prompt` and `choices`, a list of dictionaries representing generated completions with the key `text` holding the response's generated text.
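+
+For illustration, the returned dictionary for a single completion might look roughly like this (the prompt and text below are hypothetical; actual contents depend on the model):
+
+```python
+# Sketch of the _generate return shape described above
+{
+    "prompt": "What is the capital of France?",
+    "choices": [{"text": "The capital of France is Paris."}],
+}
+```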
+
+
+### Using the TGI Client
+
+```python
+tgi_llama2 = dspy.HFClientTGI(model="meta-llama/Llama-2-7b-hf", port=8080, url="http://localhost")
+```
+
+### Sending Requests via TGI Client
+
+1) _**Recommended**_ Configure default LM using `dspy.configure`.
+
+This allows you to define programs in DSPy and simply call modules on your input fields, having DSPy internally call the prompt on the configured LM.
+
+```python
+dspy.configure(lm=tgi_llama2)
+
+#Example DSPy CoT QA program
+qa = dspy.ChainOfThought('question -> answer')
+
+response = qa(question="What is the capital of Paris?") #Prompted to tgi_llama2
+print(response.answer)
+```
+
+2) Generate responses using the client directly.
+
+```python
+response = tgi_llama2._generate(prompt='What is the capital of Paris?')
+print(response)
+```
+
+***
+
+<AuthorDetails name="Arnav Singhvi"/>
\ No newline at end of file
diff --git a/docs/docs/deep-dive/language_model_clients/local_models/HFClientVLLM.mdx b/docs/docs/deep-dive/language_model_clients/local_models/HFClientVLLM.mdx
new file mode 100644
index 0000000000..059c2a9be5
--- /dev/null
+++ b/docs/docs/deep-dive/language_model_clients/local_models/HFClientVLLM.mdx
@@ -0,0 +1,82 @@
+import AuthorDetails from '@site/src/components/AuthorDetails';
+
+## [HFClient vLLM](https://github.com/vllm-project/vllm)
+
+### Prerequisites - Launching vLLM Server locally
+
+Refer to the [vLLM Server API](/api/local_language_model_clients/vLLM) for setting up the vLLM server locally.
+
+```bash
+#Example vLLM Server Launch
+
+ python -m vllm.entrypoints.api_server --model meta-llama/Llama-2-7b-hf --port 8080
+```
+
+This command will start the server and make it accessible at `http://localhost:8080`.
+
+
+### Setting up the vLLM Client
+
+The constructor initializes the `HFModel` base class to support the handling of prompting models, configuring the client for communicating with the hosted vLLM server to generate requests. This requires the following parameters:
+
+- `model` (_str_): ID of model connected to the vLLM server.
+- `port` (_int_): Port for communicating with the vLLM server.
+- `url` (_str_): Base URL of hosted vLLM server. This will often be `"http://localhost"`.
+- `**kwargs`: Additional keyword arguments to configure the vLLM client.
+
+Example of the vLLM constructor:
+
+```python
+class HFClientVLLM(HFModel):
+    def __init__(self, model, port, url="http://localhost", **kwargs):
+```
+
+### Under the Hood
+
+#### `_generate(self, prompt, **kwargs) -> dict`
+
+**Parameters:**
+- `prompt` (_str_): Prompt to send to the model hosted on the vLLM server.
+- `**kwargs`: Additional keyword arguments for the completion request.
+
+**Returns:**
+- `dict`: dictionary with `prompt` and a list of response `choices`.
+
+Internally, the method handles the specifics of preparing the request prompt and corresponding payload to obtain the response.
+
+After generation, the method parses the JSON response received from the server and retrieves the output through `json_response["choices"]`, which is stored in the `completions` list.
+
+Lastly, the method constructs the response dictionary with two keys: the original request `prompt` and `choices`, a list of dictionaries representing generated completions with the key `text` holding the response's generated text.
+
+### Using the vLLM Client
+
+```python
+vllm_llama2 = dspy.HFClientVLLM(model="meta-llama/Llama-2-7b-hf", port=8080, url="http://localhost")
+```
+
+### Sending Requests via vLLM Client
+
+1) _**Recommended**_ Configure default LM using `dspy.configure`.
+
+This allows you to define programs in DSPy and simply call modules on your input fields, having DSPy internally call the prompt on the configured LM.
+
+```python
+dspy.configure(lm=vllm_llama2)
+
+#Example DSPy CoT QA program
+qa = dspy.ChainOfThought('question -> answer')
+
+response = qa(question="What is the capital of Paris?") #Prompted to vllm_llama2
+print(response.answer)
+```
+
+2) Generate responses using the client directly.
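+
+(Unlike option 1, this returns the raw response dictionary described above, rather than a `Prediction` object.)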
+ +```python +response = vllm_llama2._generate(prompt='What is the capital of Paris?') +print(response) +``` + +*** + + \ No newline at end of file From 51b323b11141f7e9e4e81cd20d4ae5e21ed8b609 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Fri, 1 Mar 2024 11:55:43 -0800 Subject: [PATCH 024/243] Fixed spelling error --- dspy/teleprompt/random_search.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dspy/teleprompt/random_search.py b/dspy/teleprompt/random_search.py index 43d2c5d4c7..c6118c4214 100644 --- a/dspy/teleprompt/random_search.py +++ b/dspy/teleprompt/random_search.py @@ -37,7 +37,7 @@ def __init__(self, metric, teacher_settings={}, max_bootstrapped_demos=4, max_la self.stop_at_score = stop_at_score self.min_num_samples = 1 self.max_num_samples = max_bootstrapped_demos - self.max_erros = max_errors + self.max_errors = max_errors self.num_candidate_sets = num_candidate_programs # self.max_num_traces = 1 + int(max_bootstrapped_demos / 2.0 * self.num_candidate_sets) @@ -49,7 +49,7 @@ def __init__(self, metric, teacher_settings={}, max_bootstrapped_demos=4, max_la # print("Going to sample", self.max_num_traces, "traces in total.") print("Will attempt to train", self.num_candidate_sets, "candidate sets.") - def compile(self, student, *, teacher=None, trainset, valset=None, restrict=None): + def compile(self, student, *, teacher=None, trainset, valset=None, restrict=None, labeled_sample=True): self.trainset = trainset self.valset = valset or trainset # TODO: FIXME: Note this choice. @@ -70,7 +70,7 @@ def compile(self, student, *, teacher=None, trainset, valset=None, restrict=None elif seed == -2: # labels only - teleprompter = LabeledFewShot(k=self.max_labeled_demos) + teleprompter = LabeledFewShot(k=self.max_labeled_demos, sample=labeled_sample) program2 = teleprompter.compile(student, trainset=trainset2) elif seed == -1: From 3ca2c0fa24f8c4c145f7eae0576fc2fed047ebb0 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Fri, 1 Mar 2024 11:59:07 -0800 Subject: [PATCH 025/243] fixed bug in previous commit --- dspy/teleprompt/random_search.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dspy/teleprompt/random_search.py b/dspy/teleprompt/random_search.py index c6118c4214..da5bf24ce6 100644 --- a/dspy/teleprompt/random_search.py +++ b/dspy/teleprompt/random_search.py @@ -70,8 +70,8 @@ def compile(self, student, *, teacher=None, trainset, valset=None, restrict=None elif seed == -2: # labels only - teleprompter = LabeledFewShot(k=self.max_labeled_demos, sample=labeled_sample) - program2 = teleprompter.compile(student, trainset=trainset2) + teleprompter = LabeledFewShot(k=self.max_labeled_demos) + program2 = teleprompter.compile(student, trainset=trainset2, sample=labeled_sample) elif seed == -1: # unshuffled few-shot From 1c0c47190f756aae921fca634d501a3843175639 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Fri, 1 Mar 2024 12:50:39 -0800 Subject: [PATCH 026/243] Some type improvements for floats --- dspy/functional/functional.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py index f349fa2e40..d6e2e3dba3 100644 --- a/dspy/functional/functional.py +++ b/dspy/functional/functional.py @@ -104,7 +104,7 @@ def _prepare_signature(self): signature = signature.with_updated_fields( name, desc=field.json_schema_extra.get("desc", "") - + (f". 
Respond with a single {type_.__name__} value"), + + (f" (Respond with a single {type_.__name__} value)" if type_ != str else ""), format=lambda x: x if isinstance(x, str) else str(x), parser=type_, ) @@ -195,7 +195,7 @@ def _format_error(error: Exception): errors.append(f"{e['msg']}: {fields} (error type: {e['type']})") return "; ".join(errors) else: - return str(error) + return repr(error) def _func_to_signature(func): From a3abe5fb6812717a3fd228a77fb50463f4b2effb Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Fri, 1 Mar 2024 13:09:48 -0800 Subject: [PATCH 027/243] Updated test to new input format --- tests/functional/test_functional.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/functional/test_functional.py b/tests/functional/test_functional.py index 5d0f230b16..f515f93266 100644 --- a/tests/functional/test_functional.py +++ b/tests/functional/test_functional.py @@ -259,7 +259,7 @@ def simple_metric(example, prediction, trace=None): Follow the following format. Input: ${input} - Output: ${output}. Respond with a single str value + Output: ${output} --- From ae96ab93c89ae3d51eec40727bce6c77afe4f7c8 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Fri, 1 Mar 2024 13:15:35 -0800 Subject: [PATCH 028/243] Some ignores --- pyproject.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 5987d710ca..c01ca1d083 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -274,4 +274,5 @@ line-ending = "auto" convention = "google" [tool.ruff.lint.per-file-ignores] -"**/{test,docs}/*" = ["ALL"] +"**/{tests,docs}/*" = ["ALL"] +"**__init__.py" = ["F401"] \ No newline at end of file From bfd67424837d402e87a6e54a91030a0caf00325e Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Fri, 1 Mar 2024 13:18:19 -0800 Subject: [PATCH 029/243] Big reformat push --- dsp/evaluation/utils.py | 2 +- dsp/modules/aws_lm.py | 10 +++---- dsp/modules/azurecognitivesearch.py | 6 ++-- dsp/modules/bedrock.py | 4 +-- dsp/modules/cohere.py | 8 ++--- dsp/modules/colbertv2.py | 6 ++-- dsp/modules/databricks.py | 11 +++---- dsp/modules/finetuning/finetune_hf.py | 9 +++--- dsp/modules/google.py | 19 ++++++------ dsp/modules/gpt3.py | 3 +- dsp/modules/hf.py | 8 ++--- dsp/modules/hf_client.py | 33 ++++++++++----------- dsp/modules/lm.py | 6 ++-- dsp/modules/ollama.py | 8 ++--- dsp/modules/sbert.py | 4 +-- dsp/modules/sentence_vectorizer.py | 18 ++++++------ dsp/primitives/demonstrate.py | 4 +-- dsp/primitives/inspect.py | 4 +-- dsp/primitives/predict.py | 15 +++++----- dsp/primitives/primitives.py | 1 - dsp/primitives/search.py | 2 +- dsp/templates/template_v2.py | 8 ++--- dsp/utils/ann_utils.py | 12 ++++---- dsp/utils/dpr.py | 6 ++-- dsp/utils/settings.py | 4 +-- dsp/utils/utils.py | 4 +-- dspy/datasets/dataloader.py | 13 +++++---- dspy/datasets/gsm8k.py | 1 - dspy/evaluate/evaluate.py | 5 ++-- dspy/evaluate/metrics.py | 1 - dspy/functional/functional.py | 20 ++++++++----- dspy/predict/chain_of_thought.py | 5 ++-- dspy/predict/chain_of_thought_with_hint.py | 3 +- dspy/predict/multi_chain_comparison.py | 5 ++-- dspy/predict/predict.py | 6 ++-- dspy/predict/program_of_thought.py | 33 ++++++++++----------- dspy/predict/react.py | 4 +-- dspy/primitives/assertions.py | 30 +++++++++---------- dspy/primitives/example.py | 1 - dspy/primitives/module.py | 2 +- dspy/primitives/program.py | 2 -- dspy/primitives/python_interpreter.py | 10 ++----- dspy/retrieve/chromadb_rm.py | 10 +++---- dspy/retrieve/clarifai_rm.py | 6 ++-- dspy/retrieve/databricks_rm.py | 8 
++--- dspy/retrieve/deeplake_rm.py | 8 ++--- dspy/retrieve/marqo_rm.py | 6 ++-- dspy/retrieve/mongodb_atlas_rm.py | 10 +++---- dspy/retrieve/pgvector_rm.py | 10 +++---- dspy/retrieve/pinecone_rm.py | 32 ++++++++++---------- dspy/retrieve/qdrant_rm.py | 2 +- dspy/retrieve/vectara_rm.py | 8 ++--- dspy/retrieve/weaviate_rm.py | 5 ++-- dspy/signatures/signature.py | 14 ++++----- dspy/teleprompt/bootstrap.py | 7 ++--- dspy/teleprompt/ensemble.py | 2 -- dspy/teleprompt/finetune.py | 3 +- dspy/teleprompt/random_search.py | 2 -- dspy/teleprompt/signature_opt.py | 12 ++++---- dspy/teleprompt/signature_opt_bayesian.py | 15 +++++----- dspy/teleprompt/teleprompt.py | 4 --- dspy/teleprompt/teleprompt_optuna.py | 4 --- dspy/teleprompt/vanilla.py | 1 - examples/functional/repl.py | 2 +- examples/longformqa/utils.py | 1 - examples/tweets/tweet_metric.py | 2 +- setup.py | 4 +-- testing/optimizer_tester.py | 4 +-- testing/tasks/biodex.py | 34 ++++++++++------------ testing/tasks/scone.py | 2 +- testing/tasks/tweet.py | 5 ++-- testing/tasks/tweet_metric.py | 7 ++--- 72 files changed, 271 insertions(+), 315 deletions(-) diff --git a/dsp/evaluation/utils.py b/dsp/evaluation/utils.py index 8873ec0bee..1bfd740e4f 100644 --- a/dsp/evaluation/utils.py +++ b/dsp/evaluation/utils.py @@ -7,7 +7,7 @@ from IPython.display import display as ipython_display except ImportError: ipython_display = print -from dsp.utils import EM, F1, HotPotF1 +from dsp.utils import EM def evaluateRetrieval(fn, dev, metric=None): diff --git a/dsp/modules/aws_lm.py b/dsp/modules/aws_lm.py index 00906282a8..83674db10a 100644 --- a/dsp/modules/aws_lm.py +++ b/dsp/modules/aws_lm.py @@ -81,7 +81,7 @@ def _call_model(self, body: str) -> str | list[str]: @abstractmethod def _extract_input_parameters( - self, body: dict[Any, Any] + self, body: dict[Any, Any], ) -> dict[str, str | float | int]: pass @@ -94,7 +94,7 @@ def _simple_api_call(self, formatted_prompt: str, **kwargs) -> str | list[str]: else: llm_out = [generated.replace(formatted_prompt, "") for generated in llm_out] self.history.append( - {"prompt": formatted_prompt, "response": llm_out, "kwargs": body} + {"prompt": formatted_prompt, "response": llm_out, "kwargs": body}, ) return llm_out @@ -107,20 +107,20 @@ def basic_request(self, prompt, **kwargs) -> str | list[str]: truncated_prompt: str = self._truncate_prompt(prompt) formatted_prompt = self._format_prompt(truncated_prompt) else: - formatted_prompt = self._format_prompt((prompt)) + formatted_prompt = self._format_prompt(prompt) llm_out: str | list[str] if "n" in kwargs.keys(): if self._batch_n: llm_out = self._simple_api_call( - formatted_prompt=formatted_prompt, **kwargs + formatted_prompt=formatted_prompt, **kwargs, ) else: del kwargs["n"] llm_out = [] for _ in range(0, kwargs["n"]): generated: str | list[str] = self._simple_api_call( - formatted_prompt=formatted_prompt, **kwargs + formatted_prompt=formatted_prompt, **kwargs, ) if isinstance(generated, str): llm_out.append(generated) diff --git a/dsp/modules/azurecognitivesearch.py b/dsp/modules/azurecognitivesearch.py index 537105a0ff..e4ff2e9960 100644 --- a/dsp/modules/azurecognitivesearch.py +++ b/dsp/modules/azurecognitivesearch.py @@ -1,14 +1,14 @@ -from typing import Optional, Union, Any +from typing import Union, Any from dsp.utils import dotdict try: from azure.core.credentials import AzureKeyCredential from azure.search.documents import SearchClient from azure.search.documents._paging import SearchItemPaged -except ImportError as e: +except ImportError: raise ImportError( 
"You need to install azure-search-documents library" - "Please use the command: pip install azure-search-documents" + "Please use the command: pip install azure-search-documents", ) class AzureCognitiveSearch: diff --git a/dsp/modules/bedrock.py b/dsp/modules/bedrock.py index 64277f65cd..c52ae61970 100644 --- a/dsp/modules/bedrock.py +++ b/dsp/modules/bedrock.py @@ -47,7 +47,7 @@ def _create_body(self, prompt: str, **kwargs) -> dict[str, str | float]: query_args: dict[str, Any] = self._sanitize_kwargs(base_args) query_args["prompt"] = prompt # AWS Bedrock forbids these keys - if "max_tokens" in query_args.keys(): + if "max_tokens" in query_args: max_tokens: int = query_args["max_tokens"] input_tokens: int = self._estimate_tokens(prompt) max_tokens_to_sample: int = max_tokens - input_tokens @@ -67,7 +67,7 @@ def _call_model(self, body: str) -> str: return completion def _extract_input_parameters( - self, body: dict[Any, Any] + self, body: dict[Any, Any], ) -> dict[str, str | float | int]: return body diff --git a/dsp/modules/cohere.py b/dsp/modules/cohere.py index b76bd1a92a..405eaea483 100644 --- a/dsp/modules/cohere.py +++ b/dsp/modules/cohere.py @@ -17,7 +17,7 @@ def backoff_hdlr(details): print( "Backing off {wait:0.1f} seconds after {tries} tries " "calling function {target} with kwargs " - "{kwargs}".format(**details) + "{kwargs}".format(**details), ) @@ -39,7 +39,7 @@ def __init__( model: str = "command-nightly", api_key: Optional[str] = None, stop_sequences: list[str] = [], - **kwargs + **kwargs, ): """ Parameters @@ -66,7 +66,7 @@ def __init__( "frequency_penalty": 0, "presence_penalty": 0, "num_generations": 1, - **kwargs + **kwargs, } self.stop_sequences = stop_sequences self.max_num_generations = 5 @@ -109,7 +109,7 @@ def __call__( prompt: str, only_completed: bool = True, return_sorted: bool = False, - **kwargs + **kwargs, ): assert only_completed, "for now" assert return_sorted is False, "for now" diff --git a/dsp/modules/colbertv2.py b/dsp/modules/colbertv2.py index 501b84270b..1b33d077a9 100644 --- a/dsp/modules/colbertv2.py +++ b/dsp/modules/colbertv2.py @@ -22,7 +22,7 @@ def __init__( self.url = f"{url}:{port}" if port else url def __call__( - self, query: str, k: int = 10, simplify: bool = False + self, query: str, k: int = 10, simplify: bool = False, ) -> Union[list[str], list[dotdict]]: if self.post_requests: topk: list[dict[str, Any]] = colbertv2_post_request(self.url, query, k) @@ -49,7 +49,7 @@ def colbertv2_get_request_v2(url: str, query: str, k: int): return topk[:k] -@functools.lru_cache(maxsize=None) +@functools.cache @NotebookCacheMemory.cache def colbertv2_get_request_v2_wrapped(*args, **kwargs): return colbertv2_get_request_v2(*args, **kwargs) @@ -67,7 +67,7 @@ def colbertv2_post_request_v2(url: str, query: str, k: int): return res.json()["topk"][:k] -@functools.lru_cache(maxsize=None) +@functools.cache @NotebookCacheMemory.cache def colbertv2_post_request_v2_wrapped(*args, **kwargs): return colbertv2_post_request_v2(*args, **kwargs) diff --git a/dsp/modules/databricks.py b/dsp/modules/databricks.py index 73813a3eeb..8f7d7d6e87 100644 --- a/dsp/modules/databricks.py +++ b/dsp/modules/databricks.py @@ -1,21 +1,18 @@ import logging -from logging.handlers import RotatingFileHandler # Configure logging logging.basicConfig( level=logging.INFO, format='%(message)s', handlers=[ - logging.FileHandler('openai_usage.log') - ] + logging.FileHandler('openai_usage.log'), + ], ) import functools import json -from typing import Any, Literal, Optional, cast +from typing 
import Literal, Optional -import dsp -import backoff import openai from dsp.modules.cache_utils import CacheMemory, NotebookCacheMemory, cache_turn_on @@ -35,7 +32,7 @@ def backoff_hdlr(details): print( "Backing off {wait:0.1f} seconds after {tries} tries " "calling function {target} with kwargs " - "{kwargs}".format(**details) + "{kwargs}".format(**details), ) class Databricks(GPT3): diff --git a/dsp/modules/finetuning/finetune_hf.py b/dsp/modules/finetuning/finetune_hf.py index e899c4c292..0ade213bae 100644 --- a/dsp/modules/finetuning/finetune_hf.py +++ b/dsp/modules/finetuning/finetune_hf.py @@ -5,7 +5,6 @@ import copy import glob import torch -import random import warnings import evaluate import numpy as np @@ -247,7 +246,7 @@ def smart_tokenizer_and_embedding_resize(special_tokens_dict, tokenizer, model): @dataclass -class DataCollatorForSupervisedDataset(object): +class DataCollatorForSupervisedDataset: """ Collate examples for supervised fine-tuning. """ @@ -316,7 +315,7 @@ def finetune_hf(data_path, target, config): # training completed, load best model ckpts = glob.glob(f'{output_dir}/checkpoint*') final_ckpt = sorted(ckpts, key=lambda x: int(x.split('-')[-1]))[-1] - with open(os.path.join(final_ckpt, 'trainer_state.json'), 'r') as f: + with open(os.path.join(final_ckpt, 'trainer_state.json')) as f: state = json.load(f) best_model_checkpoint = state['best_model_checkpoint'] @@ -331,8 +330,8 @@ def finetune_hf(data_path, target, config): encoder_decoder_model = ("ConditionalGeneration" in architecture) or ("T5WithLMHeadModel" in architecture) decoder_only_model = ("CausalLM" in architecture) or ("GPT2LMHeadModel" in architecture) assert encoder_decoder_model or decoder_only_model, f"Unknown HuggingFace model class: {target}" - assert not config['fid'] or encoder_decoder_model, f"Model must be encoder-decoder for Fusion in Decoder" - assert not config['fid'] or not config['peft'], f"FiD and PEFT can't be trained together" + assert not config['fid'] or encoder_decoder_model, "Model must be encoder-decoder for Fusion in Decoder" + assert not config['fid'] or not config['peft'], "FiD and PEFT can't be trained together" # load model AutoModelClass = AutoModelForSeq2SeqLM if encoder_decoder_model else AutoModelForCausalLM diff --git a/dsp/modules/google.py b/dsp/modules/google.py index d7546fee33..9a1c3937c1 100644 --- a/dsp/modules/google.py +++ b/dsp/modules/google.py @@ -1,5 +1,6 @@ import os -from typing import Any, Iterable, Optional +from typing import Any, Optional +from collections.abc import Iterable import backoff from dsp.modules.lm import LM @@ -18,7 +19,7 @@ def backoff_hdlr(details): print( "Backing off {wait:0.1f} seconds after {tries} tries " "calling function {target} with kwargs " - "{kwargs}".format(**details) + "{kwargs}".format(**details), ) @@ -32,19 +33,19 @@ def giveup_hdlr(details): BLOCK_ONLY_HIGH = [ { "category": "HARM_CATEGORY_HARASSMENT", - "threshold": "BLOCK_ONLY_HIGH" + "threshold": "BLOCK_ONLY_HIGH", }, { "category": "HARM_CATEGORY_HATE_SPEECH", - "threshold": "BLOCK_ONLY_HIGH" + "threshold": "BLOCK_ONLY_HIGH", }, { "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", - "threshold": "BLOCK_ONLY_HIGH" + "threshold": "BLOCK_ONLY_HIGH", }, { "category": "HARM_CATEGORY_DANGEROUS_CONTENT", - "threshold": "BLOCK_ONLY_HIGH" + "threshold": "BLOCK_ONLY_HIGH", }, ] @@ -60,7 +61,7 @@ def __init__( model: str = "models/gemini-1.0-pro", api_key: Optional[str] = None, safety_settings: Optional[Iterable] = BLOCK_ONLY_HIGH, - **kwargs + **kwargs, ): """ Parameters @@ -89,7 
+90,7 @@ def __init__( "max_output_tokens": 2048, "top_p": 1, "top_k": 1, - **kwargs + **kwargs, } self.config = genai.GenerationConfig(**kwargs) @@ -145,7 +146,7 @@ def __call__( prompt: str, only_completed: bool = True, return_sorted: bool = False, - **kwargs + **kwargs, ): assert only_completed, "for now" assert return_sorted is False, "for now" diff --git a/dsp/modules/gpt3.py b/dsp/modules/gpt3.py index e19ca40f54..ad440f2348 100644 --- a/dsp/modules/gpt3.py +++ b/dsp/modules/gpt3.py @@ -1,5 +1,4 @@ import logging -from logging.handlers import RotatingFileHandler # Configure logging logging.basicConfig( @@ -43,7 +42,7 @@ def backoff_hdlr(details): print( "Backing off {wait:0.1f} seconds after {tries} tries " "calling function {target} with kwargs " - "{kwargs}".format(**details) + "{kwargs}".format(**details), ) diff --git a/dsp/modules/hf.py b/dsp/modules/hf.py index 807eaf438c..3c9306a8f8 100644 --- a/dsp/modules/hf.py +++ b/dsp/modules/hf.py @@ -1,13 +1,9 @@ -import os -import json # from peft import PeftConfig, PeftModel # from transformers import AutoModelForSeq2SeqLM, AutoModelForCausalLM, AutoTokenizer, AutoConfig from typing import Optional, Literal from dsp.modules.lm import LM # from dsp.modules.finetuning.finetune_hf import preprocess_prompt -from dsp.modules.cache_utils import CacheMemory, NotebookCacheMemory, cache_turn_on -import functools def openai_to_hf(**kwargs): hf_kwargs = {} @@ -51,7 +47,7 @@ def __init__(self, model: str, checkpoint: Optional[str] = None, is_client: bool import torch except ImportError as exc: raise ModuleNotFoundError( - "You need to install Hugging Face transformers library to use HF models." + "You need to install Hugging Face transformers library to use HF models.", ) from exc self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") try: @@ -85,7 +81,7 @@ def __init__(self, model: str, checkpoint: Optional[str] = None, is_client: bool except ValueError: self.model = AutoModelForCausalLM.from_pretrained( model if checkpoint is None else checkpoint, - device_map=self.device_map + device_map=self.device_map, ) self.drop_prompt_from_output = True self.tokenizer = AutoTokenizer.from_pretrained(model) diff --git a/dsp/modules/hf_client.py b/dsp/modules/hf_client.py index 8ce1156d7e..4def628199 100644 --- a/dsp/modules/hf_client.py +++ b/dsp/modules/hf_client.py @@ -1,14 +1,11 @@ -import functools import os import random import requests from dsp.modules.hf import HFModel, openai_to_hf -from dsp.modules.cache_utils import CacheMemory, NotebookCacheMemory, cache_turn_on -import os +from dsp.modules.cache_utils import CacheMemory, NotebookCacheMemory import subprocess import re import shutil -import time # from dsp.modules.adapter import TurboAdapter, DavinciAdapter, LlamaAdapter @@ -21,7 +18,7 @@ def backoff_hdlr(details): print( "Backing off {wait:0.1f} seconds after {tries} tries " "calling function {target} with kwargs " - "{kwargs}".format(**details) + "{kwargs}".format(**details), ) class HFClientTGI(HFModel): @@ -57,13 +54,13 @@ def _generate(self, prompt, **kwargs): # "max_new_tokens": kwargs.get('max_tokens', kwargs.get('max_new_tokens', 75)), # "stop": ["\n", "\n\n"], **kwargs, - } + }, } payload["parameters"] = openai_to_hf(**payload["parameters"]) payload["parameters"]["temperature"] = max( - 0.1, payload["parameters"]["temperature"] + 0.1, payload["parameters"]["temperature"], ) # print(payload['parameters']) @@ -96,7 +93,7 @@ def _generate(self, prompt, **kwargs): response = {"prompt": prompt, "choices": [{"text": c} for 
c in completions]} return response - except Exception as e: + except Exception: print("Failed to parse JSON response:", response.text) raise Exception("Received invalid JSON response from server") @@ -147,7 +144,7 @@ def _generate(self, prompt, **kwargs): } return response - except Exception as e: + except Exception: print("Failed to parse JSON response:", response.text) raise Exception("Received invalid JSON response from server") @@ -227,7 +224,7 @@ def __init__(self, model, **kwargs): "repetition_penalty": 1, "n": 1, "stop": stop_default if "stop" not in kwargs else kwargs["stop"], - **kwargs + **kwargs, } @backoff.on_exception( @@ -253,7 +250,7 @@ def _generate(self, prompt, use_chat_api=False, **kwargs): url = f"{self.api_base}/chat/completions" messages = [ {"role": "system", "content": "You are a helpful assistant. You must continue the user text directly without *any* additional interjections."}, - {"role": "user", "content": prompt} + {"role": "user", "content": prompt}, ] body = { "model": self.model, @@ -305,7 +302,7 @@ def __init__(self, model, **kwargs): self.kwargs = { "temperature": 0.0, "n": 1, - **kwargs + **kwargs, } def _generate(self, prompt, use_chat_api=False, **kwargs): @@ -320,20 +317,20 @@ def _generate(self, prompt, use_chat_api=False, **kwargs): url = f"{self.api_base}/chat/completions" messages = [ {"role": "system", "content": "You are a helpful assistant. You must continue the user text directly without *any* additional interjections."}, - {"role": "user", "content": prompt} + {"role": "user", "content": prompt}, ] body = { "model": self.model, "messages": messages, "temperature": temperature, - "max_tokens": max_tokens + "max_tokens": max_tokens, } else: body = { "model": self.model, "prompt": f"[INST]{prompt}[/INST]", "temperature": temperature, - "max_tokens": max_tokens + "max_tokens": max_tokens, } headers = {"Authorization": f"Bearer {self.token}"} @@ -362,7 +359,7 @@ def __init__(self, model, model_path): from mlc_chat import ChatConfig self.cm = ChatModule( - model=model, lib_path=model_path, chat_config=ChatConfig(conv_template="LM") + model=model, lib_path=model_path, chat_config=ChatConfig(conv_template="LM"), ) def _generate(self, prompt, **kwargs): @@ -373,7 +370,7 @@ def _generate(self, prompt, **kwargs): completions = [{"text": output}] response = {"prompt": prompt, "choices": completions} return response - except Exception as e: + except Exception: print("Failed to parse output:", response.text) raise Exception("Received invalid output") @@ -417,7 +414,7 @@ def _generate(self, prompt, **kwargs): } return response - except Exception as e: + except Exception: print("Failed to parse JSON response:", response.text) raise Exception("Received invalid JSON response from server") diff --git a/dsp/modules/lm.py b/dsp/modules/lm.py index e2965d49ac..ad3f883ddb 100644 --- a/dsp/modules/lm.py +++ b/dsp/modules/lm.py @@ -50,8 +50,8 @@ def inspect_history(self, n: int = 1, skip: int = 0): printed.append( ( prompt, - x['response'] - ) + x['response'], + ), ) else: printed.append( @@ -60,7 +60,7 @@ def inspect_history(self, n: int = 1, skip: int = 0): x["response"].generations if provider == "cohere" else x["response"]["choices"], - ) + ), ) last_prompt = prompt diff --git a/dsp/modules/ollama.py b/dsp/modules/ollama.py index faa2c41005..a2e3e01397 100644 --- a/dsp/modules/ollama.py +++ b/dsp/modules/ollama.py @@ -1,10 +1,10 @@ from dsp.modules.lm import LM -from typing import Any, Literal, Optional +from typing import Any, Literal -import os, multiprocessing, 
datetime, hashlib +import datetime +import hashlib import requests -import json def post_request_metadata(model_name, prompt): @@ -116,7 +116,7 @@ def basic_request(self, prompt: str, **kwargs): "content": "".join(text), }, "finish_reason": "stop", - } + }, ) tot_eval_tokens += response_json.get("eval_count") request_info["additional_kwargs"] = {k: v for k, v in response_json.items() if k not in ["response"]} diff --git a/dsp/modules/sbert.py b/dsp/modules/sbert.py index f8ac3f80c0..21cbdd5aef 100644 --- a/dsp/modules/sbert.py +++ b/dsp/modules/sbert.py @@ -2,13 +2,13 @@ class SentenceTransformersCrossEncoder: """Wrapper for sentence-transformers cross-encoder model. """ def __init__( - self, model_name_or_path: str = "cross-encoder/ms-marco-MiniLM-L-12-v2" + self, model_name_or_path: str = "cross-encoder/ms-marco-MiniLM-L-12-v2", ): try: from sentence_transformers.cross_encoder import CrossEncoder except ImportError: raise ModuleNotFoundError( - "You need to install sentence-transformers library to use SentenceTransformersCrossEncoder." + "You need to install sentence-transformers library to use SentenceTransformersCrossEncoder.", ) self.model = CrossEncoder(model_name_or_path) diff --git a/dsp/modules/sentence_vectorizer.py b/dsp/modules/sentence_vectorizer.py index a4a79de0c7..b878420f70 100644 --- a/dsp/modules/sentence_vectorizer.py +++ b/dsp/modules/sentence_vectorizer.py @@ -40,7 +40,7 @@ def __init__( model_name_or_path: str = 'all-MiniLM-L6-v2', vectorize_bs: int = 256, max_gpu_devices: int = 1, - normalize_embeddings: bool = False + normalize_embeddings: bool = False, ): # this isn't a good practice, but with top-level import the whole DSP # module import will be slow (>5 sec), because SentenceTransformer is doing @@ -48,11 +48,11 @@ def __init__( try: from sentence_transformers import SentenceTransformer - except ImportError as e: + except ImportError: raise ImportError( "You need to install sentence_transformers library to use pretrained embedders. 
" "Please check the official doc https://www.sbert.net/ " - "or simply run `pip install sentence-transformers" + "or simply run `pip install sentence-transformers", ) from dsp.utils.ann_utils import determine_devices @@ -75,7 +75,7 @@ def __call__(self, inp_examples: List) -> np.ndarray: emb = self.model.encode_multi_process( sentences=text_to_vectorize, pool=pool, - batch_size=self.vectorize_bs + batch_size=self.vectorize_bs, ) self.model.stop_multi_process_pool(pool) # for some reason, multi-GPU setup doesn't accept normalize_embeddings parameter @@ -87,7 +87,7 @@ def __call__(self, inp_examples: List) -> np.ndarray: emb = self.model.encode( sentences=text_to_vectorize, batch_size=self.vectorize_bs, - normalize_embeddings=self.normalize_embeddings + normalize_embeddings=self.normalize_embeddings, ) return emb @@ -121,7 +121,7 @@ def __init__( api_key: str, model: str = 'embed-english-v3.0', embed_batch_size: int = 96, - embedding_type: str = 'search_document' # for details check Cohere embed docs + embedding_type: str = 'search_document', # for details check Cohere embed docs ): self.model = model self.embed_batch_size = embed_batch_size @@ -144,7 +144,7 @@ def __call__(self, inp_examples: List["Example"]) -> np.ndarray: response = self.client.embed( texts=cur_batch, model=self.model, - input_type=self.embedding_type + input_type=self.embedding_type, ) embeddings_list.extend(response.embeddings) @@ -169,7 +169,7 @@ def __init__( self, model: str = 'text-embedding-ada-002', embed_batch_size: int = 1024, - api_key: Optional[str] = None + api_key: Optional[str] = None, ): self.model = model self.embed_batch_size = embed_batch_size @@ -195,7 +195,7 @@ def __call__(self, inp_examples: List["Example"]) -> np.ndarray: # OpenAI API call: response = self.Embedding.create( model=self.model, - input=cur_batch + input=cur_batch, ) cur_batch_embeddings = [cur_obj['embedding'] for cur_obj in response['data']] diff --git a/dsp/primitives/demonstrate.py b/dsp/primitives/demonstrate.py index 525314105f..ae08e28bae 100644 --- a/dsp/primitives/demonstrate.py +++ b/dsp/primitives/demonstrate.py @@ -148,7 +148,7 @@ def cast_naive_get_question_and_answer(inp_example: Example) -> Example: def knn( train: list[Example], cast: Callable[[Example], Example] = cast_naive_get_only_question_text, - **knn_args + **knn_args, ) -> Callable[[Example, int], list[Example]]: """ A function that vectorizes train data using `dsm.settings.vectorizer`, then build an ANN/KNN @@ -171,7 +171,7 @@ def knn( all_vectors = vectorizer(train_casted_to_vectorize).astype(np.float32) index = create_faiss_index( - emb_dim=all_vectors.shape[1], n_objects=len(train), **knn_args + emb_dim=all_vectors.shape[1], n_objects=len(train), **knn_args, ) index.train(all_vectors) index.add(all_vectors) diff --git a/dsp/primitives/inspect.py b/dsp/primitives/inspect.py index 2b6202bbb3..3fbb05c55a 100644 --- a/dsp/primitives/inspect.py +++ b/dsp/primitives/inspect.py @@ -41,9 +41,7 @@ def parse(self, obj, delete_empty=False): if isinstance(obj, dict): to_delete = [] for key in obj: - if delete_empty and not obj[key]: - to_delete.append(key) - elif key == "completions": + if delete_empty and not obj[key] or key == "completions": to_delete.append(key) else: self.parse(obj[key], delete_empty) diff --git a/dsp/primitives/predict.py b/dsp/primitives/predict.py index 6f442f6437..c89256b916 100644 --- a/dsp/primitives/predict.py +++ b/dsp/primitives/predict.py @@ -3,7 +3,6 @@ import dsp from dsp.utils import zipstar, normalize_text -from dsp.primitives.inspect 
import FuncInspector from dsp.utils.utils import dotdict from dsp.templates.template_v3 import Template from dsp.primitives.demonstrate import Example @@ -63,7 +62,7 @@ def _generate(template: Template, **kwargs) -> Callable: generator = dsp.settings.lm def do_generate( - example: Example, stage: str, max_depth: int = 2, original_example=None + example: Example, stage: str, max_depth: int = 2, original_example=None, ): if not dsp.settings.lm: raise AssertionError("No LM is loaded.") @@ -143,7 +142,7 @@ def do_generate( "template": template, "inputs": inputs, "outputs": outputs, - } + }, ) else: # assert not dsp.settings.compiling, "TODO: At this point, cannot compile n>1 generations" @@ -155,7 +154,7 @@ def do_generate( def generate_sc( - example, prompt, normalize=True, extract=None, prediction_field=None, **kwargs + example, prompt, normalize=True, extract=None, prediction_field=None, **kwargs, ): if not dsp.settings.lm: raise AssertionError("No LM is loaded.") @@ -164,7 +163,7 @@ def generate_sc( completions = dsp.settings.lm(prompt, **kwargs) completions = extract_final_answer(example, completions, extract=extract) return majority_vote_( - completions, normalize=normalize, prediction_field=prediction_field + completions, normalize=normalize, prediction_field=prediction_field, ) @@ -180,14 +179,14 @@ def extract_final_answer(example, completions, extract=None): # TODO: make thread-safe? dsp.settings.lm.history.append( - {**dsp.settings.lm.history[-1], "completions": completions} + {**dsp.settings.lm.history[-1], "completions": completions}, ) return completions def majority( - completions: Completions, normalize: bool = True, field: Optional[str] = None + completions: Completions, normalize: bool = True, field: Optional[str] = None, ): """Returns the most common completion for the target field or the last field in the template.""" field = completions.template.fields[-1].output_variable if field is None else field @@ -231,7 +230,7 @@ def majority_vote_(completions: Completions, normalize: bool, prediction_field: pred = normalized_to_original[pred] dsp.settings.lm.history.append( - {**dsp.settings.lm.history[-1], "topk": topk, "completions": [pred]} + {**dsp.settings.lm.history[-1], "topk": topk, "completions": [pred]}, ) return [pred] diff --git a/dsp/primitives/primitives.py b/dsp/primitives/primitives.py index fd839dea25..a7851ea33d 100644 --- a/dsp/primitives/primitives.py +++ b/dsp/primitives/primitives.py @@ -1,5 +1,4 @@ import dsp -import copy from functools import wraps # applied right to left (innermost first, like function calls) diff --git a/dsp/primitives/search.py b/dsp/primitives/search.py index a2071327d8..1ca4b966dd 100644 --- a/dsp/primitives/search.py +++ b/dsp/primitives/search.py @@ -35,7 +35,7 @@ def retrieveRerankEnsemble(queries: list[str], k: int) -> list[str]: for idx in np.argsort(passages_cs_scores)[::-1]: psg = retrieved_passages[idx] passages[psg.long_text] = passages.get(psg.long_text, []) + [ - passages_cs_scores[idx] + passages_cs_scores[idx], ] passages = [(np.average(score), text) for text, score in passages.items()] diff --git a/dsp/templates/template_v2.py b/dsp/templates/template_v2.py index 656f490748..d6e61642ea 100644 --- a/dsp/templates/template_v2.py +++ b/dsp/templates/template_v2.py @@ -44,7 +44,7 @@ def __init__( variable = match.group(3) description = None else: - raise ValueError(f"Could not parse template") + raise ValueError("Could not parse template") var_match = re.match("(.*) -> (.*)", variable) if var_match is not None: @@ -61,7 +61,7 
@@ def __init__( input_variable=input_variable, output_variable=output_variable, description=description, - ) + ), ) template = template[len(match.group(0)) :].strip() @@ -99,7 +99,7 @@ def format_handler(x): separator = '\n' if field.separator == ' ' and '\n' in formatted_value else field.separator result.append( - f"{field.name}{separator}{formatted_value}" + f"{field.name}{separator}{formatted_value}", ) if self._has_augmented_guidelines() and (example.get('augmented', False)): @@ -130,7 +130,7 @@ def _has_augmented_guidelines(self): ) def extract( - self, example: Union[Example, dict[str, Any]], raw_pred: str + self, example: Union[Example, dict[str, Any]], raw_pred: str, ) -> Example: """Extracts the answer from the LM raw prediction using the template structure diff --git a/dsp/utils/ann_utils.py b/dsp/utils/ann_utils.py index 106db06bff..dcd3f09ce1 100644 --- a/dsp/utils/ann_utils.py +++ b/dsp/utils/ann_utils.py @@ -3,10 +3,10 @@ try: import faiss from faiss import Index -except ImportError as e: +except ImportError: raise ImportError( "You need to install FAISS library to perform ANN/KNN. Please check the official doc: " - "https://github.com/facebookresearch/faiss/blob/main/INSTALL.md" + "https://github.com/facebookresearch/faiss/blob/main/INSTALL.md", ) @@ -48,7 +48,7 @@ def _get_ivf_index( n_objects: int, in_list_dist_type: str, centroid_dist_type: str, - encode_residuals: bool + encode_residuals: bool, ) -> Index: # according to the FAISS doc, this should be OK n_list = int(4 * (n_objects ** 0.5)) @@ -73,7 +73,7 @@ def _get_ivf_index( n_list, faiss.ScalarQuantizer.QT_fp16, # TODO: should be optional? centroid_metric, - encode_residuals + encode_residuals, ) return index @@ -85,7 +85,7 @@ def create_faiss_index( max_gpu_devices: int = 0, encode_residuals: bool = True, in_list_dist_type: str = 'L2', - centroid_dist_type: str = 'L2' + centroid_dist_type: str = 'L2', ) -> Index: """ Create IVF index (with IP or L2 dist), without adding data and training @@ -118,7 +118,7 @@ def create_faiss_index( n_objects=n_objects, in_list_dist_type=in_list_dist_type, centroid_dist_type=centroid_dist_type, - encode_residuals=encode_residuals + encode_residuals=encode_residuals, ) index.nprobe = n_probe diff --git a/dsp/utils/dpr.py b/dsp/utils/dpr.py index 39e6d7eca9..2174ea4da6 100644 --- a/dsp/utils/dpr.py +++ b/dsp/utils/dpr.py @@ -8,7 +8,7 @@ import unicodedata -class Tokens(object): +class Tokens: """A class to represent a list of tokenized text.""" TEXT = 0 TEXT_WS = 1 @@ -125,7 +125,7 @@ def entity_groups(self): return groups -class Tokenizer(object): +class Tokenizer: """Base tokenizer class. Tokenizers implement tokenize, which should return a Tokens class. """ @@ -151,7 +151,7 @@ def __init__(self, **kwargs): """ self._regexp = regex.compile( '(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS), - flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE + flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE, ) if len(kwargs.get('annotators', {})) > 0: logger.warning('%s only tokenizes! 
Skipping annotators: %s' % diff --git a/dsp/utils/settings.py b/dsp/utils/settings.py index ebf89b0fee..462f99d429 100644 --- a/dsp/utils/settings.py +++ b/dsp/utils/settings.py @@ -3,7 +3,7 @@ import threading -class Settings(object): +class Settings: """DSP configuration settings.""" _instance = None @@ -42,7 +42,7 @@ def __new__(cls): bypass_suggest=False, assert_failures=0, suggest_failures=0, - langchain_history=[] + langchain_history=[], ) cls._instance.__append(config) diff --git a/dsp/utils/utils.py b/dsp/utils/utils.py index 7e93933e44..aeba5c9700 100644 --- a/dsp/utils/utils.py +++ b/dsp/utils/utils.py @@ -27,7 +27,7 @@ def file_tqdm(file): print(f"#> Reading {file.name}") with tqdm.tqdm( - total=os.path.getsize(file.name) / 1024.0 / 1024.0, unit="MiB" + total=os.path.getsize(file.name) / 1024.0 / 1024.0, unit="MiB", ) as pbar: for line in file: yield line @@ -214,7 +214,7 @@ def lengths2offsets(lengths): # see https://stackoverflow.com/a/45187287 -class NullContextManager(object): +class NullContextManager: def __init__(self, dummy_resource=None): self.dummy_resource = dummy_resource diff --git a/dspy/datasets/dataloader.py b/dspy/datasets/dataloader.py index 2ac6358c4e..8900c107f5 100644 --- a/dspy/datasets/dataloader.py +++ b/dspy/datasets/dataloader.py @@ -3,7 +3,8 @@ from dspy.datasets import Dataset from datasets import load_dataset -from typing import Union, List, Mapping, Tuple +from typing import Union, List, Tuple +from collections.abc import Mapping class DataLoader(Dataset): def __init__(self,): @@ -15,13 +16,13 @@ def from_huggingface( *args, input_keys: Tuple[str] = (), fields: Tuple[str] = None, - **kwargs + **kwargs, ) -> Union[Mapping[str, List[dspy.Example]], List[dspy.Example]]: if fields and not isinstance(fields, tuple): - raise ValueError(f"Invalid fields provided. Please provide a tuple of fields.") + raise ValueError("Invalid fields provided. Please provide a tuple of fields.") if not isinstance(input_keys, tuple): - raise ValueError(f"Invalid input keys provided. Please provide a tuple of input keys.") + raise ValueError("Invalid input keys provided. Please provide a tuple of input keys.") dataset = load_dataset(dataset_name, *args, **kwargs) @@ -65,7 +66,7 @@ def sample( dataset: List[dspy.Example], n: int, *args, - **kwargs + **kwargs, ) -> List[dspy.Example]: if not isinstance(dataset, list): raise ValueError(f"Invalid dataset provided of type {type(dataset)}. 
Please provide a list of examples.") @@ -77,7 +78,7 @@ def train_test_split( dataset: List[dspy.Example], train_size: Union[int, float] = 0.75, test_size: Union[int, float] = None, - random_state: int = None + random_state: int = None, ) -> Mapping[str, List[dspy.Example]]: if random_state is not None: random.seed(random_state) diff --git a/dspy/datasets/gsm8k.py b/dspy/datasets/gsm8k.py index 0795518e6d..3b3514862d 100644 --- a/dspy/datasets/gsm8k.py +++ b/dspy/datasets/gsm8k.py @@ -2,7 +2,6 @@ import random from datasets import load_dataset -from dspy.datasets.dataset import Dataset class GSM8K: def __init__(self) -> None: diff --git a/dspy/evaluate/evaluate.py b/dspy/evaluate/evaluate.py index 27358ed4fa..0be330375b 100644 --- a/dspy/evaluate/evaluate.py +++ b/dspy/evaluate/evaluate.py @@ -11,7 +11,6 @@ HTML = lambda x: x from concurrent.futures import ThreadPoolExecutor, as_completed -from dsp.utils import EM from dsp.evaluation.utils import * """ @@ -219,12 +218,12 @@ def configure_dataframe_display(df, metric_name): # Return styled DataFrame return df.style.set_table_styles([ {'selector': 'th', 'props': [('text-align', 'left')]}, - {'selector': 'td', 'props': [('text-align', 'left')]} + {'selector': 'td', 'props': [('text-align', 'left')]}, ]).set_properties(**{ 'text-align': 'left', 'white-space': 'pre-wrap', 'word-wrap': 'break-word', - 'max-width': '400px' + 'max-width': '400px', }) # FIXME: TODO: The merge_dicts stuff above is way too quick and dirty. diff --git a/dspy/evaluate/metrics.py b/dspy/evaluate/metrics.py index b965b33504..79c6208af6 100644 --- a/dspy/evaluate/metrics.py +++ b/dspy/evaluate/metrics.py @@ -1,7 +1,6 @@ # TODO: This should move internally. Same for passage_match. dspy.metrics.answer_exact_match, dspy.metrics.answer_passage_match import dsp -from dsp.utils import EM, normalize_text def answer_exact_match(example, pred, trace=None, frac=1.0): assert(type(example.answer) is str or type(example.answer) is list) diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py index d6e2e3dba3..d1d9bf7cac 100644 --- a/dspy/functional/functional.py +++ b/dspy/functional/functional.py @@ -1,6 +1,10 @@ -import inspect, os, openai, dspy, typing, pydantic -from typing import Annotated, List, Tuple +import inspect +import os +import openai +import dspy import typing +import pydantic +from typing import Annotated, List, Tuple from dsp.templates import passages2text import json @@ -58,7 +62,7 @@ def TypedChainOfThought(signature): prefix="Reasoning: Let's think step by step in order to", desc="${produce the " + output_keys + "}. We ...", ), - ) + ), ) @@ -78,7 +82,7 @@ def _make_example(type_): dspy.Signature( "json_schema -> json_object", "Make a very succinct json object that validates with the following schema", - ) + ), )(json_schema=json.dumps(type_.model_json_schema())).json_object # We use the model_validate_json method to make sure the example is valid try: @@ -120,7 +124,7 @@ def _prepare_signature(self): name, desc=field.json_schema_extra.get("desc", "") + ( - f". Respond with a single JSON object. JSON Schema: " + ". Respond with a single JSON object. 
JSON Schema: " + json.dumps(type_.model_json_schema()) ), format=lambda x: (x if isinstance(x, str) else wrap(x).model_dump_json()), @@ -164,7 +168,7 @@ def forward(self, **kwargs): if try_i + 1 < MAX_RETRIES and prefix not in current_desc: if example := self._make_example(field.annotation): signature = signature.with_updated_fields( - name, desc=current_desc + "\n" + prefix + example + "\n" + suffix + name, desc=current_desc + "\n" + prefix + example + "\n" + suffix, ) if errors: # Add new fields for each error @@ -173,7 +177,7 @@ def forward(self, **kwargs): signature = signature.append( f"error_{name}_{try_i}", dspy.InputField( - prefix=f"Past Error " + (f"({name}):" if try_i == 0 else f"({name}, {try_i+1}):"), + prefix="Past Error " + (f"({name}):" if try_i == 0 else f"({name}, {try_i+1}):"), desc="An error to avoid in the future", ), ) @@ -183,7 +187,7 @@ def forward(self, **kwargs): setattr(result, name, value) return result raise ValueError( - "Too many retries trying to get the correct output format. " + "Try simplifying the requirements.", errors + "Too many retries trying to get the correct output format. " + "Try simplifying the requirements.", errors, ) diff --git a/dspy/predict/chain_of_thought.py b/dspy/predict/chain_of_thought.py index d49c981921..f374aaabe5 100644 --- a/dspy/predict/chain_of_thought.py +++ b/dspy/predict/chain_of_thought.py @@ -1,7 +1,8 @@ -import dsp, dspy +import dsp +import dspy from dspy.signatures.signature import ensure_signature -from .predict import Predict, signature_to_template +from .predict import Predict # TODO: FIXME: Insert this right before the *first* output field. Also rewrite this to use the new signature system. diff --git a/dspy/predict/chain_of_thought_with_hint.py b/dspy/predict/chain_of_thought_with_hint.py index 83d5b5b4b4..6f78566321 100644 --- a/dspy/predict/chain_of_thought_with_hint.py +++ b/dspy/predict/chain_of_thought_with_hint.py @@ -1,4 +1,5 @@ -import dsp, dspy +import dsp +import dspy from .predict import Predict diff --git a/dspy/predict/multi_chain_comparison.py b/dspy/predict/multi_chain_comparison.py index 89fc732979..ad69b646ee 100644 --- a/dspy/predict/multi_chain_comparison.py +++ b/dspy/predict/multi_chain_comparison.py @@ -3,7 +3,6 @@ from .predict import Predict from ..primitives.program import Module -import dsp class MultiChainComparison(Module): @@ -19,7 +18,7 @@ def __init__(self, signature, M=3, temperature=0.7, **config): signature = signature.append( f"reasoning_attempt_{idx+1}", dspy.InputField( - prefix=f"Student Attempt #{idx+1}:", desc="${reasoning attempt}" + prefix=f"Student Attempt #{idx+1}:", desc="${reasoning attempt}", ), ) @@ -40,7 +39,7 @@ def forward(self, completions, **kwargs): rationale = c.rationale.strip().split("\n")[0].strip() answer = c[self.last_key].strip().split("\n")[0].strip() attempts.append( - f"«I'm trying to {rationale} I'm not sure but my prediction is {answer}»" + f"«I'm trying to {rationale} I'm not sure but my prediction is {answer}»", ) assert len(attempts) == self.M, len(attempts) diff --git a/dspy/predict/predict.py b/dspy/predict/predict.py index 0123122310..60e668a836 100644 --- a/dspy/predict/predict.py +++ b/dspy/predict/predict.py @@ -60,10 +60,10 @@ def forward(self, **kwargs): assert lm is not None, "No LM is loaded." # If temperature is 0.0 but its n > 1, set temperature to 0.7. 
- temperature = config.get("temperature", None) + temperature = config.get("temperature") temperature = lm.kwargs["temperature"] if temperature is None else temperature - num_generations = config.get("n", None) + num_generations = config.get("n") if num_generations is None: num_generations = lm.kwargs.get("n", lm.kwargs.get("num_generations", None)) @@ -103,7 +103,7 @@ def forward(self, **kwargs): for field in template.fields: if field.output_variable not in kwargs.keys(): completions[-1][field.output_variable] = getattr( - c, field.output_variable + c, field.output_variable, ) pred = Prediction.from_completions(completions, signature=signature) diff --git a/dspy/predict/program_of_thought.py b/dspy/predict/program_of_thought.py index 516c1129cf..f7e80eed71 100644 --- a/dspy/predict/program_of_thought.py +++ b/dspy/predict/program_of_thought.py @@ -1,4 +1,3 @@ -import dsp import dspy from dspy.signatures.signature import ensure_signature from ..primitives.program import Module @@ -16,23 +15,23 @@ def __init__(self, signature, max_iters=3): self.output_fields = signature.output_fields inputs_ = ", ".join( - [f"`{field_name}`" for field_name in self.input_fields.keys()] + [f"`{field_name}`" for field_name in self.input_fields.keys()], ) outputs_ = ", ".join( - [f"`{field_name}`" for field_name in self.output_fields.keys()] + [f"`{field_name}`" for field_name in self.output_fields.keys()], ) assert len(self.output_fields) == 1, "PoT only supports one output field." instr = [] instr.append( - f"You will be given {inputs_} and you will respond with {outputs_}." + f"You will be given {inputs_} and you will respond with {outputs_}.", ) instr.append( - f"Generating executable Python code that programmatically computes the correct {outputs_}." + f"Generating executable Python code that programmatically computes the correct {outputs_}.", ) instr.append( - f"After you're done with the computation, make sure the last line in your code evaluates to the correct value for {outputs_}." + f"After you're done with the computation, make sure the last line in your code evaluates to the correct value for {outputs_}.", ) instr = "\n".join(instr) @@ -40,19 +39,19 @@ def __init__(self, signature, max_iters=3): dspy.Signature( self._generate_signature("generate").fields, self._generate_instruction("generate"), - ) + ), ) self.code_regenerate = dspy.ChainOfThought( dspy.Signature( self._generate_signature("regenerate").fields, self._generate_instruction("regenerate"), - ) + ), ) self.generate_answer = dspy.ChainOfThought( dspy.Signature( self._generate_signature("answer").fields, self._generate_instruction("answer"), - ) + ), ) def _generate_signature(self, mode): @@ -63,7 +62,7 @@ def _generate_signature(self, mode): prefix="Code:", desc="python code that answers the question", format=str, - ) + ), }, "regenerate": { "previous_code": dspy.InputField( @@ -102,13 +101,13 @@ def _generate_instruction(self, mode): [ f"`{field_name}`" for field_name in self._generate_signature(mode).input_fields - ] + ], ) mode_outputs = ", ".join( [ f"`{field_name}`" for field_name in self._generate_signature(mode).output_fields - ] + ], ) if mode == "generate": instr = [ @@ -123,7 +122,7 @@ def _generate_instruction(self, mode): ] else: # mode == 'answer' instr = [ - f"Given the final code {mode_inputs}, provide the final {mode_outputs}." 
+ f"Given the final code {mode_inputs}, provide the final {mode_outputs}.", ] return "\n".join(instr) @@ -144,10 +143,10 @@ def parse_code(self, code_data): code_block += "\n" + last_line_match.group(1) else: code_block = re.sub( - r"([a-zA-Z_]\w* *=.*?)(?=[a-zA-Z_]\w* *=)", r"\1\n", code_block + r"([a-zA-Z_]\w* *=.*?)(?=[a-zA-Z_]\w* *=)", r"\1\n", code_block, ) code_block = re.sub( - r"([a-zA-Z_]\w* *=.*?)([a-zA-Z_]\w*)$", r"\1\n\2", code_block + r"([a-zA-Z_]\w* *=.*?)([a-zA-Z_]\w*)$", r"\1\n\2", code_block, ) return code_block, None @@ -171,7 +170,7 @@ def forward(self, **kwargs): while hop < self.max_iters and error: print("Error in code execution") code_data = self.code_regenerate( - question=kwargs["question"], previous_code=code, error=error + question=kwargs["question"], previous_code=code, error=error, ) parsed_code, error = self.parse_code(code_data) # FIXME: Don't try to execute the code if it didn't parse @@ -181,6 +180,6 @@ def forward(self, **kwargs): print("Max hops reached. Error persists.") return None answer_gen_result = self.generate_answer( - question=kwargs["question"], final_generated_code=code, code_output=output + question=kwargs["question"], final_generated_code=code, code_output=output, ) return answer_gen_result diff --git a/dspy/predict/react.py b/dspy/predict/react.py index ef24c6aca0..cf3be8b73c 100644 --- a/dspy/predict/react.py +++ b/dspy/predict/react.py @@ -40,7 +40,7 @@ def __init__(self, signature, max_iters=5, num_results=3, tools=None): for idx, tool in enumerate(self.tools): tool = self.tools[tool] instr.append( - f"({idx+1}) {tool.name}[{tool.input_variable}], which {tool.desc}" + f"({idx+1}) {tool.name}[{tool.input_variable}], which {tool.desc}", ) instr = "\n".join(instr) @@ -65,7 +65,7 @@ def _generate_signature(self, iters): f"{tool.name}[{tool.input_variable}]" for tool in self.tools.values() if tool.name != "Finish" - ] + ], ) signature_dict[f"Action_{j}"] = dspy.OutputField( prefix=f"Action {j}:", diff --git a/dspy/primitives/assertions.py b/dspy/primitives/assertions.py index feb54005c3..140f8bf6a2 100644 --- a/dspy/primitives/assertions.py +++ b/dspy/primitives/assertions.py @@ -1,5 +1,5 @@ import inspect -from typing import Any, Callable +from typing import Any import dsp import dspy import logging @@ -16,7 +16,7 @@ def setup_logger(): fileHandler.setLevel(logging.DEBUG) formatter = logging.Formatter( - "%(asctime)s - %(name)s - %(levelname)s - %(message)s" + "%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) fileHandler.setFormatter(formatter) @@ -80,7 +80,7 @@ def __init__( class Constraint: def __init__( - self, result: bool, msg: str = "", target_module=None, is_metric: bool = False + self, result: bool, msg: str = "", target_module=None, is_metric: bool = False, ): self.id = str(uuid.uuid4()) self.result = result @@ -187,7 +187,7 @@ def assert_no_except_handler(func): def wrapper(*args, **kwargs): try: return func(*args, **kwargs) - except DSPyAssertionError as e: + except DSPyAssertionError: return None return wrapper @@ -216,7 +216,7 @@ def wrapper(*args, **kwargs): if i > 0 and dspy.settings.backtrack_to is not None: # generate values for new fields feedback_msg = _build_error_msg( - dspy.settings.predictor_feedbacks[dspy.settings.backtrack_to] + dspy.settings.predictor_feedbacks[dspy.settings.backtrack_to], ) dspy.settings.backtrack_to_args = { @@ -274,7 +274,7 @@ def wrapper(*args, **kwargs): if ( error_msg not in dspy.settings.predictor_feedbacks.setdefault( - dspy.settings.backtrack_to, [] + dspy.settings.backtrack_to, [], 
) ): dspy.settings.predictor_feedbacks[ @@ -286,7 +286,7 @@ def wrapper(*args, **kwargs): past_outputs = {} for field_name in output_fields.keys(): past_outputs[field_name] = getattr( - error_state[2], field_name, None + error_state[2], field_name, None, ) # save latest failure trace for predictor per suggestion @@ -297,7 +297,7 @@ def wrapper(*args, **kwargs): else: logger.error( - f"UNREACHABLE: No trace available, this should not happen. Is this run time?" + "UNREACHABLE: No trace available, this should not happen. Is this run time?", ) return result @@ -323,37 +323,37 @@ def forward(self, *args, **kwargs): def assert_transform_module( - module, assertion_handler=default_assertion_handler, **handler_args + module, assertion_handler=default_assertion_handler, **handler_args, ): """ Transform a module to handle assertions. """ if not getattr(module, "forward", False): raise ValueError( - "Module must have a forward method to have assertions handled." + "Module must have a forward method to have assertions handled.", ) if getattr(module, "_forward", False): logger.info( - f"Module {module.__class__.__name__} already has a _forward method. Skipping..." + f"Module {module.__class__.__name__} already has a _forward method. Skipping...", ) pass # TODO warning: might be overwriting a previous _forward method module._forward = module.forward module.forward = handle_assert_forward(assertion_handler, **handler_args).__get__( - module + module, ) if all( - map(lambda p: isinstance(p[1], dspy.retry.Retry), module.named_predictors()) + map(lambda p: isinstance(p[1], dspy.retry.Retry), module.named_predictors()), ): pass # we already applied the Retry mapping outside elif all( - map(lambda p: not isinstance(p[1], dspy.retry.Retry), module.named_predictors()) + map(lambda p: not isinstance(p[1], dspy.retry.Retry), module.named_predictors()), ): module.map_named_predictors(dspy.retry.Retry) else: raise RuntimeError("Module has mixed predictors, can't apply Retry mapping.") - setattr(module, "_assert_transformed", True) + module._assert_transformed = True return module diff --git a/dspy/primitives/example.py b/dspy/primitives/example.py index dbc16c1e1c..55b06ad8d1 100644 --- a/dspy/primitives/example.py +++ b/dspy/primitives/example.py @@ -1,4 +1,3 @@ -import copy class Example: def __init__(self, base=None, **kwargs): diff --git a/dspy/primitives/module.py b/dspy/primitives/module.py index 03543f4c80..1b4a342f71 100644 --- a/dspy/primitives/module.py +++ b/dspy/primitives/module.py @@ -67,5 +67,5 @@ def save(self, path): f.write(ujson.dumps(self.dump_state(), indent=2)) def load(self, path): - with open(path, "r") as f: + with open(path) as f: self.load_state(ujson.loads(f.read())) diff --git a/dspy/primitives/program.py b/dspy/primitives/program.py index 1e85a35263..5e063bfb00 100644 --- a/dspy/primitives/program.py +++ b/dspy/primitives/program.py @@ -1,5 +1,3 @@ -import copy -import inspect from dspy.primitives.module import BaseModule from dspy.primitives.assertions import * diff --git a/dspy/primitives/python_interpreter.py b/dspy/primitives/python_interpreter.py index 11ae2795a7..fd6166db5e 100644 --- a/dspy/primitives/python_interpreter.py +++ b/dspy/primitives/python_interpreter.py @@ -16,19 +16,15 @@ import importlib import re import typing -import inspect from typing import ( Any, - Callable, Dict, - Mapping, List, Optional, Set, Tuple, - TypeVar, - Union, ) +from collections.abc import Mapping import builtins @@ -40,7 +36,7 @@ class InterpreterError(ValueError): pass -class 
PythonInterpreter(): +class PythonInterpreter: r"""A customized python interpreter to control the execution of LLM-generated codes. The interpreter makes sure the code can only execute functions given in action space and import white list. It also supports @@ -572,7 +568,7 @@ def set_code_type(self, code_type: str) -> None: def execute( self, interpreter: Optional[PythonInterpreter] = None, - user_variable: Optional[Dict[str, Any]] = None + user_variable: Optional[Dict[str, Any]] = None, ) -> Tuple[Any, PythonInterpreter]: r"""Executes the code string by a given python interpreter. diff --git a/dspy/retrieve/chromadb_rm.py b/dspy/retrieve/chromadb_rm.py index 1338d48d52..b4fc0bcf98 100644 --- a/dspy/retrieve/chromadb_rm.py +++ b/dspy/retrieve/chromadb_rm.py @@ -20,7 +20,7 @@ from chromadb.utils import embedding_functions from chromadb.api.types import ( Embeddable, - EmbeddingFunction + EmbeddingFunction, ) import chromadb.utils.embedding_functions as ef except ImportError: @@ -28,7 +28,7 @@ if chromadb is None: raise ImportError( - "The chromadb library is required to use ChromadbRM. Install it with `pip install dspy-ai[chromadb]`" + "The chromadb library is required to use ChromadbRM. Install it with `pip install dspy-ai[chromadb]`", ) @@ -97,7 +97,7 @@ def _init_chromadb( Settings( persist_directory=persist_directory, is_persistent=True, - ) + ), ) self._chromadb_collection = self._chromadb_client.get_or_create_collection( name=collection_name, @@ -120,7 +120,7 @@ def _get_embeddings(self, queries: List[str]) -> List[List[float]]: return self.ef(queries) def forward( - self, query_or_queries: Union[str, List[str]], k: Optional[int] = None + self, query_or_queries: Union[str, List[str]], k: Optional[int] = None, ) -> dspy.Prediction: """Search with db for self.k top passages for query @@ -140,7 +140,7 @@ def forward( k = self.k if k is None else k results = self._chromadb_collection.query( - query_embeddings=embeddings, n_results=k + query_embeddings=embeddings, n_results=k, ) passages = [dotdict({"long_text": x}) for x in results["documents"][0]] diff --git a/dspy/retrieve/clarifai_rm.py b/dspy/retrieve/clarifai_rm.py index 37846cfadc..654234d2cf 100644 --- a/dspy/retrieve/clarifai_rm.py +++ b/dspy/retrieve/clarifai_rm.py @@ -12,7 +12,7 @@ from clarifai.client.search import Search except ImportError as err: raise ImportError( - "Clarifai is not installed. Install it using `pip install clarifai`" + "Clarifai is not installed. 
Install it using `pip install clarifai`", ) from err @@ -45,7 +45,7 @@ def __init__( ) self.k = k self.clarifai_search = Search( - user_id=self.user_id, app_id=self.app_id, top_k=k, pat=self.pat + user_id=self.user_id, app_id=self.app_id, top_k=k, pat=self.pat, ) super().__init__(k=k) @@ -57,7 +57,7 @@ def retrieve_hits(self, hits): return requested_text def forward( - self, query_or_queries: Union[str, List[str]], k: Optional[int] = None + self, query_or_queries: Union[str, List[str]], k: Optional[int] = None, ) -> dspy.Prediction: """Uses clarifai-python SDK search function and retrieves top_k similar passages for given query, Args: diff --git a/dspy/retrieve/databricks_rm.py b/dspy/retrieve/databricks_rm.py index af2d42ff21..74066ec5b9 100644 --- a/dspy/retrieve/databricks_rm.py +++ b/dspy/retrieve/databricks_rm.py @@ -1,7 +1,7 @@ import dspy import os import requests -from typing import Union, List, Optional +from typing import Union, List from collections import defaultdict from dspy.primitives.prediction import Prediction @@ -91,11 +91,11 @@ def forward(self, query: Union[str, List[float]], query_type: str = 'vector') -> """ headers = { "Authorization": f"Bearer {self.databricks_token}", - "Content-Type": "application/json" + "Content-Type": "application/json", } payload = { "columns": self.columns, - "num_results": self.k + "num_results": self.k, } if query_type == 'vector': if not isinstance(query, list): @@ -112,7 +112,7 @@ def forward(self, query: Union[str, List[float]], query_type: str = 'vector') -> response = requests.post( f"{self.databricks_endpoint}/api/2.0/vector-search/indexes/{self.databricks_index_name}/query", json=payload, - headers=headers + headers=headers, ) results = response.json() diff --git a/dspy/retrieve/deeplake_rm.py b/dspy/retrieve/deeplake_rm.py index 37b008ee7d..87beb48565 100644 --- a/dspy/retrieve/deeplake_rm.py +++ b/dspy/retrieve/deeplake_rm.py @@ -2,7 +2,7 @@ Retriever model for deeplake """ -from typing import Optional, List, Union, Type +from typing import Optional, List, Union import openai import dspy from collections import defaultdict @@ -59,7 +59,7 @@ def __init__( from deeplake import VectorStore except ImportError: raise ImportError( - "The 'deeplake' extra is required to use DeepLakeRM. Install it with `pip install dspy-ai[deeplake]`" + "The 'deeplake' extra is required to use DeepLakeRM. Install it with `pip install dspy-ai[deeplake]`", ) self._deeplake_vectorstore_name = deeplake_vectorstore_name self._deeplake_client = deeplake_client @@ -77,7 +77,7 @@ def embedding_function(self, texts, model="text-embedding-ada-002"): ] def forward( - self, query_or_queries: Union[str, List[str]], k: Optional[int] + self, query_or_queries: Union[str, List[str]], k: Optional[int], ) -> dspy.Prediction: """Search with DeepLake for self.k top passages for query @@ -103,7 +103,7 @@ def forward( for query in queries: results = self._deeplake_client( path=self._deeplake_vectorstore_name, - embedding_function=self.embedding_function + embedding_function=self.embedding_function, ).search(query, k=k) for score,text in zip(results.get('score',0.0),results.get('text',"")): diff --git a/dspy/retrieve/marqo_rm.py b/dspy/retrieve/marqo_rm.py index d85aa3c87c..a090e47f52 100644 --- a/dspy/retrieve/marqo_rm.py +++ b/dspy/retrieve/marqo_rm.py @@ -7,7 +7,7 @@ import marqo except ImportError: raise ImportError( - "The 'marqo' extra is required to use MarqoRM. Install it with `pip install dspy-ai[marqo]`" + "The 'marqo' extra is required to use MarqoRM. 
Install it with `pip install dspy-ai[marqo]`", ) class MarqoRM(dspy.Retrieve): @@ -48,7 +48,7 @@ def __init__( marqo_client: marqo.client.Client, k: int = 3, page_content: str = 'document', - filter_string: str = None + filter_string: str = None, ): self._marqo_index_name = marqo_index_name self._marqo_client = marqo_client @@ -80,7 +80,7 @@ def forward(self, query_or_queries: Union[str, List[str]], k=None, **kwargs) -> q=query, limit=limit, filter_string=self.filter_string, - **kwargs + **kwargs, ) all_query_results.append(_result) diff --git a/dspy/retrieve/mongodb_atlas_rm.py b/dspy/retrieve/mongodb_atlas_rm.py index 6577a6c237..70e7847303 100644 --- a/dspy/retrieve/mongodb_atlas_rm.py +++ b/dspy/retrieve/mongodb_atlas_rm.py @@ -1,4 +1,4 @@ -from typing import List, Optional, Union, Any +from typing import List, Any import dspy import os from openai import ( @@ -21,12 +21,12 @@ ) except ImportError: raise ImportError( - "Please install the pymongo package by running `pip install dspy-ai[mongodb]`" + "Please install the pymongo package by running `pip install dspy-ai[mongodb]`", ) def build_vector_search_pipeline( - index_name: str, query_vector: List[float], num_candidates: int, limit: int + index_name: str, query_vector: List[float], num_candidates: int, limit: int, ) -> List[dict[str, Any]]: return [ { @@ -36,7 +36,7 @@ def build_vector_search_pipeline( "queryVector": query_vector, "numCandidates": num_candidates, "limit": limit, - } + }, }, {"$project": {"_id": 0, "text": 1, "score": {"$meta": "vectorSearchScore"}}}, ] @@ -92,7 +92,7 @@ def __init__( try: self.client = MongoClient( f"mongodb+srv://{self.username}:{self.password}@{self.cluster_url}/{self.db_name}" - "?retryWrites=true&w=majority" + "?retryWrites=true&w=majority", ) except ( InvalidURI, diff --git a/dspy/retrieve/pgvector_rm.py b/dspy/retrieve/pgvector_rm.py index da28b4269b..8403efb2e3 100644 --- a/dspy/retrieve/pgvector_rm.py +++ b/dspy/retrieve/pgvector_rm.py @@ -1,6 +1,6 @@ import dspy import openai -from typing import List, Union, Optional +from typing import List, Optional try: from pgvector.psycopg2 import register_vector @@ -8,7 +8,7 @@ from psycopg2 import sql except ImportError: raise ImportError( - "The 'pgvector' extra is required to use PgVectorRM. Install it with `pip install dspy-ai[pgvector]`" + "The 'pgvector' extra is required to use PgVectorRM. Install it with `pip install dspy-ai[pgvector]`", ) @@ -61,7 +61,7 @@ def __init__( openai_client: openai.OpenAI, k: Optional[int]=20, embedding_field: str = "embedding", - fields: List[str] = ['text'] + fields: List[str] = ['text'], ): """ k = 20 is the number of paragraphs to retrieve @@ -89,7 +89,7 @@ def forward(self, query: str, k: Optional[int]=20): query_embedding = self.openai_client.embeddings.create( model="text-embedding-ada-002", input=query, - encoding_format="float" + encoding_format="float", ).data[0].embedding related_paragraphs = [] @@ -101,7 +101,7 @@ def forward(self, query: str, k: Optional[int]=20): for f in self.fields ]), table=sql.Identifier(self.pg_table_name), - embedding_field=sql.Identifier(self.embedding_field) + embedding_field=sql.Identifier(self.embedding_field), ) with self.conn as conn: diff --git a/dspy/retrieve/pinecone_rm.py b/dspy/retrieve/pinecone_rm.py index 65645f8cd7..b520ed5fac 100644 --- a/dspy/retrieve/pinecone_rm.py +++ b/dspy/retrieve/pinecone_rm.py @@ -15,7 +15,7 @@ if pinecone is None: raise ImportError( - "The pinecone library is required to use PineconeRM. 
Install it with `pip install dspy-ai[pinecone]`" + "The pinecone library is required to use PineconeRM. Install it with `pip install dspy-ai[pinecone]`", ) import openai @@ -81,7 +81,7 @@ def __init__( from transformers import AutoModel, AutoTokenizer except ImportError as exc: raise ModuleNotFoundError( - "You need to install Hugging Face transformers library to use a local embedding model with PineconeRM." + "You need to install Hugging Face transformers library to use a local embedding model with PineconeRM.", ) from exc self._local_embed_model = AutoModel.from_pretrained(local_embed_model) @@ -90,7 +90,7 @@ def __init__( self.device = torch.device( 'cuda:0' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() - else 'cpu' + else 'cpu', ) elif openai_embed_model is not None: self._openai_embed_model = openai_embed_model @@ -102,11 +102,11 @@ def __init__( openai.organization = openai_org else: raise ValueError( - "Either local_embed_model or openai_embed_model must be provided." + "Either local_embed_model or openai_embed_model must be provided.", ) self._pinecone_index = self._init_pinecone( - pinecone_index_name, pinecone_api_key, pinecone_env + pinecone_index_name, pinecone_api_key, pinecone_env, ) super().__init__(k=k) @@ -145,7 +145,7 @@ def _init_pinecone( if index_name not in active_indexes: if dimension is None and distance_metric is None: raise ValueError( - "dimension and distance_metric must be provided since the index provided does not exist." + "dimension and distance_metric must be provided since the index provided does not exist.", ) pinecone.create_index( @@ -159,13 +159,13 @@ def _init_pinecone( def _mean_pooling( self, model_output, - attention_mask + attention_mask, ): try: import torch except ImportError as exc: raise ModuleNotFoundError( - "You need to install torch to use a local embedding model with PineconeRM." + "You need to install torch to use a local embedding model with PineconeRM.", ) from exc token_embeddings = model_output[0] # First element of model_output contains all token embeddings @@ -179,7 +179,7 @@ def _mean_pooling( ) def _get_embeddings( self, - queries: List[str] + queries: List[str], ) -> List[List[float]]: """Return query vector after creating embedding using OpenAI @@ -193,17 +193,17 @@ def _get_embeddings( import torch except ImportError as exc: raise ModuleNotFoundError( - "You need to install torch to use a local embedding model with PineconeRM." 
+ "You need to install torch to use a local embedding model with PineconeRM.", ) from exc if not self.use_local_model: if OPENAI_LEGACY: embedding = openai.Embedding.create( - input=queries, model=self._openai_embed_model + input=queries, model=self._openai_embed_model, ) else: embedding = openai.embeddings.create( - input=queries, model=self._openai_embed_model + input=queries, model=self._openai_embed_model, ).model_dump() return [embedding["embedding"] for embedding in embedding["data"]] @@ -239,12 +239,12 @@ def forward(self, query_or_queries: Union[str, List[str]]) -> dspy.Prediction: # For single query, just look up the top k passages if len(queries) == 1: results_dict = self._pinecone_index.query( - embeddings[0], top_k=self.k, include_metadata=True + embeddings[0], top_k=self.k, include_metadata=True, ) # Sort results by score sorted_results = sorted( - results_dict["matches"], key=lambda x: x.get("scores", 0.0), reverse=True + results_dict["matches"], key=lambda x: x.get("scores", 0.0), reverse=True, ) passages = [result["metadata"]["text"] for result in sorted_results] passages = [dotdict({"long_text": passage for passage in passages})] @@ -255,7 +255,7 @@ def forward(self, query_or_queries: Union[str, List[str]]) -> dspy.Prediction: passage_scores = {} for embedding in embeddings: results_dict = self._pinecone_index.query( - embedding, top_k=self.k * 3, include_metadata=True + embedding, top_k=self.k * 3, include_metadata=True, ) for result in results_dict["matches"]: passage_scores[result["metadata"]["text"]] = ( @@ -264,6 +264,6 @@ def forward(self, query_or_queries: Union[str, List[str]]) -> dspy.Prediction: ) sorted_passages = sorted( - passage_scores.items(), key=lambda x: x[1], reverse=True + passage_scores.items(), key=lambda x: x[1], reverse=True, )[: self.k] return dspy.Prediction(passages=[dotdict({"long_text": passage}) for passage, _ in sorted_passages]) diff --git a/dspy/retrieve/qdrant_rm.py b/dspy/retrieve/qdrant_rm.py index 61ca0f3ad7..46977a2524 100644 --- a/dspy/retrieve/qdrant_rm.py +++ b/dspy/retrieve/qdrant_rm.py @@ -8,7 +8,7 @@ import fastembed except ImportError: raise ImportError( - "The 'qdrant' extra is required to use QdrantRM. Install it with `pip install dspy-ai[qdrant]`" + "The 'qdrant' extra is required to use QdrantRM. Install it with `pip install dspy-ai[qdrant]`", ) diff --git a/dspy/retrieve/vectara_rm.py b/dspy/retrieve/vectara_rm.py index 8aec7dcd4e..4d72ed7e87 100644 --- a/dspy/retrieve/vectara_rm.py +++ b/dspy/retrieve/vectara_rm.py @@ -76,7 +76,7 @@ def _vectara_query( corpus_key = { "customerId": self._vectara_customer_id, "corpusId": self._vectara_corpus_id, - "lexicalInterpolationConfig": {"lambda": 0.025 } + "lexicalInterpolationConfig": {"lambda": 0.025 }, } data = { @@ -92,8 +92,8 @@ def _vectara_query( "endTag": END_SNIPPET, }, "corpusKey": [corpus_key], - } - ] + }, + ], } headers = { @@ -124,7 +124,7 @@ def _vectara_query( res = [ { "text": remove_snippet(x["text"]), - "score": x["score"] + "score": x["score"], } for x in responses ] return res diff --git a/dspy/retrieve/weaviate_rm.py b/dspy/retrieve/weaviate_rm.py index 7a8f3f7d77..1ef7950c1f 100644 --- a/dspy/retrieve/weaviate_rm.py +++ b/dspy/retrieve/weaviate_rm.py @@ -1,4 +1,3 @@ -from collections import defaultdict from typing import List, Union import dspy from dsp.utils import dotdict @@ -8,7 +7,7 @@ import weaviate except ImportError: raise ImportError( - "The 'weaviate' extra is required to use WeaviateRM. 
Install it with `pip install dspy-ai[weaviate]`" + "The 'weaviate' extra is required to use WeaviateRM. Install it with `pip install dspy-ai[weaviate]`", ) @@ -47,7 +46,7 @@ def __init__(self, weaviate_collection_name: str, weaviate_client: weaviate.Client, k: int = 3, - weaviate_collection_text_key: Optional[str] = "content" + weaviate_collection_text_key: Optional[str] = "content", ): self._weaviate_collection_name = weaviate_collection_name self._weaviate_client = weaviate_client diff --git a/dspy/signatures/signature.py b/dspy/signatures/signature.py index a5f6b976a7..7cd2f170cb 100644 --- a/dspy/signatures/signature.py +++ b/dspy/signatures/signature.py @@ -56,7 +56,7 @@ def _validate_fields(cls): field_type = extra.get("__dspy_field_type") if field_type not in ["input", "output"]: raise TypeError( - f"Field '{name}' in '{cls.__name__}' must be declared with InputField or OutputField." + f"Field '{name}' in '{cls.__name__}' must be declared with InputField or OutputField.", ) @property @@ -179,14 +179,14 @@ def __call__( fixed_fields = {} for name, type_field in fields.items(): assert isinstance( - name, str + name, str, ), f"Field names must be strings, not {type(name)}" if isinstance(type_field, FieldInfo): type_ = type_field.annotation field = type_field else: assert isinstance( - type_field, tuple + type_field, tuple, ), f"Field values must be tuples, not {type(type_field)}" type_, field = type_field # It might be better to be explicit about the type, but it currently would break @@ -194,10 +194,10 @@ def __call__( if type_ is None: type_ = str assert isinstance(type_, type) or isinstance( - typing.get_origin(type_), type + typing.get_origin(type_), type, ), f"Field types must be types, not {type(type_)}" assert isinstance( - field, FieldInfo + field, FieldInfo, ), f"Field values must be Field instances, not {type(field)}" fixed_fields[name] = (type_, field) @@ -266,10 +266,10 @@ def infer_prefix(attribute_name: str) -> str: # Insert underscores around numbers to ensure spaces in the final output with_underscores_around_numbers = re.sub( - r"([a-zA-Z])(\d)", r"\1_\2", intermediate_name + r"([a-zA-Z])(\d)", r"\1_\2", intermediate_name, ) with_underscores_around_numbers = re.sub( - r"(\d)([a-zA-Z])", r"\1_\2", with_underscores_around_numbers + r"(\d)([a-zA-Z])", r"\1_\2", with_underscores_around_numbers, ) # Convert snake_case to 'Proper Title Case', but ensure acronyms are uppercased diff --git a/dspy/teleprompt/bootstrap.py b/dspy/teleprompt/bootstrap.py index 46b2825645..5aaefa271f 100644 --- a/dspy/teleprompt/bootstrap.py +++ b/dspy/teleprompt/bootstrap.py @@ -4,14 +4,12 @@ import threading import dspy -from dspy.predict.retry import Retry from dspy.primitives import Example from .teleprompt import Teleprompter from .vanilla import LabeledFewShot -from dspy.evaluate.evaluate import Evaluate # TODO: metrics should return an object with __bool__ basically, but fine if they're more complex. # They can also be sortable. 
@@ -58,8 +56,8 @@ def compile(self, student, *, teacher=None, trainset, valset=None): self.student._compiled = True # set assert_failures and suggest_failures as attributes of student w/ value 0 - setattr(self.student, '_assert_failures', 0) - setattr(self.student, '_suggest_failures', 0) + self.student._assert_failures = 0 + self.student._suggest_failures = 0 return self.student @@ -204,7 +202,6 @@ def _train(self): raw_demos = rng.sample(raw_demos, sample_size) - import dspy if dspy.settings.release >= 20230928: predictor.demos = raw_demos + augmented_demos else: diff --git a/dspy/teleprompt/ensemble.py b/dspy/teleprompt/ensemble.py index f52dc10214..5e0db9bcac 100644 --- a/dspy/teleprompt/ensemble.py +++ b/dspy/teleprompt/ensemble.py @@ -1,5 +1,3 @@ -import dsp -import tqdm import random from dspy.teleprompt.teleprompt import Teleprompter diff --git a/dspy/teleprompt/finetune.py b/dspy/teleprompt/finetune.py index a56adaecbe..29b7887ab5 100644 --- a/dspy/teleprompt/finetune.py +++ b/dspy/teleprompt/finetune.py @@ -1,7 +1,6 @@ import os import time import dsp -import tqdm import random import ujson @@ -130,7 +129,7 @@ def compile(self, student, *, teacher=None, trainset, valset=None, 'batch_size': bsize, 'epochs': epochs, 'gradient_accumulation_steps': accumsteps, # 2, - 'lr': lr + 'lr': lr, } compiler_config['save'] = os.path.join(path_prefix, compiler_config['save']) if path_prefix else compiler_config['save'] diff --git a/dspy/teleprompt/random_search.py b/dspy/teleprompt/random_search.py index da5bf24ce6..3f7f2cd636 100644 --- a/dspy/teleprompt/random_search.py +++ b/dspy/teleprompt/random_search.py @@ -1,5 +1,3 @@ -import dsp -import tqdm import random from dspy.teleprompt.teleprompt import Teleprompter diff --git a/dspy/teleprompt/signature_opt.py b/dspy/teleprompt/signature_opt.py index f38eccf1a2..3163acf3f2 100644 --- a/dspy/teleprompt/signature_opt.py +++ b/dspy/teleprompt/signature_opt.py @@ -60,11 +60,11 @@ def __init__(self, prompt_model=None, metric=None, breadth=10, depth=3, init_tem def _check_candidates_equal(self, candidate1, candidate2): for p1, p2 in zip(candidate1["program"].predictors(), candidate2["program"].predictors()): - if not p1.extended_signature.instructions == p2.extended_signature.instructions: + if p1.extended_signature.instructions != p2.extended_signature.instructions: return False *_, p1_last_field = p1.extended_signature.fields.values() *_, p2_last_field = p2.extended_signature.fields.values() - if not p1_last_field == p2_last_field: + if p1_last_field != p2_last_field: return False return True @@ -177,7 +177,7 @@ def compile(self, student, *, devset, eval_kwargs): .with_updated_fields(last_key, prefix=prefix) # Score the instruction / prefix - if self.verbose: print(f"----------------") + if self.verbose: print("----------------") for i,predictor in enumerate(module_clone.predictors()): if self.verbose: print(f"Predictor {i}") self._print_signature(predictor) @@ -185,7 +185,7 @@ def compile(self, student, *, devset, eval_kwargs): score = evaluate(module_clone, devset=devset, **eval_kwargs) if self.verbose and self.prompt_model: print(f"prompt_model.inspect_history(n=1) {self.prompt_model.inspect_history(n=1)}") total_calls += 1 - if self.verbose: print(f"----------------") + if self.verbose: print("----------------") replace_entry = True if self.verbose: print(f"(instruction, prefix) {(instruction, prefix)}") @@ -202,7 +202,7 @@ def compile(self, student, *, devset, eval_kwargs): "program": module_clone.deepcopy(), "instruction": instruction, 
"prefix": prefix, - "depth": d + "depth": d, } if (len(candidates_)-self.breadth <= c_i): @@ -233,7 +233,7 @@ def compile(self, student, *, devset, eval_kwargs): .with_instructions(best_candidate["instruction"]) \ .with_updated_fields(last_key2, prefix=best_candidate["prefix"]) if self.verbose: print(f"Updating Predictor {id(p_old)} to:\ni: {best_candidate['instruction']}\np: {best_candidate['prefix']}") - if self.verbose: print(f"Full predictor with update: ") + if self.verbose: print("Full predictor with update: ") for i,predictor in enumerate(module_clone.predictors()): if self.verbose: print(f"Predictor {i}") self._print_signature(predictor) diff --git a/dspy/teleprompt/signature_opt_bayesian.py b/dspy/teleprompt/signature_opt_bayesian.py index d5245bc931..85298276ab 100644 --- a/dspy/teleprompt/signature_opt_bayesian.py +++ b/dspy/teleprompt/signature_opt_bayesian.py @@ -7,7 +7,6 @@ from collections import defaultdict import random from dspy.teleprompt import BootstrapFewShot -import numpy as np import optuna import math @@ -221,11 +220,11 @@ def _generate_first_N_candidates(self, module, N, view_data, view_examples, demo new_instruct = dspy.Predict( BasicGenerateInstructionWithExamplesAndDataObservations, n=1, - temperature=self.init_temperature + temperature=self.init_temperature, )( basic_instruction=basic_instruction, observations=self.observations, - examples=example_sets[id(predictor)][i] + examples=example_sets[id(predictor)][i], ) if not instruct: instruct = new_instruct @@ -242,10 +241,10 @@ def _generate_first_N_candidates(self, module, N, view_data, view_examples, demo new_instruct = dspy.Predict( BasicGenerateInstructionWithExamples, n=1, - temperature=self.init_temperature + temperature=self.init_temperature, )( basic_instruction=basic_instruction, - examples=example_sets[id(predictor)][i] + examples=example_sets[id(predictor)][i], ) if not instruct: instruct = new_instruct @@ -279,7 +278,7 @@ def compile(self, student, *, devset, optuna_trials_num, max_bootstrapped_demos, for i in range(self.n): if i == 0: # Story empty set of demos as default for index 0 for module_p in module.predictors(): - if id(module_p) not in demo_candidates.keys(): + if id(module_p) not in demo_candidates: demo_candidates[id(module_p)] = [] demo_candidates[id(module_p)].append([]) else: @@ -294,7 +293,7 @@ def compile(self, student, *, devset, optuna_trials_num, max_bootstrapped_demos, # Store the candidate demos for module_p, candidate_p in zip(module.predictors(), candidate_program.predictors()): - if id(module_p) not in demo_candidates.keys(): + if id(module_p) not in demo_candidates: demo_candidates[id(module_p)] = [] demo_candidates[id(module_p)].append(candidate_p.demos) @@ -374,7 +373,7 @@ def objective(trial): # Handle pruning based on the intermediate value. 
if trial.should_prune(): - if self.verbose: print(f"Optuna decided to prune!") + if self.verbose: print("Optuna decided to prune!") trial_logs[trial_num]["score"] = curr_weighted_avg_score trial_logs[trial_num]["pruned"] = True trial_num += 1 diff --git a/dspy/teleprompt/teleprompt.py b/dspy/teleprompt/teleprompt.py index 949a6f89c4..ab3ae060c4 100644 --- a/dspy/teleprompt/teleprompt.py +++ b/dspy/teleprompt/teleprompt.py @@ -1,8 +1,4 @@ -import tqdm -import random -import dsp -from dspy.evaluate.evaluate import Evaluate class Teleprompter: diff --git a/dspy/teleprompt/teleprompt_optuna.py b/dspy/teleprompt/teleprompt_optuna.py index 4847ce9395..3686e66b7d 100644 --- a/dspy/teleprompt/teleprompt_optuna.py +++ b/dspy/teleprompt/teleprompt_optuna.py @@ -1,12 +1,8 @@ -import dsp -import tqdm -import random import optuna from dspy.teleprompt.teleprompt import Teleprompter from .bootstrap import BootstrapFewShot -from .vanilla import LabeledFewShot from dspy.evaluate.evaluate import Evaluate diff --git a/dspy/teleprompt/vanilla.py b/dspy/teleprompt/vanilla.py index 068343fa0d..ced8312116 100644 --- a/dspy/teleprompt/vanilla.py +++ b/dspy/teleprompt/vanilla.py @@ -1,4 +1,3 @@ -import dsp import random from .teleprompt import Teleprompter diff --git a/examples/functional/repl.py b/examples/functional/repl.py index f0fc1801f5..b0e60203c4 100644 --- a/examples/functional/repl.py +++ b/examples/functional/repl.py @@ -14,7 +14,7 @@ def worker(cls, code: str, globals, locals, queue): def run(self, command: str, timeout=5, globals={}, locals=None) -> Optional[str]: queue: multiprocessing.Queue = multiprocessing.Queue() p = multiprocessing.Process( - target=self.worker, args=(command, globals, locals, queue) + target=self.worker, args=(command, globals, locals, queue), ) p.start() p.join(timeout) diff --git a/examples/longformqa/utils.py b/examples/longformqa/utils.py index 4f5f60e70f..e4ebadc829 100644 --- a/examples/longformqa/utils.py +++ b/examples/longformqa/utils.py @@ -1,5 +1,4 @@ import regex as re -import os import nltk nltk.download('punkt') from nltk.tokenize import sent_tokenize diff --git a/examples/tweets/tweet_metric.py b/examples/tweets/tweet_metric.py index c4f11709c8..a2c522a754 100644 --- a/examples/tweets/tweet_metric.py +++ b/examples/tweets/tweet_metric.py @@ -38,7 +38,7 @@ def metric(gold, pred, trace=None): correct = dspy.Predict(Assess)(context='N/A', assessed_text=tweet, assessment_question=correct) engaging = dspy.Predict(Assess)(context='N/A', assessed_text=tweet, assessment_question=engaging) - correct, engaging, faithful = [m.assessment_answer.split()[0].lower() == 'yes' for m in [correct, engaging, faithful]] + correct, engaging, faithful = (m.assessment_answer.split()[0].lower() == 'yes' for m in [correct, engaging, faithful]) score = (correct + engaging + faithful) if correct and (len(tweet) <= 280) else 0 if METRIC is not None: diff --git a/setup.py b/setup.py index e8c20f052d..96e6ff5ab4 100644 --- a/setup.py +++ b/setup.py @@ -1,11 +1,11 @@ from setuptools import setup, find_packages # Read the content of the README file -with open('README.md', 'r', encoding='utf-8') as f: +with open('README.md', encoding='utf-8') as f: long_description = f.read() # Read the content of the requirements.txt file -with open('requirements.txt', 'r', encoding='utf-8') as f: +with open('requirements.txt', encoding='utf-8') as f: requirements = f.read().splitlines() setup( diff --git a/testing/optimizer_tester.py b/testing/optimizer_tester.py index ffcf3b2627..e55f4dd4c4 100644 --- 
a/testing/optimizer_tester.py +++ b/testing/optimizer_tester.py @@ -142,7 +142,7 @@ def test_baseline(self, datasets=datasets, test_name="baseline"): 'view_data': False, 'optimizer_log_dir': 'NA', 'additional_notes': '', - 'misc': '' + 'misc': '', }) def test_optimizer_default(self, optimizer_function, datasets=datasets, test_name="default"): @@ -195,7 +195,7 @@ def test_optimizer_default(self, optimizer_function, datasets=datasets, test_nam 'view_data': False, 'optimizer_log_dir': log_dir, 'additional_notes': '', - 'misc': '' + 'misc': '', } output.update(output_dict) diff --git a/testing/tasks/biodex.py b/testing/tasks/biodex.py index 43b2d2e4a4..f98da11415 100644 --- a/testing/tasks/biodex.py +++ b/testing/tasks/biodex.py @@ -1,12 +1,10 @@ from __future__ import annotations from .base_task import BaseTask -import sys import dspy from dspy.evaluate import Evaluate from dsp.utils import deduplicate import tqdm import datasets -import sys import math from functools import lru_cache import os @@ -80,19 +78,19 @@ class Node(BaseModel): level: Level count: int = 0 - parent: 'Node' = None - children: Dict[str, 'Node'] = {} + parent: Node = None + children: Dict[str, Node] = {} # maps for efficient nested children lookups # typically only used for the root node - id_to_nodes: Optional[DefaultDict[str, List['Node']]] = None - term_to_nodes: Optional[DefaultDict[str, List['Node']]] = None + id_to_nodes: Optional[DefaultDict[str, List[Node]]] = None + term_to_nodes: Optional[DefaultDict[str, List[Node]]] = None # text normalizer to build lookup tables # typically only used for the root node normalizer: function = lambda x: x - def get_parent_at_level(self, target_level: Level) -> Union['Node', None]: + def get_parent_at_level(self, target_level: Level) -> Union[Node, None]: # check if current node is deeper than target_level level_diff = target_level - self.level if level_diff > 0: @@ -103,10 +101,10 @@ def get_parent_at_level(self, target_level: Level) -> Union['Node', None]: node = node.parent return node - def get_children_at_level(self, target_level: Level) -> List['Node']: + def get_children_at_level(self, target_level: Level) -> List[Node]: raise NotImplementedError - def lookup_id(self, id: str) -> Union[List['Node'], None]: + def lookup_id(self, id: str) -> Union[List[Node], None]: # create lookup tables if they do not exists if self.id_to_nodes == None: self.set_lookup_tables() @@ -117,7 +115,7 @@ def lookup_id(self, id: str) -> Union[List['Node'], None]: else: return None - def lookup_term(self, term: str) -> Union[List['Node'], None]: + def lookup_term(self, term: str) -> Union[List[Node], None]: # create lookup tables if they do not exists if self.term_to_nodes == None: raise RuntimeError("First set the lookup tables using 'set_lookup_tables'.") @@ -198,9 +196,9 @@ def set_lookup_tables(self, normalizer: function = lambda x: x) -> None: # add candidate's children to candidate list if candidate.children: candidates.extend(candidate.children.values()) - return None + return - def is_equivalent_node(self, other: 'Node') -> bool: + def is_equivalent_node(self, other: Node) -> bool: # return true if two nodes are the same, siblings or in a parent-child relation return ( (self == other) @@ -229,7 +227,7 @@ def __key(self): def __hash__(self): return hash(self.__key()) - def __eq__(self, other: 'Node') -> bool: + def __eq__(self, other: Node) -> bool: # undeep equals operator, does not check children return self.__key() == other.__key() @@ -258,7 +256,7 @@ class Match(BaseModel): query: str 
def _create_child_if_not_exists( - parent: Node, child_id: str, child_term: str, term_to_count: dict + parent: Node, child_id: str, child_term: str, term_to_count: dict, ): parent_level = parent.level # get child level if still appropriate @@ -276,7 +274,7 @@ def _create_child_if_not_exists( else: count = 0 child = Node( - id=child_id, term=child_term, level=child_level, parent=parent, count=count + id=child_id, term=child_term, level=child_level, parent=parent, count=count, ) parent.children.update({child_id: child}) return child @@ -297,7 +295,7 @@ def parse_mdhier(data_dir): # get data lines = [] - with open(file, "r") as fp: + with open(file) as fp: lines = fp.readlines() text = "\n".join(lines) @@ -306,7 +304,7 @@ def parse_mdhier(data_dir): print(f"Read {len(lines)} lines.") if len(lines) != len(matches): raise RuntimeError( - f"Error parsing mdhier.asc. Expected number of matches to match number of lines." + "Error parsing mdhier.asc. Expected number of matches to match number of lines.", ) # load counts @@ -562,7 +560,7 @@ def extract_reactions_from_strings(reactions): return reactions class PredictReactions(dspy.Signature): - __doc__ = f"""Given a snippet from a medical article, identify the adverse drug reactions affecting the patient. If none are mentioned in the snippet, say N/A.""" + __doc__ = """Given a snippet from a medical article, identify the adverse drug reactions affecting the patient. If none are mentioned in the snippet, say N/A.""" title = dspy.InputField() context = dspy.InputField() diff --git a/testing/tasks/scone.py b/testing/tasks/scone.py index e4cbad8097..0ee5f0391f 100644 --- a/testing/tasks/scone.py +++ b/testing/tasks/scone.py @@ -27,7 +27,7 @@ def as_example(row): "context": row['sentence1' + suffix], "question": question, "answer": label, - "category": row['category'] + "category": row['category'], }).with_inputs("context", "question") return list(data_df.apply(as_example, axis=1).values) diff --git a/testing/tasks/tweet.py b/testing/tasks/tweet.py index aeeddc6a7a..30bdef68a5 100644 --- a/testing/tasks/tweet.py +++ b/testing/tasks/tweet.py @@ -1,7 +1,6 @@ import dspy from dspy.datasets import HotPotQA from .base_task import BaseTask -from dspy.evaluate import Evaluate from functools import lru_cache import openai from dotenv import load_dotenv @@ -45,7 +44,7 @@ class Assess(dspy.Signature): assessment_question = dspy.InputField() assessment_answer = dspy.OutputField(desc="Yes or No") -@lru_cache() +@lru_cache def load_models(): load_dotenv() # This will load the .env file's variables @@ -74,7 +73,7 @@ def metric(gold, pred, trace=None): correct = dspy.Predict(Assess)(context='N/A', assessed_text=tweet, assessment_question=correct) engaging = dspy.Predict(Assess)(context='N/A', assessed_text=tweet, assessment_question=engaging) - correct, engaging, faithful = [m.assessment_answer.split()[0].lower() == 'yes' for m in [correct, engaging, faithful]] + correct, engaging, faithful = (m.assessment_answer.split()[0].lower() == 'yes' for m in [correct, engaging, faithful]) score = (correct + engaging + faithful) if correct and (len(tweet) <= 280) else 0 if METRIC is not None: diff --git a/testing/tasks/tweet_metric.py b/testing/tasks/tweet_metric.py index ea51d71670..baa7024ab7 100644 --- a/testing/tasks/tweet_metric.py +++ b/testing/tasks/tweet_metric.py @@ -1,7 +1,6 @@ import dspy from dspy.datasets import HotPotQA from .base_task import BaseTask -from dspy.evaluate import Evaluate from dspy import Example from functools import lru_cache import openai @@ -48,7 
+47,7 @@ class Assess(dspy.Signature): assessment_question = dspy.InputField() assessment_answer = dspy.OutputField(desc="Yes or No") -@lru_cache() +@lru_cache def load_models(): load_dotenv() # This will load the .env file's variables @@ -76,7 +75,7 @@ def metric(gold, pred, trace=None): correct = dspy.Predict(Assess)(context='N/A', assessed_text=tweet, assessment_question=correct) engaging = dspy.Predict(Assess)(context='N/A', assessed_text=tweet, assessment_question=engaging) - correct, engaging, faithful = [m.assessment_answer.split()[0].lower() == 'yes' for m in [correct, engaging, faithful]] + correct, engaging, faithful = (m.assessment_answer.split()[0].lower() == 'yes' for m in [correct, engaging, faithful]) score = (correct + engaging + faithful) if correct and (len(tweet) <= 280) else 0 return 1 - abs(score - score_pred) # We want a score we can maximize, so take the negative L1 norm and add 1 @@ -98,7 +97,7 @@ def forward (self, tweet, context, question, answer) : correct = self.correct(context='N/A', assessed_text=tweet, assessment_question=correct) engaging = self.engaging(context='N/A', assessed_text=tweet, assessment_question=engaging) - correct, engaging, faithful = (m.assessment_answer.split()[0].lower() == 'yes' for m in [correct, engaging, faithful]) + correct, engaging, faithful = (m.assessment_answer.split()[0].lower() == 'yes' for m in [correct, engaging, faithful]) score = (correct + engaging + faithful) if correct and (len(tweet) <= 280) else 0 return dspy.Prediction(score= score/3.0) From 09368843e12cb546c668dd2a9ba81eabdc38a9aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20=C5=A0uppa?= Date: Fri, 1 Mar 2024 23:43:47 +0200 Subject: [PATCH 030/243] fix: import of ChromaRM --- docs/api/retrieval_model_clients/ChromadbRM.md | 2 +- docs/docs/deep-dive/retrieval_models_clients/ChromadbRM.mdx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/api/retrieval_model_clients/ChromadbRM.md b/docs/api/retrieval_model_clients/ChromadbRM.md index 9b40759a51..42f6f42e7f 100644 --- a/docs/api/retrieval_model_clients/ChromadbRM.md +++ b/docs/api/retrieval_model_clients/ChromadbRM.md @@ -41,7 +41,7 @@ Search the chromadb collection for the top `k` passages matching the given query ChromadbRM has the flexibility to use a variety of embedding functions, as outlined in the [chromadb embeddings documentation](https://docs.trychroma.com/embeddings). While different options are available, this example demonstrates how to utilize OpenAI embeddings specifically.
```python -from dspy.retrieve import ChromadbRM +from dspy.retrieve.chromadb_rm import ChromadbRM import os import openai from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction diff --git a/docs/docs/deep-dive/retrieval_models_clients/ChromadbRM.mdx b/docs/docs/deep-dive/retrieval_models_clients/ChromadbRM.mdx index a5883ac083..f02102f46d 100644 --- a/docs/docs/deep-dive/retrieval_models_clients/ChromadbRM.mdx +++ b/docs/docs/deep-dive/retrieval_models_clients/ChromadbRM.mdx @@ -45,7 +45,7 @@ Search the chromadb collection for the top `k` passages matching the given query ## Sending Retrieval Requests via ChromadbRM Client ```python -from dspy.retrieve import ChromadbRM +from dspy.retrieve.chromadb_rm import ChromadbRM import os import openai from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction From 182a6a6c7f6752df1c1f57ef7cff7e48414bbdfa Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Fri, 1 Mar 2024 13:56:31 -0800 Subject: [PATCH 031/243] Fixes lint issues in functional.py --- dspy/functional/functional.py | 74 ++++++++++++++++++----------------- pyproject.toml | 9 +++++ 2 files changed, 48 insertions(+), 35 deletions(-) diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py index d1d9bf7cac..96c218ebbd 100644 --- a/dspy/functional/functional.py +++ b/dspy/functional/functional.py @@ -4,7 +4,7 @@ import dspy import typing import pydantic -from typing import Annotated, List, Tuple +from typing import Annotated, List, Tuple # noqa: UP035 from dsp.templates import passages2text import json @@ -14,13 +14,15 @@ MAX_RETRIES = 3 -def predictor(func): +def predictor(func) -> dspy.Module: + """Decorator that creates a predictor module based on the provided function.""" signature = _func_to_signature(func) *_, output_key = signature.output_fields.keys() return _StripOutput(TypedPredictor(signature), output_key) -def cot(func): +def cot(func) -> dspy.Module: + """Decorator that creates a chain of thought module based on the provided function.""" signature = _func_to_signature(func) *_, output_key = signature.output_fields.keys() return _StripOutput(TypedChainOfThought(signature), output_key) @@ -51,7 +53,7 @@ def __init__(self): self.__dict__[name] = attr.copy() -def TypedChainOfThought(signature): +def TypedChainOfThought(signature) -> dspy.Module: # noqa: N802 """Just like TypedPredictor, but adds a ChainOfThought OutputField.""" signature = ensure_signature(signature) output_keys = ", ".join(signature.output_fields.keys()) @@ -72,11 +74,11 @@ def __init__(self, signature): self.signature = signature self.predictor = dspy.Predict(signature) - def copy(self): + def copy(self) -> "TypedPredictor": return TypedPredictor(self.signature) @staticmethod - def _make_example(type_): + def _make_example(type_) -> str: # Note: DSPy will cache this call so we only pay the first time TypedPredictor is called. json_object = dspy.Predict( dspy.Signature( @@ -96,9 +98,10 @@ def _make_example(type_): # TODO: Instead of using a language model to create the example, we can also just use a # library like https://pypi.org/project/polyfactory/ that's made exactly to do this. - def _prepare_signature(self): + def _prepare_signature(self) -> dspy.Signature: """Add formats and parsers to the signature fields, based on the type - annotations of the fields.""" + annotations of the fields. 
+ """ signature = self.signature for name, field in self.signature.fields.items(): is_output = field.json_schema_extra["__dspy_field_type"] == "output" @@ -114,12 +117,12 @@ def _prepare_signature(self): ) else: # Anything else we wrap in a pydantic object - unwrap = lambda x: x - wrap = lambda x: x + to_json = lambda x: x.model_dump_json() + from_json = lambda x, type_=type_: type_.model_validate_json(x) if not (inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel)): type_ = pydantic.create_model("Output", value=(type_, ...), __base__=pydantic.BaseModel) - wrap = lambda x: type_(value=x) - unwrap = lambda x: x.value + to_json = lambda x, type_=type_: type_(value=x).model_dump_json() + from_json = lambda x, type_=type_: type_.model_validate_json(x).value signature = signature.with_updated_fields( name, desc=field.json_schema_extra.get("desc", "") @@ -127,21 +130,21 @@ def _prepare_signature(self): ". Respond with a single JSON object. JSON Schema: " + json.dumps(type_.model_json_schema()) ), - format=lambda x: (x if isinstance(x, str) else wrap(x).model_dump_json()), - parser=lambda x: unwrap(type_.model_validate_json(_unwrap_json(x))), + format=lambda x, to_json=to_json: (x if isinstance(x, str) else to_json(x)), + parser=lambda x, from_json=from_json: from_json(_unwrap_json(x)), type_=type_, ) else: # If input field - format = lambda x: x if isinstance(x, str) else str(x) + format_ = lambda x: x if isinstance(x, str) else str(x) if type_ in (List[str], list[str], Tuple[str], tuple[str]): - format = passages2text + format_ = passages2text elif inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel): - format = lambda x: x if isinstance(x, str) else x.model_dump_json() - signature = signature.with_updated_fields(name, format=format) + format_ = lambda x: x if isinstance(x, str) else x.model_dump_json() + signature = signature.with_updated_fields(name, format=format_) return signature - def forward(self, **kwargs): + def forward(self, **kwargs) -> dspy.Prediction: modified_kwargs = kwargs.copy() # We have to re-prepare the signature on every forward call, because the base # signature might have been modified by an optimizer or something like that. 
@@ -165,11 +168,12 @@ def forward(self, **kwargs): continue # Only add examples to JSON objects suffix, current_desc = current_desc[i:], current_desc[:i] prefix = "You MUST use this format: " - if try_i + 1 < MAX_RETRIES and prefix not in current_desc: - if example := self._make_example(field.annotation): - signature = signature.with_updated_fields( - name, desc=current_desc + "\n" + prefix + example + "\n" + suffix, - ) + if try_i + 1 < MAX_RETRIES \ + and prefix not in current_desc \ + and (example := self._make_example(field.annotation)): + signature = signature.with_updated_fields( + name, desc=current_desc + "\n" + prefix + example + "\n" + suffix, + ) if errors: # Add new fields for each error for name, error in errors.items(): @@ -249,7 +253,7 @@ def _unwrap_json(output): ################################################################################ -def main(): +def main() -> None: class Answer(pydantic.BaseModel): value: float certainty: float @@ -277,8 +281,8 @@ def forward(self, **kwargs): question, answer = qa(topic="Physics") # lm.inspect_history(n=5) - print("Question:", question) - print("Answer:", answer) + print("Question:", question) # noqa: T201 + print("Answer:", answer) # noqa: T201 ################################################################################ @@ -286,7 +290,7 @@ def forward(self, **kwargs): ################################################################################ -def validate_context_and_answer_and_hops(example, pred, trace=None): +def validate_context_and_answer_and_hops(example, pred, trace=None) -> bool: if not dspy.evaluate.answer_exact_match(example, pred): return False if not dspy.evaluate.answer_passage_match(example, pred): @@ -302,25 +306,25 @@ def validate_context_and_answer_and_hops(example, pred, trace=None): return True -def gold_passages_retrieved(example, pred, trace=None): +def gold_passages_retrieved(example, pred, _trace=None) -> bool: gold_titles = set(map(dspy.evaluate.normalize_text, example["gold_titles"])) found_titles = set(map(dspy.evaluate.normalize_text, [c.split(" | ")[0] for c in pred.context])) return gold_titles.issubset(found_titles) -def hotpot(): +def hotpot() -> None: from dsp.utils import deduplicate import dspy.evaluate from dspy.datasets import HotPotQA from dspy.evaluate.evaluate import Evaluate from dspy.teleprompt.bootstrap import BootstrapFewShot - print("Load the dataset.") + print("Load the dataset.") # noqa: T201 dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0) trainset = [x.with_inputs("question") for x in dataset.train] devset = [x.with_inputs("question") for x in dataset.dev] - print("Done") + print("Done") # noqa: T201 class SimplifiedBaleen(FunctionalModule): def __init__(self, passages_per_hop=3, max_hops=1): @@ -341,7 +345,7 @@ def generate_answer(self, context: list[str], question) -> str: def forward(self, question): context = [] - for hop in range(self.max_hops): + for _ in range(self.max_hops): query = self.generate_query(context=context, question=question) passages = self.retrieve(query).passages context = deduplicate(context + passages) @@ -358,7 +362,7 @@ def forward(self, question): # uncompiled (i.e., zero-shot) program uncompiled_baleen = SimplifiedBaleen() - print( + print( # noqa: T201 "Uncompiled Baleen retrieval score:", evaluate_on_hotpotqa(uncompiled_baleen, metric=gold_passages_retrieved), ) @@ -369,7 +373,7 @@ def forward(self, question): teacher=SimplifiedBaleen(passages_per_hop=2), trainset=trainset, ) - print( + print( # noqa: 
T201 "Compiled Baleen retrieval score:", evaluate_on_hotpotqa(compiled_baleen, metric=gold_passages_retrieved), ) diff --git a/pyproject.toml b/pyproject.toml index c01ca1d083..bdb4bc75d9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -254,6 +254,15 @@ ignore = [ "RET505", # Within an `except` clause, raise exceptions with `raise "B904", + # We don't need docstrings for every method + "ANN202", + "D107", + "D102", + "D103", + # Inline lambdas + "E731", + # Sometimes we need List and Tuple + "UP006", ] # Allow fix for all enabled rules (when `--fix`) is provided. From 5cdd7eb2478a8f103d24ea1949305f02511ebba4 Mon Sep 17 00:00:00 2001 From: Insop Song Date: Fri, 1 Mar 2024 17:22:26 -0800 Subject: [PATCH 032/243] BootstrapFewShot failing due to lm.copy for AzureOpenAI, #521 BootstrapFewShot failing due to lm.copy for AzureOpenAI, likely due to positional argument issue from recent changes. - code: 98304a2eb9d1dddaaa846e30258cd5d8fc6b5d8d - model azure openai - during `BootstrapFewShot` run, code errored out due to missing arguments, `api_version` ``` >[dspy/dsp/modules/lm.py](https://file+.vscode-resource.vscode-cdn.net/Users/insop/Projects/Github/NLP/dspy/dsp/modules/lm.py)(106)copy() 102 model = kwargs.pop('model') 105 --> 106 return self.__class__(model, **kwargs) ``` AzureOpenAI expects positional arguments, so it is failing. I will add my fixes and see if that will impact other models. ``` class AzureOpenAI(LM): ... def __init__( self, api_base: str, api_version: str, model: str = "gpt-3.5-turbo-instruct", api_key: Optional[str] = None, model_type: Literal["chat", "text"] = "chat", **kwargs, ): --- dsp/modules/azure_openai.py | 3 +++ dsp/modules/lm.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/dsp/modules/azure_openai.py b/dsp/modules/azure_openai.py index d930bec6b5..c90f634e4f 100644 --- a/dsp/modules/azure_openai.py +++ b/dsp/modules/azure_openai.py @@ -107,6 +107,9 @@ def __init__( kwargs["model"] = model self.kwargs = { + "api_base": api_base, + "api_version": api_version, + "api_key": api_key, "temperature": 0.0, "max_tokens": 150, "top_p": 1, diff --git a/dsp/modules/lm.py b/dsp/modules/lm.py index e2965d49ac..5d38660798 100644 --- a/dsp/modules/lm.py +++ b/dsp/modules/lm.py @@ -101,4 +101,4 @@ def copy(self, **kwargs): kwargs = {**self.kwargs, **kwargs} model = kwargs.pop('model') - return self.__class__(model, **kwargs) + return self.__class__(model=model, **kwargs) From 1aebd979d3c5345089481045294784604da526b7 Mon Sep 17 00:00:00 2001 From: Franck Stephane Ndzomga Date: Sat, 2 Mar 2024 16:04:22 +0100 Subject: [PATCH 033/243] feat(data-generation): Implement dynamic synthetic data generation --- CONTRIBUTING.md | 2 +- dspy/utils/synthetic_data_generation.py | 43 +++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 dspy/utils/synthetic_data_generation.py diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 98095b869f..0bd9514db3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,4 +1,4 @@ -# Contibuting +# Contributing ## Finding Issues diff --git a/dspy/utils/synthetic_data_generation.py b/dspy/utils/synthetic_data_generation.py new file mode 100644 index 0000000000..1d081f5a41 --- /dev/null +++ b/dspy/utils/synthetic_data_generation.py @@ -0,0 +1,43 @@ +from pydantic import BaseModel +import dspy +import random + +def synthetic_data_generation(schema_class: BaseModel, sample_size: int): + class_name = f"{schema_class.__name__}Signature" + + # Fetch schema information + data_schema = 
schema_class.model_json_schema() + properties = data_schema['properties'] + + fields = { + '__doc__': f"Generates the following outputs: {{{', '.join(properties.keys())}}}.", + 'sindex': dspy.InputField(desc="a random string") + } + + for field_name, field_info in properties.items(): + fields[field_name] = dspy.OutputField(desc=field_info.get('description', 'No description')) + + signature_class = type(class_name, (dspy.Signature,), fields) + + generator = dspy.Predict(signature_class, n=sample_size) + response = generator(sindex=str(random.randint(1, sample_size))) + + + # Creation of few_shot_examples using dspy.Example + few_shot_examples = [ + dspy.Example({ + field_name: completion[field_name] for field_name in properties.keys() + }) for completion in response.completions + ] + + return few_shot_examples + +# Example usage: + +# class SyntheticFacts(BaseModel): +# fact: str = Field(..., description="a statement") +# veracity: bool = Field(..., description="an assessment of the veracity of the statement") + +# synthetic_examples = synthetic_data_generation(SyntheticFacts, sample_size=10) + +# print(synthetic_examples) From 42377c0c24f94ee51df192ab35ef55e6066f3f31 Mon Sep 17 00:00:00 2001 From: Franck Stephane Ndzomga Date: Sat, 2 Mar 2024 16:10:30 +0100 Subject: [PATCH 034/243] feat(data-generation): Implement dynamic synthetic data generation --- dspy/utils/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dspy/utils/__init__.py b/dspy/utils/__init__.py index c6f239df08..812fe7914d 100644 --- a/dspy/utils/__init__.py +++ b/dspy/utils/__init__.py @@ -1 +1,2 @@ -from .dummies import * \ No newline at end of file +from .dummies import * +from .synthetic_data_generation import synthetic_data_generation From 4c8119a7aed5011ed7156a054f8b949318c85e6c Mon Sep 17 00:00:00 2001 From: Herumb Shandilya Date: Sat, 2 Mar 2024 23:16:59 +0530 Subject: [PATCH 035/243] Exporter Fix and seed addition --- dspy/datasets/synthesizer.py | 43 +++++++++++++++++++++++------------- 1 file changed, 28 insertions(+), 15 deletions(-) diff --git a/dspy/datasets/synthesizer.py b/dspy/datasets/synthesizer.py index cc5b34e95e..e71ac36025 100644 --- a/dspy/datasets/synthesizer.py +++ b/dspy/datasets/synthesizer.py @@ -1,7 +1,8 @@ import dspy +import random from typing import List from tqdm import tqdm, trange -from datasets import DatasetDict +from datasets import Dataset def format_examples(examples: List[dspy.Example]): if isinstance(examples, str): @@ -38,7 +39,7 @@ class ExplainTask(dspy.Signature): class GenerateFieldDescription(dspy.Signature): """Generate a concise and informative description for a given field based on the provided name and task description. 
This description should be no longer than 10 words and should be in simple english.""" - + task_description = dspy.InputField( prefix="Task Description:", desc="Description of the task the field is an input to.", @@ -53,6 +54,13 @@ class GenerateFieldDescription(dspy.Signature): ) class GenerateInputFieldsData(dspy.Signature): + """Generate synthetic data based on the task description and the given knowledge seed.""" + + knowledge_seed = dspy.InputField( + prefix="Knowledge Seed:", + desc="Seed for the knowledge base search.", + format=lambda x: str(x), + ) task_description = dspy.InputField( prefix="Task Description:", desc="Description of the task the field is an input to.", @@ -78,7 +86,7 @@ def _prepare_synthetic_data_predictors(self, input_keys: List[str], output_keys: field_name = key field_description = field_details.field_description - + output_field = dspy.OutputField( prefix=f"{field_name}:", desc=field_description, @@ -98,7 +106,7 @@ def _prepare_synthetic_data_predictors(self, input_keys: List[str], output_keys: field_name, input_field ) - + for key in tqdm(output_keys, desc="Preparing Output Fields"): field_details = self.generate_field_description( task_description=task_description, @@ -117,8 +125,8 @@ def _prepare_synthetic_data_predictors(self, input_keys: List[str], output_keys: field_name, output_field ) - - return dspy.Predict(self.generate_input_data), dspy.Predict(self.generate_output_data) + + return dspy.ChainOfThought(self.generate_input_data), dspy.Predict(self.generate_output_data) def generate(self, examples: List[dspy.Example], num_data: int) -> List[dspy.Example]: task_description = self.explain_task(examples=examples).explanation @@ -136,8 +144,8 @@ def generate(self, examples: List[dspy.Example], num_data: int) -> List[dspy.Exa data = [] for idx in trange(num_data, desc="Generating Synthetic Data"): - inputs = self.input_predictor(task_description=task_description, config=dict(temperature=0.7+0.01*idx)) - + inputs = self.input_predictor(task_description=task_description, knowledge_seed=random.randint(0, 1000000), config=dict(temperature=0.7+0.01*idx)) + input_kwargs = { key: getattr(inputs, key) for key in input_keys @@ -153,15 +161,20 @@ def generate(self, examples: List[dspy.Example], num_data: int) -> List[dspy.Exa data.append(dspy.Example(**input_kwargs, **output_kwargs).with_inputs(*input_keys)) return data - - def export(self, data: List[dspy.Example], path: str, mode: str = None): + + def export(self, data: List[dspy.Example], path: str, mode: str = None, **kwargs): extention = mode or path.split(".")[-1] - dataset_dict = DatasetDict( + + dataset = Dataset.from_list( [example.toDict() for example in data] ) - dataset_dict.save_to_disk( - path=path, - extention=extention, - ) \ No newline at end of file + if extention == "csv": + dataset.to_csv(path_or_buf=path, **kwargs) + + elif extention == "json": + dataset.to_json(path_or_buf=path, **kwargs) + + elif extention == "arrow" or extention == "hf": + dataset.save_to_disk(path) \ No newline at end of file From ad861337a02c2c65c1f850e056019b03971c5467 Mon Sep 17 00:00:00 2001 From: Herumb Shandilya Date: Sun, 3 Mar 2024 02:29:45 +0530 Subject: [PATCH 036/243] Move synthesizer to experimental folder --- dspy/{datasets => experimental}/synthesizer.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename dspy/{datasets => experimental}/synthesizer.py (100%) diff --git a/dspy/datasets/synthesizer.py b/dspy/experimental/synthesizer.py similarity index 100% rename from dspy/datasets/synthesizer.py rename to 
dspy/experimental/synthesizer.py From 94a3cd33106baf5eb84fd12b68a2ca6fba04d0df Mon Sep 17 00:00:00 2001 From: Franck Stephane Ndzomga Date: Sat, 2 Mar 2024 22:02:58 +0100 Subject: [PATCH 037/243] feat(data-generation): Implement dynamic synthetic data generation from pydantic model or via initial sample data --- dspy/experimental/synthetic_data.py | 68 +++++++++++++++++++++++++ dspy/utils/__init__.py | 1 - dspy/utils/synthetic_data_generation.py | 43 ---------------- 3 files changed, 68 insertions(+), 44 deletions(-) create mode 100644 dspy/experimental/synthetic_data.py delete mode 100644 dspy/utils/synthetic_data_generation.py diff --git a/dspy/experimental/synthetic_data.py b/dspy/experimental/synthetic_data.py new file mode 100644 index 0000000000..a8258c8646 --- /dev/null +++ b/dspy/experimental/synthetic_data.py @@ -0,0 +1,68 @@ +from pydantic import BaseModel +import dspy +import random +from typing import List, Optional + +class descriptionSignature(dspy.Signature): + field_name = dspy.InputField(desc="name of a field") + example = dspy.InputField(desc="an example value for the field") + description = dspy.OutputField(desc="a short text only description of what the field contains") + +class SyntheticDataGenerator: + def __init__(self, schema_class: Optional[BaseModel] = None, examples: Optional[List[dspy.Example]] = None): + self.schema_class = schema_class + self.examples = examples + + def generate(self, sample_size: int) -> List[dspy.Example]: + if not self.schema_class and not self.examples: + raise ValueError("Either a schema_class or examples must be provided.") + if self.examples and len(self.examples) >= sample_size: + print("No additional data generation needed.") + return self.examples[:sample_size] + + additional_samples_needed = sample_size - (len(self.examples) if self.examples else 0) + generated_examples = self._generate_additional_examples(additional_samples_needed) + + return self.examples + generated_examples if self.examples else generated_examples + + def _define_or_infer_fields(self): + if self.schema_class: + data_schema = self.schema_class.model_json_schema() + properties = data_schema['properties'] + elif self.examples: + inferred_schema = self.examples[0].__dict__['_store'] + descriptor = dspy.Predict(descriptionSignature) + properties = {field: {'description': str((descriptor(field_name=field, example=str(inferred_schema[field]))).description)} + for field in inferred_schema.keys()} + else: + properties = {} + return properties + + def _generate_additional_examples(self, additional_samples_needed: int) -> List[dspy.Example]: + properties = self._define_or_infer_fields() + class_name = f"{self.schema_class.__name__ if self.schema_class else 'Inferred'}Signature" + fields = self._prepare_fields(properties, class_name) + + signature_class = type(class_name, (dspy.Signature,), fields) + generator = dspy.Predict(signature_class, n=additional_samples_needed) + response = generator(sindex=str(random.randint(1, additional_samples_needed))) + + return [dspy.Example({field_name: getattr(completion, field_name) for field_name in properties.keys()}) + for completion in response.completions] + + def _prepare_fields(self, properties, class_name) -> dict: + return { + '__doc__': f"Generates the following outputs: {{{', '.join(properties.keys())}}}.", + 'sindex': dspy.InputField(desc="a random string"), + **{field_name: dspy.OutputField(desc=properties[field_name].get('description', 'No description')) + for field_name in properties.keys()} + } + +# # Usage example +# # 
Generating synthetic data via a pydantic model +# generator = SyntheticDataGenerator(schema_class=SyntheticFacts) +# examples = generator.generate(sample_size=6) + +# # Generating synthetic data via existing examples +# generator = SyntheticDataGenerator(examples=existing_examples) +# examples = generator.generate(sample_size=5) diff --git a/dspy/utils/__init__.py b/dspy/utils/__init__.py index 812fe7914d..9f8b201f6b 100644 --- a/dspy/utils/__init__.py +++ b/dspy/utils/__init__.py @@ -1,2 +1 @@ from .dummies import * -from .synthetic_data_generation import synthetic_data_generation diff --git a/dspy/utils/synthetic_data_generation.py b/dspy/utils/synthetic_data_generation.py deleted file mode 100644 index 1d081f5a41..0000000000 --- a/dspy/utils/synthetic_data_generation.py +++ /dev/null @@ -1,43 +0,0 @@ -from pydantic import BaseModel -import dspy -import random - -def synthetic_data_generation(schema_class: BaseModel, sample_size: int): - class_name = f"{schema_class.__name__}Signature" - - # Fetch schema information - data_schema = schema_class.model_json_schema() - properties = data_schema['properties'] - - fields = { - '__doc__': f"Generates the following outputs: {{{', '.join(properties.keys())}}}.", - 'sindex': dspy.InputField(desc="a random string") - } - - for field_name, field_info in properties.items(): - fields[field_name] = dspy.OutputField(desc=field_info.get('description', 'No description')) - - signature_class = type(class_name, (dspy.Signature,), fields) - - generator = dspy.Predict(signature_class, n=sample_size) - response = generator(sindex=str(random.randint(1, sample_size))) - - - # Creation of few_shot_examples using dspy.Example - few_shot_examples = [ - dspy.Example({ - field_name: completion[field_name] for field_name in properties.keys() - }) for completion in response.completions - ] - - return few_shot_examples - -# Example usage: - -# class SyntheticFacts(BaseModel): -# fact: str = Field(..., description="a statement") -# veracity: bool = Field(..., description="an assessment of the veracity of the statement") - -# synthetic_examples = synthetic_data_generation(SyntheticFacts, sample_size=10) - -# print(synthetic_examples) From 76750d2058044901373cf0b016ac00767ad064b1 Mon Sep 17 00:00:00 2001 From: Franck Stephane Ndzomga Date: Sat, 2 Mar 2024 22:07:41 +0100 Subject: [PATCH 038/243] feat(data-generation): Implement dynamic synthetic data generation from pydantic model or via initial sample data --- dspy/experimental/__init__.py | 1 + 1 file changed, 1 insertion(+) create mode 100644 dspy/experimental/__init__.py diff --git a/dspy/experimental/__init__.py b/dspy/experimental/__init__.py new file mode 100644 index 0000000000..9fb504bf69 --- /dev/null +++ b/dspy/experimental/__init__.py @@ -0,0 +1 @@ +from .synthetic_data import * From c73f9c0b6b922ad9a631c5fb79a624cae39af418 Mon Sep 17 00:00:00 2001 From: Franck Stephane Ndzomga Date: Sat, 2 Mar 2024 22:23:33 +0100 Subject: [PATCH 039/243] feat(data-generation): Implement dynamic synthetic data generation from pydantic model or via initial sample data --- dspy/experimental/synthetic_data.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dspy/experimental/synthetic_data.py b/dspy/experimental/synthetic_data.py index a8258c8646..d5177fcace 100644 --- a/dspy/experimental/synthetic_data.py +++ b/dspy/experimental/synthetic_data.py @@ -41,7 +41,7 @@ def _define_or_infer_fields(self): def _generate_additional_examples(self, additional_samples_needed: int) -> List[dspy.Example]: properties = 
self._define_or_infer_fields() class_name = f"{self.schema_class.__name__ if self.schema_class else 'Inferred'}Signature" - fields = self._prepare_fields(properties, class_name) + fields = self._prepare_fields(properties) signature_class = type(class_name, (dspy.Signature,), fields) generator = dspy.Predict(signature_class, n=additional_samples_needed) @@ -50,7 +50,7 @@ def _generate_additional_examples(self, additional_samples_needed: int) -> List[ return [dspy.Example({field_name: getattr(completion, field_name) for field_name in properties.keys()}) for completion in response.completions] - def _prepare_fields(self, properties, class_name) -> dict: + def _prepare_fields(self, properties) -> dict: return { '__doc__': f"Generates the following outputs: {{{', '.join(properties.keys())}}}.", 'sindex': dspy.InputField(desc="a random string"), From 687e6de4646f9a405b2e27957467dad3d47bd4ad Mon Sep 17 00:00:00 2001 From: Herumb Shandilya Date: Sun, 3 Mar 2024 02:55:47 +0530 Subject: [PATCH 040/243] make task description and keys optional --- dspy/experimental/synthesizer.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dspy/experimental/synthesizer.py b/dspy/experimental/synthesizer.py index e71ac36025..56e5ec2c21 100644 --- a/dspy/experimental/synthesizer.py +++ b/dspy/experimental/synthesizer.py @@ -128,12 +128,12 @@ def _prepare_synthetic_data_predictors(self, input_keys: List[str], output_keys: return dspy.ChainOfThought(self.generate_input_data), dspy.Predict(self.generate_output_data) - def generate(self, examples: List[dspy.Example], num_data: int) -> List[dspy.Example]: - task_description = self.explain_task(examples=examples).explanation + def generate(self, examples: List[dspy.Example], num_data: int, task_description: str = None, input_keys: str = None, output_keys: str = None) -> List[dspy.Example]: + task_description = task_description or self.explain_task(examples=examples).explanation self.generate_output_data.__doc__ = task_description - input_keys = [key for key in examples[0].inputs()] - output_keys = [key for key in examples[0].labels()] + input_keys = input_keys or [key for key in examples[0].inputs()] + output_keys = output_keys or [key for key in examples[0].labels()] self.input_predictor, self.output_predictor = self._prepare_synthetic_data_predictors( input_keys=input_keys, From 88c929c31e1e2a4649c79fe6020aaa1990a0b4b7 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Sat, 2 Mar 2024 13:45:09 -0800 Subject: [PATCH 041/243] Moved import from evaluate.py to __init__ so it doesn't get removed --- dspy/evaluate/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dspy/evaluate/__init__.py b/dspy/evaluate/__init__.py index 0b71ccfac7..eea386c599 100644 --- a/dspy/evaluate/__init__.py +++ b/dspy/evaluate/__init__.py @@ -1,3 +1,4 @@ from .evaluate import Evaluate from .metrics import * -from .auto_evaluation import * \ No newline at end of file +from .auto_evaluation import * +from dsp.utils import EM, normalize_text From 36d91f48773b2bf50d3f5855c817e01f0ec13bb7 Mon Sep 17 00:00:00 2001 From: Herumb Shandilya Date: Sun, 3 Mar 2024 03:30:08 +0530 Subject: [PATCH 042/243] Update knowledge seed description --- dspy/experimental/synthesizer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dspy/experimental/synthesizer.py b/dspy/experimental/synthesizer.py index 56e5ec2c21..e247d35f3b 100644 --- a/dspy/experimental/synthesizer.py +++ b/dspy/experimental/synthesizer.py @@ -58,7 +58,7 @@ class 
GenerateInputFieldsData(dspy.Signature): knowledge_seed = dspy.InputField( prefix="Knowledge Seed:", - desc="Seed for the knowledge base search.", + desc="Seed for the knowledge base search to base the inputs around.", format=lambda x: str(x), ) task_description = dspy.InputField( From 8b516f69d4691cb43a3dac472dc28219d5a045d6 Mon Sep 17 00:00:00 2001 From: Herumb Shandilya <43719685+krypticmouse@users.noreply.github.com> Date: Sun, 3 Mar 2024 03:34:58 +0530 Subject: [PATCH 043/243] Add synthesizer to __init__.py --- dspy/experimental/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/dspy/experimental/__init__.py b/dspy/experimental/__init__.py index 9fb504bf69..b4385639db 100644 --- a/dspy/experimental/__init__.py +++ b/dspy/experimental/__init__.py @@ -1 +1,2 @@ from .synthetic_data import * +from .synthesizer import * From 82524baf49c52f1ff38a1d42384fa92b0d9e5cfb Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Sat, 2 Mar 2024 14:28:55 -0800 Subject: [PATCH 044/243] Support for running notebooks off colab --- examples/longformqa/longformqa_assertions.ipynb | 6 ++++++ examples/quiz/quiz_assertions.ipynb | 6 ++++++ examples/tweets/tweets_assertions.ipynb | 6 ++++++ 3 files changed, 18 insertions(+) diff --git a/examples/longformqa/longformqa_assertions.ipynb b/examples/longformqa/longformqa_assertions.ipynb index d3059b1cbc..b37be6adf6 100644 --- a/examples/longformqa/longformqa_assertions.ipynb +++ b/examples/longformqa/longformqa_assertions.ipynb @@ -47,6 +47,12 @@ "import os\n", "repo_clone_path = '/content/DSPy_LongFormQA_Cache'\n", "\n", + "# Check if '/content' is writable\n", + "if not os.access('/content', os.W_OK):\n", + " # If '/content' is not writable, choose an alternative directory\n", + " # Example: using a directory relative to the current working directory\n", + " repo_clone_path = os.path.join(os.getcwd(), 'DSPy_LongFormQA_Cache')\n", + "\n", "# Set up the cache for this notebook\n", "os.environ[\"DSP_NOTEBOOK_CACHEDIR\"] = repo_clone_path" ] diff --git a/examples/quiz/quiz_assertions.ipynb b/examples/quiz/quiz_assertions.ipynb index 8cdbdbcc9d..48385d97e3 100644 --- a/examples/quiz/quiz_assertions.ipynb +++ b/examples/quiz/quiz_assertions.ipynb @@ -37,6 +37,12 @@ "import os\n", "repo_clone_path = '/content/DSPy_QuizGen_Cache'\n", "\n", + "# Check if '/content' is writable\n", + "if not os.access('/content', os.W_OK):\n", + " # If '/content' is not writable, choose an alternative directory\n", + " # Example: using a directory relative to the current working directory\n", + " repo_clone_path = os.path.join(os.getcwd(), 'DSPy_QuizGen_Cache')\n", + "\n", "# Set up the cache for this notebook\n", "os.environ[\"DSP_NOTEBOOK_CACHEDIR\"] = repo_clone_path" ] diff --git a/examples/tweets/tweets_assertions.ipynb b/examples/tweets/tweets_assertions.ipynb index 22c2db296a..46afc29cc7 100644 --- a/examples/tweets/tweets_assertions.ipynb +++ b/examples/tweets/tweets_assertions.ipynb @@ -37,6 +37,12 @@ "import os\n", "repo_clone_path = '/content/DSPy_TweetGen_Cache'\n", "\n", + "# Check if '/content' is writable\n", + "if not os.access('/content', os.W_OK):\n", + " # If '/content' is not writable, choose an alternative directory\n", + " # Example: using a directory relative to the current working directory\n", + " repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache')\n", + "\n", "# Set up the cache for this notebook\n", "os.environ[\"DSP_NOTEBOOK_CACHEDIR\"] = repo_clone_path" ] From a1640af862d9b52bb655cb7a481d66f1f2685eac Mon Sep 17 00:00:00 2001 From: 
Thomas Dybdahl Ahle Date: Sat, 2 Mar 2024 14:48:55 -0800 Subject: [PATCH 045/243] Update README.md Added HumanEval notebook to list of examples --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 48133a88ac..3604ac6d7b 100644 --- a/README.md +++ b/README.md @@ -139,6 +139,8 @@ You can find other examples tweeted by [@lateinteraction](https://twitter.com/la - [DSPy on BIG-Bench Hard Example, by Chris Levy](https://drchrislevy.github.io/posts/dspy/dspy.html) - [Using Ollama with DSPy for Mistral (quantized) by @jrknox1977](https://gist.github.com/jrknox1977/78c17e492b5a75ee5bbaf9673aee4641) - [Using DSPy, "The Unreasonable Effectiveness of Eccentric Automatic Prompts" (paper) by VMware's Rick Battle & Teja Gollapudi, and interview at TheRegister](https://www.theregister.com/2024/02/22/prompt_engineering_ai_models/) +- Typed DSPy (contributed by @normal-computing) + - [Using DSPy to train Gpt 3.5 on HumanEval by @thomasahle](https://github.com/stanfordnlp/dspy/blob/main/examples/functional/functional.ipynb) There are also recent cool examples at [Weaviate's DSPy cookbook](https://github.com/weaviate/recipes/tree/main/integrations/dspy) by Connor Shorten. [See tutorial on YouTube](https://www.youtube.com/watch?v=CEuUG4Umfxs). From fe23a0614845261cbaf842010f9c84171eb077df Mon Sep 17 00:00:00 2001 From: Thomas Dybdahl Ahle Date: Sat, 2 Mar 2024 14:49:19 -0800 Subject: [PATCH 046/243] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3604ac6d7b..6ea9d3c80d 100644 --- a/README.md +++ b/README.md @@ -139,7 +139,7 @@ You can find other examples tweeted by [@lateinteraction](https://twitter.com/la - [DSPy on BIG-Bench Hard Example, by Chris Levy](https://drchrislevy.github.io/posts/dspy/dspy.html) - [Using Ollama with DSPy for Mistral (quantized) by @jrknox1977](https://gist.github.com/jrknox1977/78c17e492b5a75ee5bbaf9673aee4641) - [Using DSPy, "The Unreasonable Effectiveness of Eccentric Automatic Prompts" (paper) by VMware's Rick Battle & Teja Gollapudi, and interview at TheRegister](https://www.theregister.com/2024/02/22/prompt_engineering_ai_models/) -- Typed DSPy (contributed by @normal-computing) +- Typed DSPy (contributed by [@normal-computing](https://github.com/normal-computing)) - [Using DSPy to train Gpt 3.5 on HumanEval by @thomasahle](https://github.com/stanfordnlp/dspy/blob/main/examples/functional/functional.ipynb) There are also recent cool examples at [Weaviate's DSPy cookbook](https://github.com/weaviate/recipes/tree/main/integrations/dspy) by Connor Shorten. [See tutorial on YouTube](https://www.youtube.com/watch?v=CEuUG4Umfxs). 
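The typed-DSPy links added to the README above connect back to the `predictor` and `cot` decorators from the functional.py lint patch earlier in this series. Below is a minimal usage sketch of those decorators; the function names, docstrings, and model name are invented for illustration, and it assumes `dspy.functional` exports `predictor` and `cot` as that diff suggests:

```python
# Illustrative sketch only: the two signatures below are hypothetical, and an
# LM must be configured before calling the decorated modules.
import dspy
from dspy.functional import cot, predictor

dspy.settings.configure(lm=dspy.OpenAI(model="gpt-3.5-turbo"))  # assumed model name

@predictor
def summarize(text: str) -> str:
    """Summarize the text in one sentence."""

@cot
def digit_sum(number: str) -> int:
    """Return the sum of the digits in the number."""

# Each decorated function becomes a dspy.Module that returns the typed output.
print(summarize(text="DSPy compiles declarative language-model programs."))
print(digit_sum(number="1234"))  # parsed into an int by the typed predictor
```

Compared with writing a Signature class by hand, the decorators derive the signature from the function's type annotations and docstring, as `_func_to_signature` in the patched module implies.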
From cd8ef430668a51fac0478bfb3333dc81c52aa9f6 Mon Sep 17 00:00:00 2001 From: Omar Khattab Date: Sat, 2 Mar 2024 14:56:33 -0800 Subject: [PATCH 047/243] Update setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index e8c20f052d..acb39fedc0 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ setup( name="dspy-ai", - version="2.3.4", + version="2.3.5", description="DSPy", long_description=long_description, long_description_content_type='text/markdown', From d461bb744c3d77ed4ed3b8bb6e788359b050fe8d Mon Sep 17 00:00:00 2001 From: Thomas Dybdahl Ahle Date: Sat, 2 Mar 2024 14:56:52 -0800 Subject: [PATCH 048/243] Update README.md Initial types example --- README.md | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 6ea9d3c80d..bc0cc2ed6a 100644 --- a/README.md +++ b/README.md @@ -276,9 +276,32 @@ compiled_rag = teleprompter.compile(RAG(), trainset=my_rag_trainset) If we now use `compiled_rag`, it will invoke our LM with rich prompts with few-shot demonstrations of chain-of-thought retrieval-augmented question answering on our data. +## 5) Pydantic Types +Sometimes you need more than just string inputs/outputs. +Assume, for example, you need to find structured flight information in an email: 
From 8b39ad23f47f05896b7066c6153cc60324330cbc Mon Sep 17 00:00:00 2001 From: Omar Khattab Date: Sat, 2 Mar 2024 14:56:57 -0800 Subject: [PATCH 049/243] Update pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 5987d710ca..280d22ddb7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "dspy-ai" -version = "2.3.4" +version = "2.3.5" description = "DSPy" readme = "README.md" authors = [{ name = "Omar Khattab", email = "okhattab@stanford.edu" }] From 8517db6b156c818fac6cffe2d78d8317489d1c79 Mon Sep 17 00:00:00 2001 From: Omar Khattab Date: Sat, 2 Mar 2024 15:15:36 -0800 Subject: [PATCH 050/243] Update setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 8fe1b533de..d31e9c7913 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ setup( name="dspy-ai", - version="2.3.5", + version="2.3.6", description="DSPy", long_description=long_description, long_description_content_type='text/markdown', From 7818700235b00c568ce6c314fb152a18bb8f6332 Mon Sep 17 00:00:00 2001 From: Omar Khattab Date: Sat, 2 Mar 2024 15:15:49 -0800 Subject: [PATCH 051/243] Update pyproject.toml --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index dc6469691c..c1ad1e8210 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "dspy-ai" -version = "2.3.5" +version = "2.3.6" description = "DSPy" readme = "README.md" authors = [{ name = "Omar Khattab", email = "okhattab@stanford.edu" }] @@ -284,4 +284,4 @@ convention = "google" [tool.ruff.lint.per-file-ignores] "**/{tests,docs}/*" = ["ALL"] -"**__init__.py" = ["F401"] \ No newline at end of file +"**__init__.py" = ["F401"] From cbed0b9cb96c6f8696fc461f8a12e6057dec2f52 Mon Sep 17 00:00:00 2001 From: Omar Khattab Date: Sat, 2 Mar 2024 23:17:45 +0000 Subject: [PATCH 052/243] OpenAI: No retry on ServiceUnavailableError, APIError --- dsp/modules/gpt3.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/dsp/modules/gpt3.py b/dsp/modules/gpt3.py index e19ca40f54..be1d1e7f4b 100644 --- a/dsp/modules/gpt3.py +++ b/dsp/modules/gpt3.py @@ -30,11 +30,9 @@ ERRORS = ( openai.error.RateLimitError, - openai.error.ServiceUnavailableError, - openai.error.APIError, ) except Exception: - ERRORS = (openai.RateLimitError, openai.APIError) + ERRORS = (openai.RateLimitError,) OpenAIObject = dict From 6ba413079f8c26a78372c086ff5068ff80a1c2ea Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Sat, 2 Mar 2024 19:20:15 -0800 Subject: [PATCH 053/243] Some refinements to signature, allowing instantiation and fixing lint issues. 
--- dspy/signatures/signature.py | 256 +++++++++++++++-------------- pyproject.toml | 14 -- tests/signatures/test_signature.py | 40 +++-- 3 files changed, 160 insertions(+), 150 deletions(-) diff --git a/dspy/signatures/signature.py b/dspy/signatures/signature.py index 7cd2f170cb..a335f219f0 100644 --- a/dspy/signatures/signature.py +++ b/dspy/signatures/signature.py @@ -3,37 +3,42 @@ import dsp from pydantic import BaseModel, Field, create_model from pydantic.fields import FieldInfo -from typing import Type, Union, Dict, Tuple +from typing import Type, Union, Dict, Tuple # noqa: UP035 import re from dspy.signatures.field import InputField, OutputField, new_to_old_field -def signature_to_template(signature): - """Convert from new to legacy format""" +def signature_to_template(signature) -> dsp.Template: + """Convert from new to legacy format.""" return dsp.Template( signature.instructions, **{name: new_to_old_field(field) for name, field in signature.fields.items()}, ) -def _default_instructions(cls): - inputs_ = ", ".join([f"`{field}`" for field in cls.input_fields.keys()]) - outputs_ = ", ".join([f"`{field}`" for field in cls.output_fields.keys()]) +def _default_instructions(cls) -> str: + inputs_ = ", ".join([f"`{field}`" for field in cls.input_fields]) + outputs_ = ", ".join([f"`{field}`" for field in cls.output_fields]) return f"Given the fields {inputs_}, produce the fields {outputs_}." class SignatureMeta(type(BaseModel)): - def __new__(mcs, name, bases, namespace, **kwargs): + def __call__(cls, *args, **kwargs): # noqa: ANN002 + if cls is Signature: + return make_signature(*args, **kwargs) + return super().__call__(*args, **kwargs) + + def __new__(mcs, signature_name, bases, namespace, **kwargs): # noqa: N804 # Set `str` as the default type for all fields raw_annotations = namespace.get("__annotations__", {}) - for name, field in namespace.items(): + for name, _field in namespace.items(): if not name.startswith("__") and name not in raw_annotations: raw_annotations[name] = str namespace["__annotations__"] = raw_annotations # Let Pydantic do its thing - cls = super().__new__(mcs, name, bases, namespace, **kwargs) + cls = super().__new__(mcs, signature_name, bases, namespace, **kwargs) if cls.__doc__ is None: cls.__doc__ = _default_instructions(cls) @@ -69,17 +74,20 @@ def signature(cls) -> str: def instructions(cls) -> str: return getattr(cls, "__doc__", "") - def with_instructions(cls, instructions: str): + def with_instructions(cls, instructions: str) -> Type["Signature"]: return Signature(cls.fields, instructions) @property - def fields(cls): + def fields(cls) -> dict[str, FieldInfo]: # Make sure to give input fields before output fields return {**cls.input_fields, **cls.output_fields} - def with_updated_fields(cls, name, type_=None, **kwargs): - """Returns a new Signature type with the field, name, updated - with fields[name].json_schema_extra[key] = value.""" + def with_updated_fields(cls, name, type_=None, **kwargs) -> Type["Signature"]: + """Update the field, name, in a new Signature type. + + Returns a new Signature type with the field, name, updated + with fields[name].json_schema_extra[key] = value. 
+ """ fields_copy = deepcopy(cls.fields) fields_copy[name].json_schema_extra = { **fields_copy[name].json_schema_extra, @@ -90,27 +98,23 @@ def with_updated_fields(cls, name, type_=None, **kwargs): return Signature(fields_copy, cls.instructions) @property - def input_fields(cls): + def input_fields(cls) -> dict[str, FieldInfo]: return cls._get_fields_with_type("input") @property - def output_fields(cls): + def output_fields(cls) -> dict[str, FieldInfo]: return cls._get_fields_with_type("output") - def _get_fields_with_type(cls, field_type): - return { - k: v - for k, v in cls.model_fields.items() - if v.json_schema_extra["__dspy_field_type"] == field_type - } + def _get_fields_with_type(cls, field_type) -> dict[str, FieldInfo]: + return {k: v for k, v in cls.model_fields.items() if v.json_schema_extra["__dspy_field_type"] == field_type} - def prepend(cls, name, field, type_=None): + def prepend(cls, name, field, type_=None) -> Type["Signature"]: return cls.insert(0, name, field, type_) - def append(cls, name, field, type_=None): + def append(cls, name, field, type_=None) -> Type["Signature"]: return cls.insert(-1, name, field, type_) - def insert(cls, index: int, name: str, field, type_: Type = None): + def insert(cls, index: int, name: str, field, type_: Type = None) -> Type["Signature"]: # It's posisble to set the type as annotation=type in pydantic.Field(...) # But this may be annoying for users, so we allow them to pass the type if type_ is None: @@ -122,11 +126,7 @@ def insert(cls, index: int, name: str, field, type_: Type = None): output_fields = list(cls.output_fields.items()) # Choose the list to insert into based on the field type - lst = ( - input_fields - if field.json_schema_extra["__dspy_field_type"] == "input" - else output_fields - ) + lst = input_fields if field.json_schema_extra["__dspy_field_type"] == "input" else output_fields # We support negative insert indices if index < 0: index += len(lst) + 1 @@ -137,83 +137,7 @@ def insert(cls, index: int, name: str, field, type_: Type = None): new_fields = dict(input_fields + output_fields) return Signature(new_fields, cls.instructions) - def _parse_signature(cls, signature: str) -> Tuple[Type, Field]: - pattern = r"^\s*[\w\s,]+\s*->\s*[\w\s,]+\s*$" - if not re.match(pattern, signature): - raise ValueError(f"Invalid signature format: '{signature}'") - - fields = {} - inputs_str, outputs_str = map(str.strip, signature.split("->")) - inputs = [v.strip() for v in inputs_str.split(",") if v.strip()] - outputs = [v.strip() for v in outputs_str.split(",") if v.strip()] - for name in inputs: - fields[name] = (str, InputField()) - for name in outputs: - fields[name] = (str, OutputField()) - - return fields - - def __call__( - cls, - signature: Union[str, Dict[str, Tuple[type, FieldInfo]]], - instructions: str = None, - ): - """ - Creates a new Signature type with the given fields and instructions. - Note: - Even though we're calling a type, we're not making an instance of the type. - In general we don't allow instances of Signature types to be made. The call - syntax is only for your convenience. - Parameters: - signature: Format: "input1, input2 -> output1, output2" - instructions: Optional prompt for the signature. 
- """ - - if isinstance(signature, str): - fields = cls._parse_signature(signature) - else: - fields = signature - - # Validate the fields, this is important because we sometimes forget the - # slightly unintuitive syntax with tuples of (type, Field) - fixed_fields = {} - for name, type_field in fields.items(): - assert isinstance( - name, str, - ), f"Field names must be strings, not {type(name)}" - if isinstance(type_field, FieldInfo): - type_ = type_field.annotation - field = type_field - else: - assert isinstance( - type_field, tuple, - ), f"Field values must be tuples, not {type(type_field)}" - type_, field = type_field - # It might be better to be explicit about the type, but it currently would break - # program of thought and teleprompters, so we just silently default to string. - if type_ is None: - type_ = str - assert isinstance(type_, type) or isinstance( - typing.get_origin(type_), type, - ), f"Field types must be types, not {type(type_)}" - assert isinstance( - field, FieldInfo, - ), f"Field values must be Field instances, not {type(field)}" - fixed_fields[name] = (type_, field) - - # Fixing the fields shouldn't change the order - assert list(fixed_fields.keys()) == list(fields.keys()) - - # Default prompt when no instructions are provided - if instructions is None: - sig = Signature(signature, "") # Simple way to parse input/output fields - instructions = _default_instructions(sig) - - signature = create_model("Signature", __base__=Signature, **fixed_fields) - signature.__doc__ = instructions - return signature - - def equals(cls, other): + def equals(cls, other) -> bool: """Compare the JSON schema of two Pydantic models.""" if not isinstance(other, type) or not issubclass(other, BaseModel): return False @@ -226,30 +150,44 @@ def equals(cls, other): return True def __repr__(cls): - """ - Outputs something on the form: + """Output a representation of the signature. + + Uses the form: Signature(question, context -> answer question: str = InputField(desc="..."), context: List[str] = InputField(desc="..."), answer: int = OutputField(desc="..."), - ) + ). """ field_reprs = [] for name, field in cls.fields.items(): field_reprs.append(f"{name} = Field({field})") field_repr = "\n ".join(field_reprs) - return ( - f"Signature({cls.signature}\n" - f" instructions={repr(cls.instructions)}\n" - f" {field_repr}\n)" - ) + return f"{cls.__name__}({cls.signature}\n instructions={repr(cls.instructions)}\n {field_repr}\n)" class Signature(BaseModel, metaclass=SignatureMeta): + """A signature for a predictor. + + You typically subclass it, like this: + class MySignature(Signature): + input: str = InputField(desc="...") + output: int = OutputField(desc="...") + + You can call Signature("input1, input2 -> output1, output2") to create a new signature type. + You can also include instructions, Signature("input -> output", "This is a test"). + But it's generally better to use the make_signature function. + + If you are not sure if your input is a string representation, (like "input1, input2 -> output1, output2"), + or a signature, you can use the ensure_signature function. + + For compatibility with the legacy dsp format, you can use the signature_to_template function. 
+ """ + pass -def ensure_signature(signature): +def ensure_signature(signature: str | Type[Signature]) -> Signature: if signature is None: return None if isinstance(signature, str): @@ -257,19 +195,97 @@ def ensure_signature(signature): return signature -def infer_prefix(attribute_name: str) -> str: - """Infers a prefix from an attribute name.""" +def make_signature( + signature: Union[str, Dict[str, Tuple[type, FieldInfo]]], + instructions: str = None, + signature_name: str = "StringSignature", +) -> Type[Signature]: + """Create a new Signature type with the given fields and instructions. + + Note: + Even though we're calling a type, we're not making an instance of the type. + In general, instances of Signature types are not allowed to be made. The call + syntax is provided for convenience. + + Args: + signature: The signature format, specified as "input1, input2 -> output1, output2". + instructions: An optional prompt for the signature. + signature_name: An optional name for the new signature type. + """ + fields = _parse_signature(signature) if isinstance(signature, str) else signature + + # Validate the fields, this is important because we sometimes forget the + # slightly unintuitive syntax with tuples of (type, Field) + fixed_fields = {} + for name, type_field in fields.items(): + if not isinstance(name, str): + raise ValueError(f"Field names must be strings, not {type(name)}") + if isinstance(type_field, FieldInfo): + type_ = type_field.annotation + field = type_field + else: + if not isinstance(type_field, tuple): + raise ValueError(f"Field values must be tuples, not {type(type_field)}") + type_, field = type_field + # It might be better to be explicit about the type, but it currently would break + # program of thought and teleprompters, so we just silently default to string. 
+ if type_ is None: + type_ = str + if not isinstance(type_, type) and not isinstance(typing.get_origin(type_), type): + raise ValueError(f"Field types must be types, not {type(type_)}") + if not isinstance(field, FieldInfo): + raise ValueError(f"Field values must be Field instances, not {type(field)}") + fixed_fields[name] = (type_, field) + + # Fixing the fields shouldn't change the order + assert list(fixed_fields.keys()) == list(fields.keys()) # noqa: S101 + + # Default prompt when no instructions are provided + if instructions is None: + sig = Signature(signature, "") # Simple way to parse input/output fields + instructions = _default_instructions(sig) + + return create_model( + signature_name, + __base__=Signature, + __doc__=instructions, + **fixed_fields, + ) + +def _parse_signature(signature: str) -> Tuple[Type, Field]: + pattern = r"^\s*[\w\s,]+\s*->\s*[\w\s,]+\s*$" + if not re.match(pattern, signature): + raise ValueError(f"Invalid signature format: '{signature}'") + + fields = {} + inputs_str, outputs_str = map(str.strip, signature.split("->")) + inputs = [v.strip() for v in inputs_str.split(",") if v.strip()] + outputs = [v.strip() for v in outputs_str.split(",") if v.strip()] + for name in inputs: + fields[name] = (str, InputField()) + for name in outputs: + fields[name] = (str, OutputField()) + + return fields + + +def infer_prefix(attribute_name: str) -> str: + """Infer a prefix from an attribute name.""" # Convert camelCase to snake_case, but handle sequences of capital letters properly s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", attribute_name) intermediate_name = re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1) # Insert underscores around numbers to ensure spaces in the final output with_underscores_around_numbers = re.sub( - r"([a-zA-Z])(\d)", r"\1_\2", intermediate_name, + r"([a-zA-Z])(\d)", + r"\1_\2", + intermediate_name, ) with_underscores_around_numbers = re.sub( - r"(\d)([a-zA-Z])", r"\1_\2", with_underscores_around_numbers, + r"(\d)([a-zA-Z])", + r"\1_\2", + with_underscores_around_numbers, ) # Convert snake_case to 'Proper Title Case', but ensure acronyms are uppercased diff --git a/pyproject.toml b/pyproject.toml index bdb4bc75d9..7828de0cf3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -232,28 +232,14 @@ ignore = [ "ANN003", # utf-8 encoding skip "UP009", - # First argument of a method should be named `self` - "N805", - # 1 blank line required between summary line and description - "D205", # Missing return type annotation for special method `__init__` "ANN204", - # Avoid using the generic variable name `df` for DataFrames - "PD901", - # Unnecessary assignment to `df` before `return` statement - "RET504", - # commented code - "ERA001", # Star-arg unpacking after a keyword argument is strongly discouraged "B026", # Missing type annotation for function argument `self` "ANN001", # Dynamically typed expressions (typing.Any) are disallowed in `wrapper` "ANN401", - # Unnecessary `elif` after `return` statement - "RET505", - # Within an `except` clause, raise exceptions with `raise - "B904", # We don't need docstrings for every method "ANN202", "D107", diff --git a/tests/signatures/test_signature.py b/tests/signatures/test_signature.py index b093258540..26fdfc7aa0 100644 --- a/tests/signatures/test_signature.py +++ b/tests/signatures/test_signature.py @@ -39,12 +39,8 @@ class TestSignature(Signature): input = InputField(prefix="Modified:") output = OutputField() - assert ( - TestSignature.input_fields["input"].json_schema_extra["prefix"] == "Modified:" - ) - assert ( - 
TestSignature.output_fields["output"].json_schema_extra["prefix"] == "Output:" - ) + assert TestSignature.input_fields["input"].json_schema_extra["prefix"] == "Modified:" + assert TestSignature.output_fields["output"].json_schema_extra["prefix"] == "Output:" def test_signature_parsing(): @@ -69,10 +65,7 @@ def test_with_updated_field(): assert signature1 is not signature2, "The type should be immutable" for key in signature1.fields.keys(): if key != "input1": - assert ( - signature1.fields[key].json_schema_extra - == signature2.fields[key].json_schema_extra - ) + assert signature1.fields[key].json_schema_extra == signature2.fields[key].json_schema_extra assert signature1.instructions == signature2.instructions @@ -97,18 +90,14 @@ def test_signature_instructions_none(): def test_signature_from_dict(): - signature = Signature( - {"input1": InputField(), "input2": InputField(), "output": OutputField()} - ) + signature = Signature({"input1": InputField(), "input2": InputField(), "output": OutputField()}) for k in ["input1", "input2", "output"]: assert k in signature.fields assert signature.fields[k].annotation == str def test_signature_from_dict(): - signature = Signature( - {"input1": InputField(), "input2": InputField(), "output": OutputField()} - ) + signature = Signature({"input1": InputField(), "input2": InputField(), "output": OutputField()}) assert "input1" in signature.input_fields assert "input2" in signature.input_fields assert "output" in signature.output_fields @@ -164,3 +153,22 @@ def test_infer_prefix(): assert infer_prefix("URLAddress") == "URL Address" assert infer_prefix("isHTTPSecure") == "Is HTTP Secure" assert infer_prefix("isHTTPSSecure123") == "Is HTTPS Secure 123" + + +def test_insantiating(): + sig = Signature("input -> output") + assert issubclass(sig, Signature) + assert sig.__name__ == "StringSignature" + value = sig(input="test", output="test") + assert isinstance(value, sig) + + +def test_insantiating2(): + class SubSignature(Signature): + input = InputField() + output = OutputField() + + assert issubclass(SubSignature, Signature) + assert SubSignature.__name__ == "SubSignature" + value = SubSignature(input="test", output="test") + assert isinstance(value, SubSignature) From 6af5366e9064918e784f68ed736626e08c53a317 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Sat, 2 Mar 2024 19:23:47 -0800 Subject: [PATCH 054/243] Avoid shadowing by lint --- pyproject.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 7828de0cf3..db7fc3fe4f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -218,6 +218,8 @@ select = [ "ERA", # pandas-vet "PD", + # avoid shadowing + "PLW", ] ignore = [ "D100", From 2c11b8beb23f248f6e293d409821a2fdacf64f5a Mon Sep 17 00:00:00 2001 From: Arian Askari Date: Sun, 3 Mar 2024 12:22:50 +0100 Subject: [PATCH 055/243] Update skycamp2023.ipynb --- skycamp2023.ipynb | 1 + 1 file changed, 1 insertion(+) diff --git a/skycamp2023.ipynb b/skycamp2023.ipynb index 28cd3c7607..5590509daf 100644 --- a/skycamp2023.ipynb +++ b/skycamp2023.ipynb @@ -425,6 +425,7 @@ " # TODO: Replace `None` with a call to self.generate_query_from_context to generate a search query.\n", " # Note: In DSPy, always pass keyword arguments (e.g., context=..., question=...) 
to the modules to avoid ambiguity.\n", " # Note 2: Don't forget to access the field .search_query to extract that from the output of the module.\n", + " # Note 3: Check the following notebook for a completed example: https://github.com/stanfordnlp/dspy/blob/main/skycamp2023_completed.ipynb.\n", " search_query2 = None\n", "\n", " # TODO: Replace `None` with a call to self.retrieve to retrieve passages. Append them to the list `passages`.\n", From 9a1c189171b774f590e17254758cfe166294cb89 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Sun, 3 Mar 2024 12:30:25 -0800 Subject: [PATCH 056/243] Support for n=... and type-strings --- dspy/functional/functional.py | 61 ++++++++++++++++------------- dspy/primitives/prediction.py | 20 +++++----- dspy/signatures/signature.py | 53 ++++++++++++++++++++++--- tests/functional/test_functional.py | 51 ++++++++++++++++++------ 4 files changed, 130 insertions(+), 55 deletions(-) diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py index 96c218ebbd..041ee7abc5 100644 --- a/dspy/functional/functional.py +++ b/dspy/functional/functional.py @@ -1,3 +1,4 @@ +from collections import defaultdict import inspect import os import openai @@ -7,6 +8,7 @@ from typing import Annotated, List, Tuple # noqa: UP035 from dsp.templates import passages2text import json +from dspy.primitives.prediction import Prediction from dspy.signatures.signature import ensure_signature @@ -71,7 +73,7 @@ def TypedChainOfThought(signature) -> dspy.Module: # noqa: N802 class TypedPredictor(dspy.Module): def __init__(self, signature): super().__init__() - self.signature = signature + self.signature = ensure_signature(signature) self.predictor = dspy.Predict(signature) def copy(self) -> "TypedPredictor": @@ -127,8 +129,7 @@ def _prepare_signature(self) -> dspy.Signature: name, desc=field.json_schema_extra.get("desc", "") + ( - ". Respond with a single JSON object. JSON Schema: " - + json.dumps(type_.model_json_schema()) + ". Respond with a single JSON object. 
JSON Schema: " + json.dumps(type_.model_json_schema()) ), format=lambda x, to_json=to_json: (x if isinstance(x, str) else to_json(x)), parser=lambda x, from_json=from_json: from_json(_unwrap_json(x)), @@ -152,28 +153,33 @@ def forward(self, **kwargs) -> dspy.Prediction: for try_i in range(MAX_RETRIES): result = self.predictor(**modified_kwargs, new_signature=signature) errors = {} - parsed_results = {} + parsed_results = defaultdict(list) # Parse the outputs for name, field in signature.output_fields.items(): - try: - value = getattr(result, name) - parser = field.json_schema_extra.get("parser", lambda x: x) - parsed_results[name] = parser(value) - except (pydantic.ValidationError, ValueError) as e: - errors[name] = _format_error(e) - # If we can, we add an example to the error message - current_desc = field.json_schema_extra.get("desc", "") - i = current_desc.find("JSON Schema: ") - if i == -1: - continue # Only add examples to JSON objects - suffix, current_desc = current_desc[i:], current_desc[:i] - prefix = "You MUST use this format: " - if try_i + 1 < MAX_RETRIES \ - and prefix not in current_desc \ - and (example := self._make_example(field.annotation)): - signature = signature.with_updated_fields( - name, desc=current_desc + "\n" + prefix + example + "\n" + suffix, - ) + for i, completion in enumerate(result.completions): + try: + value = completion[name] + parser = field.json_schema_extra.get("parser", lambda x: x) + completion[name] = parser(value) + parsed_results[name].append(parser(value)) + except (pydantic.ValidationError, ValueError) as e: + errors[name] = _format_error(e) + # If we can, we add an example to the error message + current_desc = field.json_schema_extra.get("desc", "") + i = current_desc.find("JSON Schema: ") + if i == -1: + continue # Only add examples to JSON objects + suffix, current_desc = current_desc[i:], current_desc[:i] + prefix = "You MUST use this format: " + if ( + try_i + 1 < MAX_RETRIES + and prefix not in current_desc + and (example := self._make_example(field.annotation)) + ): + signature = signature.with_updated_fields( + name, + desc=current_desc + "\n" + prefix + example + "\n" + suffix, + ) if errors: # Add new fields for each error for name, error in errors.items(): @@ -187,11 +193,12 @@ def forward(self, **kwargs) -> dspy.Prediction: ) else: # If there are no errors, we return the parsed results - for name, value in parsed_results.items(): - setattr(result, name, value) - return result + # for name, value in parsed_results.items(): + # setattr(result, name, value) + return Prediction.from_completions(parsed_results) raise ValueError( - "Too many retries trying to get the correct output format. " + "Try simplifying the requirements.", errors, + "Too many retries trying to get the correct output format. 
" + "Try simplifying the requirements.", + errors, ) diff --git a/dspy/primitives/prediction.py b/dspy/primitives/prediction.py index df653c1c4a..d77ee15a9d 100644 --- a/dspy/primitives/prediction.py +++ b/dspy/primitives/prediction.py @@ -4,12 +4,12 @@ class Prediction(Example): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - + del self._demos del self._input_keys self._completions = None - + @classmethod def from_completions(cls, list_or_dict, signature=None): obj = cls() @@ -17,16 +17,16 @@ def from_completions(cls, list_or_dict, signature=None): obj._store = {k: v[0] for k, v in obj._completions.items()} return obj - + def __repr__(self): - store_repr = ',\n '.join(f"{k}={repr(v)}" for k, v in self._store.items()) + store_repr = ",\n ".join(f"{k}={repr(v)}" for k, v in self._store.items()) if self._completions is None or len(self._completions) == 1: return f"Prediction(\n {store_repr}\n)" - + num_completions = len(self._completions) return f"Prediction(\n {store_repr},\n completions=Completions(...)\n) ({num_completions-1} completions omitted)" - + def __str__(self): return self.__repr__() @@ -62,15 +62,15 @@ def __getitem__(self, key): if isinstance(key, int): if key < 0 or key >= len(self): raise IndexError("Index out of range") - + return Prediction(**{k: v[key] for k, v in self._completions.items()}) - + return self._completions[key] def __getattr__(self, name): if name in self._completions: return self._completions[name] - + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'") def __len__(self): @@ -82,7 +82,7 @@ def __contains__(self, key): return key in self._completions def __repr__(self): - items_repr = ',\n '.join(f"{k}={repr(v)}" for k, v in self._completions.items()) + items_repr = ",\n ".join(f"{k}={repr(v)}" for k, v in self._completions.items()) return f"Completions(\n {items_repr}\n)" def __str__(self): diff --git a/dspy/signatures/signature.py b/dspy/signatures/signature.py index a335f219f0..95455c3704 100644 --- a/dspy/signatures/signature.py +++ b/dspy/signatures/signature.py @@ -1,9 +1,10 @@ +import ast from copy import deepcopy import typing import dsp from pydantic import BaseModel, Field, create_model from pydantic.fields import FieldInfo -from typing import Type, Union, Dict, Tuple # noqa: UP035 +from typing import Any, Type, Union, Dict, Tuple # noqa: UP035 import re from dspy.signatures.field import InputField, OutputField, new_to_old_field @@ -254,7 +255,7 @@ def make_signature( def _parse_signature(signature: str) -> Tuple[Type, Field]: - pattern = r"^\s*[\w\s,]+\s*->\s*[\w\s,]+\s*$" + pattern = r"^\s*[\w\s,:]+\s*->\s*[\w\s,:]+\s*$" if not re.match(pattern, signature): raise ValueError(f"Invalid signature format: '{signature}'") @@ -262,14 +263,54 @@ def _parse_signature(signature: str) -> Tuple[Type, Field]: inputs_str, outputs_str = map(str.strip, signature.split("->")) inputs = [v.strip() for v in inputs_str.split(",") if v.strip()] outputs = [v.strip() for v in outputs_str.split(",") if v.strip()] - for name in inputs: - fields[name] = (str, InputField()) - for name in outputs: - fields[name] = (str, OutputField()) + for name_type in inputs: + name, type_ = _parse_named_type_node(name_type) + fields[name] = (type_, InputField()) + for name_type in outputs: + name, type_ = _parse_named_type_node(name_type) + fields[name] = (type_, OutputField()) return fields +def _parse_named_type_node(node, names=None) -> Any: + parts = node.split(":") + if len(parts) == 1: + return parts[0], str + name, type_str = 
parts + type_ = _parse_type_node(ast.parse(type_str), names) + return name, type_ + + +def _parse_type_node(node, names=None) -> Any: + """Recursively parse an AST node representing a type annotation. + + using structural pattern matching introduced in Python 3.10. + """ + if names is None: + names = {} + match node: + case ast.Module(body=body): + if len(body) != 1: + raise ValueError(f"Code is not syntactically valid: {node}") + return _parse_type_node(body[0], names) + case ast.Expr(value=value): + return _parse_type_node(value, names) + case ast.Name(id=id): + if id in names: + return names[id] + for type_ in [int, str, float, bool, list, tuple, dict]: + if type_.__name__ == id: + return type_ + case ast.Subscript(value=value, slice=slice): + base_type = _parse_type_node(value, names) + arg_type = _parse_type_node(slice, names) + return base_type[arg_type] + case ast.Tuple(elts=elts): + return tuple(_parse_type_node(elt, names) for elt in elts) + raise ValueError(f"Code is not syntactically valid: {node}") + + def infer_prefix(attribute_name: str) -> str: """Infer a prefix from an attribute name.""" # Convert camelCase to snake_case, but handle sequences of capital letters properly diff --git a/tests/functional/test_functional.py b/tests/functional/test_functional.py index f515f93266..029519bcfd 100644 --- a/tests/functional/test_functional.py +++ b/tests/functional/test_functional.py @@ -35,9 +35,7 @@ def hard_questions(topics: List[str]) -> List[str]: pass expected = ["What is the speed of light?", "What is the speed of sound?"] - lm = DummyLM( - ['{"value": ["What is the speed of light?", "What is the speed of sound?"]}'] - ) + lm = DummyLM(['{"value": ["What is the speed of light?", "What is the speed of sound?"]}']) dspy.settings.configure(lm=lm) question = hard_questions(topics=["Physics", "Music"]) @@ -88,9 +86,7 @@ def test_simple_class(): class Answer(pydantic.BaseModel): value: float certainty: float - comments: List[str] = pydantic.Field( - description="At least two comments about the answer" - ) + comments: List[str] = pydantic.Field(description="At least two comments about the answer") class QA(FunctionalModule): @predictor @@ -229,9 +225,7 @@ def simple_metric(example, prediction, trace=None): lm = DummyLM(["blue", "Ring-ding-ding-ding-dingeringeding!"], follow_examples=True) dspy.settings.configure(lm=lm, trace=[]) - bootstrap = BootstrapFewShot( - metric=simple_metric, max_bootstrapped_demos=1, max_labeled_demos=1 - ) + bootstrap = BootstrapFewShot(metric=simple_metric, max_bootstrapped_demos=1, max_labeled_demos=1) compiled_student = bootstrap.compile(student, teacher=teacher, trainset=trainset) lm.inspect_history(n=2) @@ -295,7 +289,7 @@ def flight_information(email: str) -> TravelInformation: # Example with a bad origin code. 
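            # (the invalid "JF0" makes pydantic validation fail, so TypedPredictor retries with the error appended)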
'{"origin": "JF0", "destination": "LAX", "date": "2022-12-25"}', # Example to help the model understand - '{...}', + "{...}", # Fixed '{"origin": "JFK", "destination": "LAX", "date": "2022-12-25"}', ] @@ -344,9 +338,9 @@ def flight_information(email: str) -> TravelInformation: [ # First origin is wrong, then destination, then all is good '{"origin": "JF0", "destination": "LAX", "date": "2022-12-25"}', - '{...}', # Example to help the model understand + "{...}", # Example to help the model understand '{"origin": "JFK", "destination": "LA0", "date": "2022-12-25"}', - '{...}', # Example to help the model understand + "{...}", # Example to help the model understand '{"origin": "JFK", "destination": "LAX", "date": "2022-12-25"}', ] ) @@ -447,3 +441,36 @@ def test(input: Annotated[str, Field(description="description")]) -> Annotated[f output = test(input="input") assert output == 0.5 + + +def test_multiple_outputs(): + lm = DummyLM([str(i) for i in range(100)]) + dspy.settings.configure(lm=lm) + + test = TypedPredictor("input -> output") + output = test(input="input", config=dict(n=3)).completions.output + assert output == ["0", "1", "2"] + + +def test_multiple_outputs_int(): + lm = DummyLM([str(i) for i in range(100)]) + dspy.settings.configure(lm=lm) + + class TestSignature(dspy.Signature): + input: int = dspy.InputField() + output: int = dspy.OutputField() + + test = TypedPredictor(TestSignature) + + output = test(input=8, config=dict(n=3)).completions.output + assert output == [0, 1, 2] + + +def test_parse_type_string(): + lm = DummyLM([str(i) for i in range(100)]) + dspy.settings.configure(lm=lm) + + test = TypedPredictor("input:int -> output:int") + + output = test(input=8, config=dict(n=3)).completions.output + assert output == [0, 1, 2] From a021bd26763a0c3efa8fffa84e8a8fcdf27554f8 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Sun, 3 Mar 2024 12:30:35 -0800 Subject: [PATCH 057/243] Linting of dummy --- dspy/utils/dummies.py | 62 ++++++++++++++++++++++--------------------- 1 file changed, 32 insertions(+), 30 deletions(-) diff --git a/dspy/utils/dummies.py b/dspy/utils/dummies.py index a0c997145b..3a37388073 100644 --- a/dspy/utils/dummies.py +++ b/dspy/utils/dummies.py @@ -1,6 +1,5 @@ import random from dsp.modules import LM -from typing import List, Union, Dict import numpy as np from dsp.utils.utils import dotdict import re @@ -9,9 +8,9 @@ class DummyLM(LM): """Dummy language model for unit testing purposes.""" - def __init__(self, answers: Union[List[str], Dict[str,str]], follow_examples: bool = False): - """ - Initializes the dummy language model. + def __init__(self, answers: list[str] | dict[str, str], follow_examples: bool = False): + """Initializes the dummy language model. + Parameters: - answers: A list of strings or a dictionary with string keys and values. - follow_examples: If True, and the prompt contains an example exactly equal to the prompt, @@ -24,7 +23,7 @@ def __init__(self, answers: Union[List[str], Dict[str,str]], follow_examples: bo self.answers = answers self.follow_examples = follow_examples - def basic_request(self, prompt, n=1, **kwargs): + def basic_request(self, prompt, n=1, **kwargs) -> dict[str, list[dict[str, str]]]: """Generates a dummy response based on the prompt.""" dummy_response = {"choices": []} for _ in range(n): @@ -55,12 +54,14 @@ def basic_request(self, prompt, n=1, **kwargs): answer = "No more responses" # Mimic the structure of a real language model response. 
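        # (an OpenAI-style payload: {"choices": [{"text": ..., "finish_reason": ...}, ...]})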
- dummy_response["choices"].append({ - "text": answer, - "finish_reason": "simulated completion", - }) - - RED, GREEN, RESET = '\033[91m', '\033[92m', '\033[0m' + dummy_response["choices"].append( + { + "text": answer, + "finish_reason": "simulated completion", + }, + ) + + RED, GREEN, RESET = "\033[91m", "\033[92m", "\033[0m" print("=== DummyLM ===") print(prompt, end="") print(f"{RED}{answer}{RESET}") @@ -77,67 +78,68 @@ def basic_request(self, prompt, n=1, **kwargs): return dummy_response - def __call__(self, prompt, only_completed=True, return_sorted=False, **kwargs): + def __call__(self, prompt, _only_completed=True, _return_sorted=False, **kwargs): """Retrieves dummy completions.""" response = self.basic_request(prompt, **kwargs) choices = response["choices"] # Filter choices and return text completions. - completions = [choice["text"] for choice in choices] - - return completions + return [choice["text"] for choice in choices] - def get_convo(self, index): - """Get the prompt + anwer from the ith message""" - return self.history[index]['prompt'] \ - + " " \ - + self.history[index]['response']['choices'][0]['text'] + def get_convo(self, index) -> str: + """Get the prompt + anwer from the ith message.""" + return self.history[index]["prompt"] + " " + self.history[index]["response"]["choices"][0]["text"] -def dummy_rm(passages=()): +def dummy_rm(passages=()) -> callable: if not passages: - def inner(query:str, *, k:int, **kwargs): + + def inner(query: str, *, k: int, **kwargs): assert False, "No passages defined" + return inner max_length = max(map(len, passages)) + 100 vectorizer = DummyVectorizer(max_length) passage_vecs = vectorizer(passages) - def inner(query:str, *, k:int, **kwargs): + + def inner(query: str, *, k: int, **kwargs): assert k <= len(passages) query_vec = vectorizer([query])[0] scores = passage_vecs @ query_vec largest_idx = (-scores).argsort()[:k] - #return dspy.Prediction(passages=[passages[i] for i in largest_idx]) + # return dspy.Prediction(passages=[passages[i] for i in largest_idx]) return [dotdict(dict(long_text=passages[i])) for i in largest_idx] + return inner class DummyVectorizer: - """Simple vectorizer based on n-grams""" + """Simple vectorizer based on n-grams.""" + def __init__(self, max_length=100, n_gram=2): self.max_length = max_length self.n_gram = n_gram self.P = 10**9 + 7 # A large prime number random.seed(123) self.coeffs = [random.randrange(1, self.P) for _ in range(n_gram)] - + def _hash(self, gram): - """Hashes a string using a polynomial hash function""" + """Hashes a string using a polynomial hash function.""" h = 1 for coeff, c in zip(self.coeffs, gram): h = h * coeff + ord(c) h %= self.P return h % self.max_length - def __call__(self, texts: List[str]) -> np.ndarray: + def __call__(self, texts: list[str]) -> np.ndarray: vecs = [] for text in texts: - grams = [text[i:i+self.n_gram] for i in range(len(text) - self.n_gram + 1)] + grams = [text[i : i + self.n_gram] for i in range(len(text) - self.n_gram + 1)] vec = [0] * self.max_length for gram in grams: vec[self._hash(gram)] += 1 vecs.append(vec) - + vecs = np.array(vecs, dtype=np.float32) vecs -= np.mean(vecs, axis=1, keepdims=True) vecs /= np.linalg.norm(vecs, axis=1, keepdims=True) + 1e-10 # Added epsilon to avoid division by zero From f223579a7e124c1e8b5ad27910f6a05190cd6433 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Sun, 3 Mar 2024 13:08:30 -0800 Subject: [PATCH 058/243] Rewrote type parsing to be 3.9 compatible --- dspy/signatures/signature.py | 49 
+++++++++++++++++++++++++++---------
 1 file changed, 29 insertions(+), 20 deletions(-)

diff --git a/dspy/signatures/signature.py b/dspy/signatures/signature.py
index 95455c3704..6493a8ce19 100644
--- a/dspy/signatures/signature.py
+++ b/dspy/signatures/signature.py
@@ -285,29 +285,38 @@ def _parse_named_type_node(node, names=None) -> Any:
 def _parse_type_node(node, names=None) -> Any:
     """Recursively parse an AST node representing a type annotation.
 
-    using structural pattern matching introduced in Python 3.10.
+    without using structural pattern matching introduced in Python 3.10.
     """
     if names is None:
         names = {}
-    match node:
-        case ast.Module(body=body):
-            if len(body) != 1:
-                raise ValueError(f"Code is not syntactically valid: {node}")
-            return _parse_type_node(body[0], names)
-        case ast.Expr(value=value):
-            return _parse_type_node(value, names)
-        case ast.Name(id=id):
-            if id in names:
-                return names[id]
-            for type_ in [int, str, float, bool, list, tuple, dict]:
-                if type_.__name__ == id:
-                    return type_
-        case ast.Subscript(value=value, slice=slice):
-            base_type = _parse_type_node(value, names)
-            arg_type = _parse_type_node(slice, names)
-            return base_type[arg_type]
-        case ast.Tuple(elts=elts):
-            return tuple(_parse_type_node(elt, names) for elt in elts)
+
+    if isinstance(node, ast.Module):
+        body = node.body
+        if len(body) != 1:
+            raise ValueError(f"Code is not syntactically valid: {node}")
+        return _parse_type_node(body[0], names)
+
+    if isinstance(node, ast.Expr):
+        value = node.value
+        return _parse_type_node(value, names)
+
+    if isinstance(node, ast.Name):
+        id_ = node.id
+        if id_ in names:
+            return names[id_]
+        for type_ in [int, str, float, bool, list, tuple, dict]:
+            if type_.__name__ == id_:
+                return type_
+
+    elif isinstance(node, ast.Subscript):
+        base_type = _parse_type_node(node.value, names)
+        arg_type = _parse_type_node(node.slice, names)
+        return base_type[arg_type]
+
+    elif isinstance(node, ast.Tuple):
+        elts = node.elts
+        return tuple(_parse_type_node(elt, names) for elt in elts)
+
     raise ValueError(f"Code is not syntactically valid: {node}")

From dab44cd1fa5b18c8e46f595e889cebfa8ccaf461 Mon Sep 17 00:00:00 2001
From: Thomas D Ahle
Date: Sun, 3 Mar 2024 13:14:39 -0800
Subject: [PATCH 059/243] Python 3.9 compatibility

---
 dspy/signatures/signature.py | 2 +-
 dspy/utils/dummies.py        | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/dspy/signatures/signature.py b/dspy/signatures/signature.py
index 6493a8ce19..8c1f161642 100644
--- a/dspy/signatures/signature.py
+++ b/dspy/signatures/signature.py
@@ -188,7 +188,7 @@ class MySignature(Signature):
     pass
 
 
-def ensure_signature(signature: str | Type[Signature]) -> Signature:
+def ensure_signature(signature: Union[str, Type[Signature]]) -> Signature:
     if signature is None:
         return None
     if isinstance(signature, str):
diff --git a/dspy/utils/dummies.py b/dspy/utils/dummies.py
index 3a37388073..1f6390536a 100644
--- a/dspy/utils/dummies.py
+++ b/dspy/utils/dummies.py
@@ -1,4 +1,5 @@
 import random
+from typing import Union
 from dsp.modules import LM
 import numpy as np
 from dsp.utils.utils import dotdict
@@ -8,7 +9,7 @@ class DummyLM(LM):
 class DummyLM(LM):
     """Dummy language model for unit testing purposes."""
 
-    def __init__(self, answers: list[str] | dict[str, str], follow_examples: bool = False):
+    def __init__(self, answers: Union[list[str], dict[str, str]], follow_examples: bool = False):
        """Initializes the dummy language model.
Parameters: From fe321da85da1bc290a0bcd2e6c277d2d7e68a91c Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Sun, 3 Mar 2024 13:28:51 -0800 Subject: [PATCH 060/243] Validate main signature fields --- dspy/functional/functional.py | 48 ++++++++++++++++------------- tests/functional/test_functional.py | 17 ++++++++++ 2 files changed, 44 insertions(+), 21 deletions(-) diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py index 041ee7abc5..aa82288295 100644 --- a/dspy/functional/functional.py +++ b/dspy/functional/functional.py @@ -155,31 +155,37 @@ def forward(self, **kwargs) -> dspy.Prediction: errors = {} parsed_results = defaultdict(list) # Parse the outputs - for name, field in signature.output_fields.items(): - for i, completion in enumerate(result.completions): - try: + for i, completion in enumerate(result.completions): + try: + for name, field in signature.output_fields.items(): value = completion[name] parser = field.json_schema_extra.get("parser", lambda x: x) completion[name] = parser(value) parsed_results[name].append(parser(value)) - except (pydantic.ValidationError, ValueError) as e: - errors[name] = _format_error(e) - # If we can, we add an example to the error message - current_desc = field.json_schema_extra.get("desc", "") - i = current_desc.find("JSON Schema: ") - if i == -1: - continue # Only add examples to JSON objects - suffix, current_desc = current_desc[i:], current_desc[:i] - prefix = "You MUST use this format: " - if ( - try_i + 1 < MAX_RETRIES - and prefix not in current_desc - and (example := self._make_example(field.annotation)) - ): - signature = signature.with_updated_fields( - name, - desc=current_desc + "\n" + prefix + example + "\n" + suffix, - ) + # Instantiate the actual signature with the parsed values. + # This allow pydantic to validate the fields defined in the signature. 
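+                    # (e.g. with `output: float = dspy.OutputField(gt=0, lt=1)`, a completion
+                    # that parses to 2.1 still fails validation here and is retried)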
+ _dummy = self.signature( + **kwargs, + **{key: value[i] for key, value in parsed_results.items()}, + ) + except (pydantic.ValidationError, ValueError) as e: + errors[name] = _format_error(e) + # If we can, we add an example to the error message + current_desc = field.json_schema_extra.get("desc", "") + i = current_desc.find("JSON Schema: ") + if i == -1: + continue # Only add examples to JSON objects + suffix, current_desc = current_desc[i:], current_desc[:i] + prefix = "You MUST use this format: " + if ( + try_i + 1 < MAX_RETRIES + and prefix not in current_desc + and (example := self._make_example(field.annotation)) + ): + signature = signature.with_updated_fields( + name, + desc=current_desc + "\n" + prefix + example + "\n" + suffix, + ) if errors: # Add new fields for each error for name, error in errors.items(): diff --git a/tests/functional/test_functional.py b/tests/functional/test_functional.py index 029519bcfd..e012324f74 100644 --- a/tests/functional/test_functional.py +++ b/tests/functional/test_functional.py @@ -474,3 +474,20 @@ def test_parse_type_string(): output = test(input=8, config=dict(n=3)).completions.output assert output == [0, 1, 2] + + +def test_fields_on_base_signature(): + class SimpleOutput(dspy.Signature): + output: float = dspy.OutputField(gt=0, lt=1) + + lm = DummyLM( + [ + "2.1", # Bad output + "0.5", # Good output + ] + ) + dspy.settings.configure(lm=lm) + + predictor = TypedPredictor(SimpleOutput) + + assert predictor().output == 0.5 From 16be7a1bc84decdb417e2f0e4d337656db34b995 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Sun, 3 Mar 2024 14:12:52 -0800 Subject: [PATCH 061/243] Fixes to completions --- dspy/functional/functional.py | 21 +++++++-------- tests/functional/test_functional.py | 41 +++++++++++++++++++++++++++++ tests/signatures/test_signature.py | 2 ++ 3 files changed, 53 insertions(+), 11 deletions(-) diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py index aa82288295..cb7b3669aa 100644 --- a/dspy/functional/functional.py +++ b/dspy/functional/functional.py @@ -10,7 +10,7 @@ import json from dspy.primitives.prediction import Prediction -from dspy.signatures.signature import ensure_signature +from dspy.signatures.signature import ensure_signature, make_signature MAX_RETRIES = 3 @@ -83,7 +83,7 @@ def copy(self) -> "TypedPredictor": def _make_example(type_) -> str: # Note: DSPy will cache this call so we only pay the first time TypedPredictor is called. json_object = dspy.Predict( - dspy.Signature( + make_signature( "json_schema -> json_object", "Make a very succinct json object that validates with the following schema", ), @@ -153,21 +153,20 @@ def forward(self, **kwargs) -> dspy.Prediction: for try_i in range(MAX_RETRIES): result = self.predictor(**modified_kwargs, new_signature=signature) errors = {} - parsed_results = defaultdict(list) + parsed_results = [] # Parse the outputs for i, completion in enumerate(result.completions): try: + parsed = {} for name, field in signature.output_fields.items(): value = completion[name] parser = field.json_schema_extra.get("parser", lambda x: x) completion[name] = parser(value) - parsed_results[name].append(parser(value)) + parsed[name] = parser(value) # Instantiate the actual signature with the parsed values. # This allow pydantic to validate the fields defined in the signature. 
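                    # (a completion whose fields fail validation is excluded from parsed_results and retried)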
- _dummy = self.signature( - **kwargs, - **{key: value[i] for key, value in parsed_results.items()}, - ) + _dummy = self.signature(**kwargs, **parsed) + parsed_results.append(parsed) except (pydantic.ValidationError, ValueError) as e: errors[name] = _format_error(e) # If we can, we add an example to the error message @@ -199,9 +198,9 @@ def forward(self, **kwargs) -> dspy.Prediction: ) else: # If there are no errors, we return the parsed results - # for name, value in parsed_results.items(): - # setattr(result, name, value) - return Prediction.from_completions(parsed_results) + return Prediction.from_completions( + {key: [r[key] for r in parsed_results] for key in signature.output_fields} + ) raise ValueError( "Too many retries trying to get the correct output format. " + "Try simplifying the requirements.", errors, diff --git a/tests/functional/test_functional.py b/tests/functional/test_functional.py index e012324f74..cabbc7cd09 100644 --- a/tests/functional/test_functional.py +++ b/tests/functional/test_functional.py @@ -11,6 +11,7 @@ from dspy.functional import predictor, cot, FunctionalModule, TypedPredictor, functional from dspy.primitives.example import Example from dspy.teleprompt.bootstrap import BootstrapFewShot +from dspy.teleprompt.vanilla import LabeledFewShot from dspy.utils.dummies import DummyLM @@ -491,3 +492,43 @@ class SimpleOutput(dspy.Signature): predictor = TypedPredictor(SimpleOutput) assert predictor().output == 0.5 + + +def test_synthetic_data_gen(): + class SyntheticFact(BaseModel): + fact: str = Field(..., description="a statement") + varacity: bool = Field(..., description="is the statement true or false") + + class ExampleSignature(dspy.Signature): + """Generate an example of a synthetic fact.""" + + fact: SyntheticFact = dspy.OutputField() + + lm = DummyLM( + [ + '{"fact": "The sky is blue", "varacity": true}', + '{"fact": "The sky is green", "varacity": false}', + '{"fact": "The sky is red", "varacity": true}', + '{"fact": "The earth is flat", "varacity": false}', + '{"fact": "The earth is round", "varacity": true}', + '{"fact": "The earth is a cube", "varacity": false}', + ] + ) + dspy.settings.configure(lm=lm) + + generator = TypedPredictor(ExampleSignature) + examples = generator(config=dict(n=3)) + for ex in examples.completions.fact: + assert isinstance(ex, SyntheticFact) + assert examples.completions.fact[0] == SyntheticFact(fact="The sky is blue", varacity=True) + + # If you have examples and want more + existing_examples = [ + dspy.Example(fact="The sky is blue", varacity=True), + dspy.Example(fact="The sky is green", varacity=False), + ] + trained = LabeledFewShot().compile(student=generator, trainset=existing_examples) + + augmented_examples = trained(config=dict(n=3)) + for ex in augmented_examples.completions.fact: + assert isinstance(ex, SyntheticFact) diff --git a/tests/signatures/test_signature.py b/tests/signatures/test_signature.py index 26fdfc7aa0..554d9b1274 100644 --- a/tests/signatures/test_signature.py +++ b/tests/signatures/test_signature.py @@ -82,6 +82,8 @@ def test_instructions_signature(): def test_signature_instructions(): sig1 = Signature("input1 -> output1", instructions="This is a test") assert sig1.instructions == "This is a test" + sig2 = Signature("input1 -> output1", "This is a test") + assert sig2.instructions == "This is a test" def test_signature_instructions_none(): From 6ed29b410a2135d9a6e872c1093e5bd696d39291 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Sun, 3 Mar 2024 15:38:10 -0800 Subject: [PATCH 062/243] Fixed 
#520 --- dspy/functional/functional.py | 10 +- examples/functional/functional.ipynb | 213 ++++++++++++++++++++++----- 2 files changed, 177 insertions(+), 46 deletions(-) diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py index cb7b3669aa..8ca45523f4 100644 --- a/dspy/functional/functional.py +++ b/dspy/functional/functional.py @@ -8,12 +8,13 @@ from typing import Annotated, List, Tuple # noqa: UP035 from dsp.templates import passages2text import json +import ujson from dspy.primitives.prediction import Prediction from dspy.signatures.signature import ensure_signature, make_signature -MAX_RETRIES = 3 +MAX_RETRIES = 5 def predictor(func) -> dspy.Module: @@ -101,9 +102,7 @@ def _make_example(type_) -> str: # library like https://pypi.org/project/polyfactory/ that's made exactly to do this. def _prepare_signature(self) -> dspy.Signature: - """Add formats and parsers to the signature fields, based on the type - annotations of the fields. - """ + """Add formats and parsers to the signature fields, based on the type annotations of the fields.""" signature = self.signature for name, field in self.signature.fields.items(): is_output = field.json_schema_extra["__dspy_field_type"] == "output" @@ -161,7 +160,6 @@ def forward(self, **kwargs) -> dspy.Prediction: for name, field in signature.output_fields.items(): value = completion[name] parser = field.json_schema_extra.get("parser", lambda x: x) - completion[name] = parser(value) parsed[name] = parser(value) # Instantiate the actual signature with the parsed values. # This allow pydantic to validate the fields defined in the signature. @@ -257,7 +255,7 @@ def _unwrap_json(output): output = output[7:-3].strip() if not output.startswith("{") or not output.endswith("}"): raise ValueError("json output should start and end with { and }") - return output + return ujson.dumps(ujson.loads(output)) # ujson is a bit more robust than the standard json ################################################################################ diff --git a/examples/functional/functional.ipynb b/examples/functional/functional.ipynb index cf7088a5d9..90b7f4501f 100644 --- a/examples/functional/functional.ipynb +++ b/examples/functional/functional.ipynb @@ -2,13 +2,15 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": 25, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ + "The autoreload extension is already loaded. 
To reload it, use:\n", + " %reload_ext autoreload\n", "Requirement already satisfied: datasets in /opt/homebrew/lib/python3.11/site-packages (2.14.7)\n", "Requirement already satisfied: numpy>=1.17 in /opt/homebrew/lib/python3.11/site-packages (from datasets) (1.26.2)\n", "Requirement already satisfied: pyarrow>=8.0.0 in /opt/homebrew/lib/python3.11/site-packages (from datasets) (12.0.0)\n", @@ -39,9 +41,6 @@ "Requirement already satisfied: pytz>=2020.1 in /opt/homebrew/lib/python3.11/site-packages (from pandas->datasets) (2023.3)\n", "Requirement already satisfied: tzdata>=2022.1 in /opt/homebrew/lib/python3.11/site-packages (from pandas->datasets) (2023.3)\n", "Requirement already satisfied: six>=1.5 in /opt/homebrew/lib/python3.11/site-packages (from python-dateutil>=2.8.2->pandas->datasets) (1.16.0)\n", - "\n", - "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.3.2\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\n", - "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpython3.11 -m pip install --upgrade pip\u001b[0m\n", "Note: you may need to restart the kernel to use updated packages.\n" ] } @@ -61,17 +60,9 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 26, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/opt/homebrew/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n" - ] - }, { "data": { "text/plain": [ @@ -82,7 +73,7 @@ " 'entry_point': 'has_close_elements'}" ] }, - "execution_count": 2, + "execution_count": 26, "metadata": {}, "output_type": "execute_result" } @@ -102,7 +93,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 27, "metadata": {}, "outputs": [ { @@ -134,15 +125,21 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 30, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ + "Now parsing: '{\"code\": \"from typing import List\\\\n\\\\n\\\\ndef has_close_elements(numbers: List[float], threshold: float) -> bool:\\\\n \\\\\"\\\\\"\\\\\" Check if in given list of numbers, are any two numbers closer to each other than\\\\n given threshold.\\\\n >>> has_close_elements([1.0, 2.0, 3.0], 0.5)\\\\n False\\\\n >>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3)\\\\n True\\\\n \\\\\"\\\\\"\\\\\"\\\\n for i in range(len(numbers)):\\\\n for j in range(i+1, len(numbers)):\\\\n if abs(numbers[i] - numbers[j]) < threshold:\\\\n return True\\\\n return False\\\\n\"}'\n", + "Parsed: PythonCode(code='from typing import List\\n\\n\\ndef has_close_elements(numbers: List[float], threshold: float) -> bool:\\n \"\"\" Check if in given list of numbers, are any two numbers closer to each other than\\n given threshold.\\n >>> has_close_elements([1.0, 2.0, 3.0], 0.5)\\n False\\n >>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3)\\n True\\n \"\"\"\\n for i in range(len(numbers)):\\n for j in range(i+1, len(numbers)):\\n if abs(numbers[i] - numbers[j]) < threshold:\\n return True\\n return False\\n')\n", + "is this the problem?\n", + "kwargs={'prompt': PythonCode(code='from typing import List\\n\\n\\ndef has_close_elements(numbers: List[float], threshold: float) -> 
bool:\\n \"\"\" Check if in given list of numbers, are any two numbers closer to each other than\\n given threshold.\\n >>> has_close_elements([1.0, 2.0, 3.0], 0.5)\\n False\\n >>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3)\\n True\\n \"\"\"\\n'), 'test': PythonCode(code=\"\\n\\nMETADATA = {\\n 'author': 'jt',\\n 'dataset': 'test'\\n}\\n\\n\\ndef check(candidate):\\n assert candidate([1.0, 2.0, 3.9, 4.0, 5.0, 2.2], 0.3) == True\\n assert candidate([1.0, 2.0, 3.9, 4.0, 5.0, 2.2], 0.05) == False\\n assert candidate([1.0, 2.0, 5.9, 4.0, 5.0], 0.95) == True\\n assert candidate([1.0, 2.0, 5.9, 4.0, 5.0], 0.8) == False\\n assert candidate([1.0, 2.0, 3.0, 4.0, 5.0, 2.0], 0.1) == True\\n assert candidate([1.1, 2.2, 3.1, 4.1, 5.1], 1.0) == True\\n assert candidate([1.1, 2.2, 3.1, 4.1, 5.1], 0.5) == False\\n\\n\"), 'entry_point': 'has_close_elements'}\n", + "parsed={'solution': PythonCode(code='from typing import List\\n\\n\\ndef has_close_elements(numbers: List[float], threshold: float) -> bool:\\n \"\"\" Check if in given list of numbers, are any two numbers closer to each other than\\n given threshold.\\n >>> has_close_elements([1.0, 2.0, 3.0], 0.5)\\n False\\n >>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3)\\n True\\n \"\"\"\\n for i in range(len(numbers)):\\n for j in range(i+1, len(numbers)):\\n if abs(numbers[i] - numbers[j]) < threshold:\\n return True\\n return False\\n')}\n", + "after wards\n", "Prediction(\n", - " solution=PythonCode(code='def has_close_elements(numbers: List[float], threshold: float) -> bool:\\n for i in range(len(numbers)):\\n for j in range(i+1, len(numbers)):\\n if abs(numbers[i] - numbers[j]) < threshold:\\n return True\\n return False')\n", + " solution=PythonCode(code='from typing import List\\n\\n\\ndef has_close_elements(numbers: List[float], threshold: float) -> bool:\\n \"\"\" Check if in given list of numbers, are any two numbers closer to each other than\\n given threshold.\\n >>> has_close_elements([1.0, 2.0, 3.0], 0.5)\\n False\\n >>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3)\\n True\\n \"\"\"\\n for i in range(len(numbers)):\\n for j in range(i+1, len(numbers)):\\n if abs(numbers[i] - numbers[j]) < threshold:\\n return True\\n return False\\n')\n", ")\n" ] } @@ -170,7 +167,7 @@ "# The signature is the main DSpy object. 
Note that we have types for the input and output fields,\n", "# which was not possible beofore.\n", "class CodeSignature(Signature):\n", - " prompt: str = InputField()\n", + " prompt: PythonCode = InputField()\n", " test: PythonCode = InputField()\n", " entry_point: str = InputField()\n", " solution: PythonCode = OutputField()\n", @@ -180,9 +177,7 @@ " prompt=PythonCode(code=ds['test'][0]['prompt']),\n", " test=PythonCode(code=ds['test'][0]['test']),\n", " entry_point=ds['test'][0]['entry_point']\n", - ")\n", - "\n", - "print(prediction)" + ")\n" ] }, { @@ -194,7 +189,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -205,19 +200,40 @@ "\n", "\n", "\n", - "Make a very succinct json object that validates with the following schema\n", + "Given the fields `prompt`, `test`, `entry_point`, produce the fields `solution`.\n", "\n", "---\n", "\n", "Follow the following format.\n", "\n", - "Json Schema: ${json_schema}\n", - "Json Object: ${json_object}\n", + "Prompt: ${prompt}\n", + "\n", + "Test: ${test}\n", + "\n", + "Entry Point: ${entry_point}\n", + "\n", + "Past Error (solution): An error to avoid in the future\n", + "\n", + "Past Error (solution, 2): An error to avoid in the future\n", + "\n", + "Solution:\n", + "${solution}. Respond with a single JSON object. \n", + "You MUST use this format: {\"code\": \"print('Hello, World!')\"}\n", + "JSON Schema: {\"properties\": {\"code\": {\"title\": \"Code\", \"type\": \"string\"}}, \"required\": [\"code\"], \"title\": \"PythonCode\", \"type\": \"object\"}\n", "\n", "---\n", "\n", - "Json Schema: {\"properties\": {\"code\": {\"title\": \"Code\", \"type\": \"string\"}}, \"required\": [\"code\"], \"title\": \"PythonCode\", \"type\": \"object\"}\n", - "Json Object:\u001b[32m {\"code\": \"print('Hello, World!')\"}\u001b[0m\n", + "Prompt: code='from typing import List\\n\\n\\ndef has_close_elements(numbers: List[float], threshold: float) -> bool:\\n \"\"\" Check if in given list of numbers, are any two numbers closer to each other than\\n given threshold.\\n >>> has_close_elements([1.0, 2.0, 3.0], 0.5)\\n False\\n >>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3)\\n True\\n \"\"\"\\n'\n", + "\n", + "Test: {\"code\":\"\\n\\nMETADATA = {\\n 'author': 'jt',\\n 'dataset': 'test'\\n}\\n\\n\\ndef check(candidate):\\n assert candidate([1.0, 2.0, 3.9, 4.0, 5.0, 2.2], 0.3) == True\\n assert candidate([1.0, 2.0, 3.9, 4.0, 5.0, 2.2], 0.05) == False\\n assert candidate([1.0, 2.0, 5.9, 4.0, 5.0], 0.95) == True\\n assert candidate([1.0, 2.0, 5.9, 4.0, 5.0], 0.8) == False\\n assert candidate([1.0, 2.0, 3.0, 4.0, 5.0, 2.0], 0.1) == True\\n assert candidate([1.1, 2.2, 3.1, 4.1, 5.1], 1.0) == True\\n assert candidate([1.1, 2.2, 3.1, 4.1, 5.1], 0.5) == False\\n\\n\"}\n", + "\n", + "Entry Point: has_close_elements\n", + "\n", + "Past Error (solution): Input should be a valid string: prompt (error type: string_type)\n", + "\n", + "Past Error (solution, 2): Value error, Code is not syntactically valid: unexpected character after line continuation character (, line 1): code (error type: value_error)\n", + "\n", + "Solution:\u001b[32m {\"code\": \"def has_close_elements(numbers: List[float], threshold: float) -> bool:\\n for i in range(len(numbers)):\\n for j in range(i+1, len(numbers)):\\n if abs(numbers[i] - numbers[j]) < threshold:\\n return True\\n return False\"}\u001b[0m\n", "\n", "\n", "\n", @@ -237,7 +253,16 @@ "\n", "Entry Point: ${entry_point}\n", "\n", - "Solution: ${solution}. 
Respond with a single JSON object using the schema {\"properties\": {\"code\": {\"title\": \"Code\", \"type\": \"string\"}}, \"required\": [\"code\"], \"title\": \"PythonCode\", \"type\": \"object\"}. For example: {\"code\": \"print('Hello, World!')\"}\n", + "Past Error (solution): An error to avoid in the future\n", + "\n", + "Past Error (solution, 2): An error to avoid in the future\n", + "\n", + "Past Error (solution, 3): An error to avoid in the future\n", + "\n", + "Solution:\n", + "${solution}. Respond with a single JSON object. \n", + "You MUST use this format: {\"code\": \"print('Hello, World!')\"}\n", + "JSON Schema: {\"properties\": {\"code\": {\"title\": \"Code\", \"type\": \"string\"}}, \"required\": [\"code\"], \"title\": \"PythonCode\", \"type\": \"object\"}\n", "\n", "---\n", "\n", @@ -247,9 +272,13 @@ "\n", "Entry Point: has_close_elements\n", "\n", - "Solution:\u001b[32m {\"properties\": {\"code\": {\"title\": \"Code\", \"type\": \"string\"}}, \"required\": [\"code\"], \"title\": \"PythonCode\", \"type\": \"object\"}\n", + "Past Error (solution): Input should be a valid string: prompt (error type: string_type)\n", + "\n", + "Past Error (solution, 2): Value error, Code is not syntactically valid: unexpected character after line continuation character (, line 1): code (error type: value_error)\n", + "\n", + "Past Error (solution, 3): Input should be a valid string: prompt (error type: string_type)\n", "\n", - "{\"code\": \"from typing import List\\n\\ndef has_close_elements(numbers: List[float], threshold: float) -> bool:\\n for i in range(len(numbers)):\\n for j in range(i+1, len(numbers)):\\n if abs(numbers[i] - numbers[j]) < threshold:\\n return True\\n return False\"}\u001b[0m\n", + "Solution:\u001b[32m {\"code\": \"def has_close_elements(numbers: List[float], threshold: float) -> bool:\\\\n for i in range(len(numbers)):\\\\n for j in range(i+1, len(numbers)):\\\\n if abs(numbers[i] - numbers[j]) < threshold:\\\\n return True\\\\n return False\"}\u001b[0m\n", "\n", "\n", "\n", @@ -271,7 +300,16 @@ "\n", "Past Error (solution): An error to avoid in the future\n", "\n", - "Solution: ${solution}. Respond with a single JSON object using the schema {\"properties\": {\"code\": {\"title\": \"Code\", \"type\": \"string\"}}, \"required\": [\"code\"], \"title\": \"PythonCode\", \"type\": \"object\"}. For example: {\"code\": \"print('Hello, World!')\"}\n", + "Past Error (solution, 2): An error to avoid in the future\n", + "\n", + "Past Error (solution, 3): An error to avoid in the future\n", + "\n", + "Past Error (solution, 4): An error to avoid in the future\n", + "\n", + "Solution:\n", + "${solution}. Respond with a single JSON object. 
\n", + "You MUST use this format: {\"code\": \"print('Hello, World!')\"}\n", + "JSON Schema: {\"properties\": {\"code\": {\"title\": \"Code\", \"type\": \"string\"}}, \"required\": [\"code\"], \"title\": \"PythonCode\", \"type\": \"object\"}\n", "\n", "---\n", "\n", @@ -281,9 +319,15 @@ "\n", "Entry Point: has_close_elements\n", "\n", - "Past Error (solution): 1 validation error for PythonCode Invalid JSON: trailing characters at line 3 column 1 [type=json_invalid, input_value='{\"properties\": {\"code\": ...ue\\\\n return False\"}', input_type=str] For further information visit https://errors.pydantic.dev/2.5/v/json_invalid\n", + "Past Error (solution): Input should be a valid string: prompt (error type: string_type)\n", "\n", - "Solution:\u001b[32m {\"code\": \"def has_close_elements(numbers: List[float], threshold: float) -> bool:\\n for i in range(len(numbers)):\\n for j in range(i+1, len(numbers)):\\n if abs(numbers[i] - numbers[j]) < threshold:\\n return True\\n return False\"}\u001b[0m\n", + "Past Error (solution, 2): Value error, Code is not syntactically valid: unexpected character after line continuation character (, line 1): code (error type: value_error)\n", + "\n", + "Past Error (solution, 3): Input should be a valid string: prompt (error type: string_type)\n", + "\n", + "Past Error (solution, 4): Value error, Code is not syntactically valid: unexpected character after line continuation character (, line 1): code (error type: value_error)\n", + "\n", + "Solution:\u001b[32m {\"code\": \"def has_close_elements(numbers: List[float], threshold: float) -> bool:\\\\n for i in range(len(numbers)):\\\\n for j in range(i+1, len(numbers)):\\\\n if abs(numbers[i] - numbers[j]) < threshold:\\\\n return True\\\\n return False\"}\u001b[0m\n", "\n", "\n", "\n" @@ -291,7 +335,96 @@ } ], "source": [ - "lm.inspect_history(n=3)" + "lm.inspect_history(n=3)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "def has_close_elements(numbers: List[float], threshold: float) -> bool:\n", + " for i in range(len(numbers)):\n", + " for j in range(i+1, len(numbers)):\n", + " if abs(numbers[i] - numbers[j]) < threshold:\n", + " return True\n", + " return False\n" + ] + } + ], + "source": [ + "d = {\"code\": \"def has_close_elements(numbers: List[float], threshold: float) -> bool:\\n for i in range(len(numbers)):\\n for j in range(i+1, len(numbers)):\\n if abs(numbers[i] - numbers[j]) < threshold:\\n return True\\n return False\"}\n", + "print(d[\"code\"])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "ename": "JSONDecodeError", + "evalue": "Invalid control character at: line 1 column 82 (char 81)", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mJSONDecodeError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[7], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mjson\u001b[39;00m\n\u001b[0;32m----> 2\u001b[0m \u001b[43mjson\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mloads\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43m{\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcode\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m: \u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mdef 
has_close_elements(numbers: List[float], threshold: float) -> bool:\u001b[39;49m\u001b[38;5;130;43;01m\\n\u001b[39;49;00m\u001b[38;5;124;43m for i in range(len(numbers)):\u001b[39;49m\u001b[38;5;130;43;01m\\n\u001b[39;49;00m\u001b[38;5;124;43m for j in range(i+1, len(numbers)):\u001b[39;49m\u001b[38;5;130;43;01m\\n\u001b[39;49;00m\u001b[38;5;124;43m if abs(numbers[i] - numbers[j]) < threshold:\u001b[39;49m\u001b[38;5;130;43;01m\\n\u001b[39;49;00m\u001b[38;5;124;43m return True\u001b[39;49m\u001b[38;5;130;43;01m\\n\u001b[39;49;00m\u001b[38;5;124;43m return False\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m}\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m/opt/homebrew/Cellar/python@3.11/3.11.8/Frameworks/Python.framework/Versions/3.11/lib/python3.11/json/__init__.py:346\u001b[0m, in \u001b[0;36mloads\u001b[0;34m(s, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw)\u001b[0m\n\u001b[1;32m 341\u001b[0m s \u001b[38;5;241m=\u001b[39m s\u001b[38;5;241m.\u001b[39mdecode(detect_encoding(s), \u001b[38;5;124m'\u001b[39m\u001b[38;5;124msurrogatepass\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m 343\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m (\u001b[38;5;28mcls\u001b[39m \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m object_hook \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m\n\u001b[1;32m 344\u001b[0m parse_int \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m parse_float \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m\n\u001b[1;32m 345\u001b[0m parse_constant \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m object_pairs_hook \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m kw):\n\u001b[0;32m--> 346\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43m_default_decoder\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdecode\u001b[49m\u001b[43m(\u001b[49m\u001b[43ms\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 347\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 348\u001b[0m \u001b[38;5;28mcls\u001b[39m \u001b[38;5;241m=\u001b[39m JSONDecoder\n", + "File \u001b[0;32m/opt/homebrew/Cellar/python@3.11/3.11.8/Frameworks/Python.framework/Versions/3.11/lib/python3.11/json/decoder.py:337\u001b[0m, in \u001b[0;36mJSONDecoder.decode\u001b[0;34m(self, s, _w)\u001b[0m\n\u001b[1;32m 332\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mdecode\u001b[39m(\u001b[38;5;28mself\u001b[39m, s, _w\u001b[38;5;241m=\u001b[39mWHITESPACE\u001b[38;5;241m.\u001b[39mmatch):\n\u001b[1;32m 333\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Return the Python representation of ``s`` (a ``str`` instance\u001b[39;00m\n\u001b[1;32m 334\u001b[0m \u001b[38;5;124;03m containing a JSON document).\u001b[39;00m\n\u001b[1;32m 335\u001b[0m \n\u001b[1;32m 336\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m--> 337\u001b[0m obj, end \u001b[38;5;241m=\u001b[39m 
\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mraw_decode\u001b[49m\u001b[43m(\u001b[49m\u001b[43ms\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43midx\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_w\u001b[49m\u001b[43m(\u001b[49m\u001b[43ms\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mend\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 338\u001b[0m end \u001b[38;5;241m=\u001b[39m _w(s, end)\u001b[38;5;241m.\u001b[39mend()\n\u001b[1;32m 339\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m end \u001b[38;5;241m!=\u001b[39m \u001b[38;5;28mlen\u001b[39m(s):\n", + "File \u001b[0;32m/opt/homebrew/Cellar/python@3.11/3.11.8/Frameworks/Python.framework/Versions/3.11/lib/python3.11/json/decoder.py:353\u001b[0m, in \u001b[0;36mJSONDecoder.raw_decode\u001b[0;34m(self, s, idx)\u001b[0m\n\u001b[1;32m 344\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Decode a JSON document from ``s`` (a ``str`` beginning with\u001b[39;00m\n\u001b[1;32m 345\u001b[0m \u001b[38;5;124;03ma JSON document) and return a 2-tuple of the Python\u001b[39;00m\n\u001b[1;32m 346\u001b[0m \u001b[38;5;124;03mrepresentation and the index in ``s`` where the document ended.\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 350\u001b[0m \n\u001b[1;32m 351\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 352\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 353\u001b[0m obj, end \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mscan_once\u001b[49m\u001b[43m(\u001b[49m\u001b[43ms\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43midx\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 354\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mStopIteration\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m err:\n\u001b[1;32m 355\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m JSONDecodeError(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mExpecting value\u001b[39m\u001b[38;5;124m\"\u001b[39m, s, err\u001b[38;5;241m.\u001b[39mvalue) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n", + "\u001b[0;31mJSONDecodeError\u001b[0m: Invalid control character at: line 1 column 82 (char 81)" + ] + } + ], + "source": [ + "import json\n", + "json.loads('{\"code\": \"def has_close_elements(numbers: List[float], threshold: float) -> bool:\\n for i in range(len(numbers)):\\n for j in range(i+1, len(numbers)):\\n if abs(numbers[i] - numbers[j]) < threshold:\\n return True\\n return False\"}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'code': 'def has_close_elements(numbers: List[float], threshold: float) -> bool:\\n for i in range(len(numbers)):\\n for j in range(i+1, len(numbers)):\\n if abs(numbers[i] - numbers[j]) < threshold:\\n return True\\n return False'}" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import ujson\n", + "ujson.loads('{\"code\": \"def has_close_elements(numbers: List[float], threshold: float) -> bool:\\n for i in range(len(numbers)):\\n for j in range(i+1, len(numbers)):\\n if abs(numbers[i] - numbers[j]) < threshold:\\n return True\\n return False\"} ')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + 
"text/plain": [ + "{'code': 'def has_close_elements(numbers: List[float], threshold: float) -> bool:\\n for i in range(len(numbers)):\\n for j in range(i+1, len(numbers)):\\n if abs(numbers[i] - numbers[j]) < threshold:\\n return True\\n return False'}" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "json.loads(ujson.dumps(ujson.loads('{\"code\": \"def has_close_elements(numbers: List[float], threshold: float) -> bool:\\n for i in range(len(numbers)):\\n for j in range(i+1, len(numbers)):\\n if abs(numbers[i] - numbers[j]) < threshold:\\n return True\\n return False\"} ')))" ] }, { @@ -313,7 +446,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -341,7 +474,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -410,7 +543,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -677,7 +810,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -988,7 +1121,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -1207,7 +1340,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.11.8" } }, "nbformat": 4, From 8171ba13ec1fcd46db88b2bcc654e243b18ac4ca Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Sun, 3 Mar 2024 15:40:48 -0800 Subject: [PATCH 063/243] Changed max retries back to 3 --- dspy/functional/functional.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py index 8ca45523f4..da5e52f94c 100644 --- a/dspy/functional/functional.py +++ b/dspy/functional/functional.py @@ -14,7 +14,7 @@ from dspy.signatures.signature import ensure_signature, make_signature -MAX_RETRIES = 5 +MAX_RETRIES = 3 def predictor(func) -> dspy.Module: From a637bf47b527cd7373ac32fcc5db2f662e81b01f Mon Sep 17 00:00:00 2001 From: Isaac Miller <17116851+isaacbmiller@users.noreply.github.com> Date: Sun, 3 Mar 2024 22:23:57 -0600 Subject: [PATCH 064/243] ci(dspy): Add Ruff linting workflow (#538) * Add Ruff linting workflow * Update GitHub Actions workflows * Make ruff apply but not fail * Style fixes by Ruff * Combine Workflows * Add caching * Add import related rules * Automatic Style fixes * Try and fix caching * Fix import * Use natve python dep caching * Update actions/checkout to v4 * Use autofix in tests * Try to cache "install poetry" * Empty-Commit to test caching * Remove workflow deps * Rename action * Update workflow name * Fix Docker stop command and subprocess run check * Fix functional imports --------- Co-authored-by: isaacbmiller --- .github/workflows/run_tests.yml | 73 +++++++++++++++++++--- dsp/__init__.py | 1 - dsp/evaluation/utils.py | 5 +- dsp/modules/__init__.py | 26 ++++---- dsp/modules/aws_lm.py | 5 +- dsp/modules/azurecognitivesearch.py | 3 +- dsp/modules/bedrock.py | 3 +- dsp/modules/cache_utils.py | 5 +- dsp/modules/clarifai.py | 1 + dsp/modules/cohere.py | 1 + dsp/modules/colbertv2.py | 4 +- dsp/modules/databricks.py | 2 +- dsp/modules/finetuning/finetune_hf.py | 20 +++--- dsp/modules/google.py | 3 +- dsp/modules/gpt3.py | 4 +- dsp/modules/hf.py | 5 +- dsp/modules/hf_client.py | 15 +++-- dsp/modules/ollama.py | 6 +- dsp/modules/pyserini.py | 5 +- dsp/primitives/__init__.py | 8 +-- 
dsp/primitives/compiler.py | 9 +-- dsp/primitives/demonstrate.py | 2 +- dsp/primitives/inspect.py | 5 +- dsp/primitives/predict.py | 8 +-- dsp/primitives/primitives.py | 4 +- dsp/templates/__init__.py | 2 +- dsp/templates/template_v2.py | 8 ++- dsp/templates/template_v3.py | 3 +- dsp/templates/utils.py | 2 +- dsp/utils/__init__.py | 4 +- dsp/utils/dpr.py | 3 +- dsp/utils/metrics.py | 2 +- dsp/utils/settings.py | 3 +- dsp/utils/settings_v2.py | 5 +- dsp/utils/utils.py | 7 ++- dspy/__init__.py | 18 ++---- dspy/datasets/__init__.py | 4 +- dspy/datasets/colors.py | 1 + dspy/datasets/dataloader.py | 10 +-- dspy/datasets/dataset.py | 5 +- dspy/datasets/gsm8k.py | 3 +- dspy/datasets/hotpotqa.py | 1 + dspy/evaluate/__init__.py | 5 +- dspy/evaluate/auto_evaluation.py | 3 +- dspy/evaluate/evaluate.py | 11 ++-- dspy/evaluate/metrics.py | 1 + dspy/experimental/__init__.py | 2 +- dspy/experimental/synthesizer.py | 19 +++--- dspy/experimental/synthetic_data.py | 9 ++- dspy/functional/__init__.py | 2 +- dspy/functional/functional.py | 19 +++--- dspy/predict/__init__.py | 10 +-- dspy/predict/aggregation.py | 3 +- dspy/predict/chain_of_thought.py | 1 - dspy/predict/chain_of_thought_with_hint.py | 2 - dspy/predict/knn.py | 3 + dspy/predict/langchain.py | 7 +-- dspy/predict/multi_chain_comparison.py | 4 +- dspy/predict/predict.py | 4 +- dspy/predict/program_of_thought.py | 4 +- dspy/predict/react.py | 1 + dspy/predict/retry.py | 3 +- dspy/primitives/__init__.py | 4 +- dspy/primitives/assertions.py | 5 +- dspy/primitives/module.py | 1 + dspy/primitives/program.py | 5 +- dspy/primitives/python_interpreter.py | 4 +- dspy/retrieve/chromadb_rm.py | 12 ++-- dspy/retrieve/databricks_rm.py | 9 ++- dspy/retrieve/deeplake_rm.py | 6 +- dspy/retrieve/marqo_rm.py | 1 + dspy/retrieve/mongodb_atlas_rm.py | 14 +++-- dspy/retrieve/pgvector_rm.py | 8 ++- dspy/retrieve/pinecone_rm.py | 9 ++- dspy/retrieve/qdrant_rm.py | 5 +- dspy/retrieve/retrieve.py | 5 +- dspy/retrieve/vectara_rm.py | 8 +-- dspy/retrieve/weaviate_rm.py | 4 +- dspy/retrieve/you_rm.py | 7 ++- dspy/signatures/signature.py | 9 +-- dspy/teleprompt/__init__.py | 10 +-- dspy/teleprompt/bootstrap.py | 7 +-- dspy/teleprompt/finetune.py | 9 +-- dspy/teleprompt/knn_fewshot.py | 6 +- dspy/teleprompt/random_search.py | 4 +- dspy/teleprompt/signature_opt.py | 7 ++- dspy/teleprompt/signature_opt_bayesian.py | 16 ++--- dspy/teleprompt/teleprompt_optuna.py | 2 +- dspy/utils/dummies.py | 6 +- examples/longformqa/utils.py | 4 +- inspect-app/app.py | 7 ++- pyproject.toml | 2 + setup.py | 2 +- testing/optimizer_tester.py | 25 ++++---- testing/tasks/__init__.py | 4 +- testing/tasks/base_task.py | 1 + testing/tasks/biodex.py | 27 ++++---- testing/tasks/gsm8k.py | 11 ++-- testing/tasks/hotpotqa.py | 4 +- testing/tasks/scone.py | 6 +- testing/tasks/tweet.py | 12 ++-- testing/tasks/tweet_metric.py | 16 +++-- 102 files changed, 428 insertions(+), 293 deletions(-) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 4a31b86453..52ac21e3c5 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -1,4 +1,4 @@ -name: Build and Run Tests +name: Fix, Test, and Build on: push: @@ -10,24 +10,54 @@ env: POETRY_VERSION: "1.6.1" jobs: + fix: + name: Apply Ruff Fix + runs-on: ubuntu-latest + permissions: + contents: write + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v5 + - uses: chartboost/ruff-action@v1 + with: + args: --fix-only + - uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: 
"Automatic Style fixes" + test: - name: Test + name: Run Tests runs-on: ubuntu-latest strategy: matrix: python-version: ["3.9"] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 + with: + ref: ${{ github.head_ref }} + - name: Load cached Poetry installation + id: cached-poetry + uses: actions/cache@v3 + with: + path: ~/.local + key: poetry-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }} + - name: Install Poetry + if: steps.cached-poetry.outputs.cache-hit != 'true' + uses: snok/install-poetry@v1 - name: Set up python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - - name: Install Poetry - uses: snok/install-poetry@v1 + cache: "poetry" - name: Install dependencies - run: poetry install + run: poetry install --no-interaction --no-root + - name: Run lint with tests + uses: chartboost/ruff-action@v1 + with: + args: --fix-only - name: Run tests with pytest run: poetry run pytest tests/ + build_poetry: name: Build Poetry runs-on: ubuntu-latest @@ -35,13 +65,23 @@ jobs: matrix: python-version: ["3.9"] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 + with: + ref: ${{ github.head_ref }} + - name: Load cached Poetry installation + id: cached-poetry + uses: actions/cache@v3 + with: + path: ~/.local + key: poetry-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }} + - name: Install Poetry + if: steps.cached-poetry.outputs.cache-hit != 'true' + uses: snok/install-poetry@v1 - name: Set up python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - - name: Install Poetry - uses: snok/install-poetry@v1 + cache: "poetry" - name: Build run: poetry build - name: Install built package @@ -50,6 +90,7 @@ jobs: run: python -c "import dspy" - name: Test import dsp run: python -c "import dsp" + build_setup: name: Build Setup runs-on: ubuntu-latest @@ -57,10 +98,22 @@ jobs: matrix: python-version: ["3.9"] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 + with: + ref: ${{ github.head_ref }} + - name: Load cached Poetry installation + id: cached-poetry + uses: actions/cache@v3 + with: + path: ~/.local + key: poetry-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }} + - name: Install Poetry + if: steps.cached-poetry.outputs.cache-hit != 'true' + uses: snok/install-poetry@v1 - name: Set up python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} + cache: "poetry" - name: Run setup.py build run: python setup.py build diff --git a/dsp/__init__.py b/dsp/__init__.py index fc8a742415..4f98315f13 100644 --- a/dsp/__init__.py +++ b/dsp/__init__.py @@ -3,7 +3,6 @@ from .templates import * from .utils import settings - """ TODO: diff --git a/dsp/evaluation/utils.py b/dsp/evaluation/utils.py index 1bfd740e4f..7b6a477802 100644 --- a/dsp/evaluation/utils.py +++ b/dsp/evaluation/utils.py @@ -1,7 +1,8 @@ -import dsp -import tqdm import pandas as pd +import tqdm + +import dsp try: from IPython.display import display as ipython_display diff --git a/dsp/modules/__init__.py b/dsp/modules/__init__.py index d824d3fa57..241a349c58 100644 --- a/dsp/modules/__init__.py +++ b/dsp/modules/__init__.py @@ -1,19 +1,15 @@ -from .cache_utils import * from .azure_openai import AzureOpenAI -from .gpt3 import * +from .bedrock import * +from .cache_utils import * +from .clarifai import * +from .cohere import * +from .colbertv2 import ColBERTv2 from .databricks import * +from .google 
import * +from .gpt3 import * from .hf import HFModel -from .colbertv2 import ColBERTv2 -from .sentence_vectorizer import * -from .cohere import * -from .sbert import * -from .pyserini import * +from .hf_client import Anyscale, HFClientTGI, Together from .ollama import * -from .clarifai import * -from .bedrock import * -from .google import * - - -from .hf_client import HFClientTGI -from .hf_client import Anyscale -from .hf_client import Together +from .pyserini import * +from .sbert import * +from .sentence_vectorizer import * diff --git a/dsp/modules/aws_lm.py b/dsp/modules/aws_lm.py index 83674db10a..a44e30a492 100644 --- a/dsp/modules/aws_lm.py +++ b/dsp/modules/aws_lm.py @@ -4,10 +4,11 @@ from __future__ import annotations -from abc import abstractmethod +import json import logging +from abc import abstractmethod from typing import Any, Literal -import json + from dsp.modules.lm import LM # Heuristic translating number of chars to tokens diff --git a/dsp/modules/azurecognitivesearch.py b/dsp/modules/azurecognitivesearch.py index e4ff2e9960..fedc59b015 100644 --- a/dsp/modules/azurecognitivesearch.py +++ b/dsp/modules/azurecognitivesearch.py @@ -1,6 +1,7 @@ -from typing import Union, Any +from typing import Any, Union from dsp.utils import dotdict + try: from azure.core.credentials import AzureKeyCredential from azure.search.documents import SearchClient diff --git a/dsp/modules/bedrock.py b/dsp/modules/bedrock.py index c52ae61970..ac8b42d2d4 100644 --- a/dsp/modules/bedrock.py +++ b/dsp/modules/bedrock.py @@ -1,7 +1,8 @@ from __future__ import annotations -from typing import Any import json +from typing import Any + from dsp.modules.aws_lm import AWSLM diff --git a/dsp/modules/cache_utils.py b/dsp/modules/cache_utils.py index 78270c879a..e6c09fe716 100644 --- a/dsp/modules/cache_utils.py +++ b/dsp/modules/cache_utils.py @@ -1,12 +1,11 @@ import os - +from functools import wraps from pathlib import Path + from joblib import Memory -from functools import wraps from dsp.utils import dotdict - cache_turn_on = True diff --git a/dsp/modules/clarifai.py b/dsp/modules/clarifai.py index 8a691a4bef..2d5839c62b 100644 --- a/dsp/modules/clarifai.py +++ b/dsp/modules/clarifai.py @@ -3,6 +3,7 @@ from dsp.modules.lm import LM + class ClarifaiLLM(LM): """Integration to call models hosted in clarifai platform. diff --git a/dsp/modules/cohere.py b/dsp/modules/cohere.py index 405eaea483..7308d59355 100644 --- a/dsp/modules/cohere.py +++ b/dsp/modules/cohere.py @@ -1,5 +1,6 @@ import math from typing import Any, Optional + import backoff from dsp.modules.lm import LM diff --git a/dsp/modules/colbertv2.py b/dsp/modules/colbertv2.py index 1b33d077a9..8ff3c16225 100644 --- a/dsp/modules/colbertv2.py +++ b/dsp/modules/colbertv2.py @@ -1,11 +1,11 @@ import functools -from typing import Optional, Union, Any +from typing import Any, Optional, Union + import requests from dsp.modules.cache_utils import CacheMemory, NotebookCacheMemory from dsp.utils import dotdict - # TODO: Ideally, this takes the name of the index and looks up its port. 
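Most of the hunks in this commit are mechanical reorderings produced by Ruff's isort-compatible `I` rules (added to the `select` list in pyproject.toml later in this patch): imports are grouped as standard library, then third-party, then first-party/relative, with each group alphabetized and separated by a blank line. A minimal sketch of the layout the autofix converges on (the specific modules are illustrative, not taken from any one hunk, and assume `requests` and the local `dsp` package are importable):

    # Group 1: standard library, alphabetized.
    import os
    import random

    # Group 2: third-party packages.
    import requests

    # Group 3: first-party / relative imports.
    from dsp.utils import dotdict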
diff --git a/dsp/modules/databricks.py b/dsp/modules/databricks.py index 8f7d7d6e87..7d90e4fc27 100644 --- a/dsp/modules/databricks.py +++ b/dsp/modules/databricks.py @@ -19,8 +19,8 @@ from dsp.modules.gpt3 import GPT3 try: - from openai.openai_object import OpenAIObject import openai.error + from openai.openai_object import OpenAIObject ERRORS = (openai.error.RateLimitError, openai.error.ServiceUnavailableError, openai.error.APIError) except Exception: ERRORS = (openai.RateLimitError, openai.APIError) diff --git a/dsp/modules/finetuning/finetune_hf.py b/dsp/modules/finetuning/finetune_hf.py index 0ade213bae..a22ab10fcb 100644 --- a/dsp/modules/finetuning/finetune_hf.py +++ b/dsp/modules/finetuning/finetune_hf.py @@ -1,28 +1,30 @@ # Adapted from: https://www.philschmid.de/fine-tune-flan-t5#3-fine-tune-and-evaluate-flan-t5 -import os -import json import copy import glob -import torch +import json +import os import warnings +from dataclasses import dataclass + import evaluate import numpy as np +import torch from datasets import Dataset -from dataclasses import dataclass from transformers import ( - set_seed, AutoConfig, - AutoModelForSeq2SeqLM, AutoModelForCausalLM, + AutoModelForSeq2SeqLM, AutoTokenizer, + DataCollatorForSeq2Seq, PreTrainedTokenizer, - Trainer, Seq2SeqTrainer, - TrainingArguments, Seq2SeqTrainingArguments, - DataCollatorForSeq2Seq, + Trainer, + TrainingArguments, + set_seed, ) + # from peft import get_peft_model, LoraConfig, TaskType from transformers.trainer_callback import TrainerCallback diff --git a/dsp/modules/google.py b/dsp/modules/google.py index 9a1c3937c1..5d97534ea8 100644 --- a/dsp/modules/google.py +++ b/dsp/modules/google.py @@ -1,6 +1,7 @@ import os -from typing import Any, Optional from collections.abc import Iterable +from typing import Any, Optional + import backoff from dsp.modules.lm import LM diff --git a/dsp/modules/gpt3.py b/dsp/modules/gpt3.py index ad440f2348..d0b343af3c 100644 --- a/dsp/modules/gpt3.py +++ b/dsp/modules/gpt3.py @@ -11,10 +11,10 @@ import json from typing import Any, Literal, Optional, cast -import dsp import backoff import openai +import dsp from dsp.modules.cache_utils import CacheMemory, NotebookCacheMemory, cache_turn_on from dsp.modules.lm import LM @@ -24,8 +24,8 @@ OPENAI_LEGACY = True try: - from openai.openai_object import OpenAIObject import openai.error + from openai.openai_object import OpenAIObject ERRORS = ( openai.error.RateLimitError, diff --git a/dsp/modules/hf.py b/dsp/modules/hf.py index 3c9306a8f8..aad0c0e36c 100644 --- a/dsp/modules/hf.py +++ b/dsp/modules/hf.py @@ -1,8 +1,9 @@ # from peft import PeftConfig, PeftModel # from transformers import AutoModelForSeq2SeqLM, AutoModelForCausalLM, AutoTokenizer, AutoConfig -from typing import Optional, Literal +from typing import Literal, Optional from dsp.modules.lm import LM + # from dsp.modules.finetuning.finetune_hf import preprocess_prompt def openai_to_hf(**kwargs): @@ -43,8 +44,8 @@ def __init__(self, model: str, checkpoint: Optional[str] = None, is_client: bool self.device_map = hf_device_map if not self.is_client: try: - from transformers import AutoModelForSeq2SeqLM, AutoModelForCausalLM, AutoTokenizer, AutoConfig import torch + from transformers import AutoConfig, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer except ImportError as exc: raise ModuleNotFoundError( "You need to install Hugging Face transformers library to use HF models.", diff --git a/dsp/modules/hf_client.py b/dsp/modules/hf_client.py index 4def628199..fb92c48d53 100644 --- 
a/dsp/modules/hf_client.py +++ b/dsp/modules/hf_client.py @@ -1,15 +1,15 @@ import os import random -import requests -from dsp.modules.hf import HFModel, openai_to_hf -from dsp.modules.cache_utils import CacheMemory, NotebookCacheMemory -import subprocess import re import shutil +import subprocess # from dsp.modules.adapter import TurboAdapter, DavinciAdapter, LlamaAdapter - import backoff +import requests + +from dsp.modules.cache_utils import CacheMemory, NotebookCacheMemory +from dsp.modules.hf import HFModel, openai_to_hf ERRORS = (Exception) @@ -173,7 +173,7 @@ def close_server(self, port): container_id = match.group(1) port_mapping = subprocess.check_output(['docker', 'port', container_id]).decode().strip() if f'0.0.0.0:{port}' in port_mapping: - subprocess.run(['docker', 'stop', container_id]) + subprocess.run(['docker', 'stop', container_id], check=False) def run_server(self, port, model_name=None, model_path=None, env_variable=None, gpus="all", num_shard=1, max_input_length=4000, max_total_tokens=4096, max_best_of=100): self.close_server(port) @@ -355,8 +355,7 @@ class ChatModuleClient(HFModel): def __init__(self, model, model_path): super().__init__(model=model, is_client=True) - from mlc_chat import ChatModule - from mlc_chat import ChatConfig + from mlc_chat import ChatConfig, ChatModule self.cm = ChatModule( model=model, lib_path=model_path, chat_config=ChatConfig(conv_template="LM"), diff --git a/dsp/modules/ollama.py b/dsp/modules/ollama.py index a2e3e01397..27304d271e 100644 --- a/dsp/modules/ollama.py +++ b/dsp/modules/ollama.py @@ -1,10 +1,10 @@ -from dsp.modules.lm import LM -from typing import Any, Literal - import datetime import hashlib +from typing import Any, Literal + import requests +from dsp.modules.lm import LM def post_request_metadata(model_name, prompt): diff --git a/dsp/modules/pyserini.py b/dsp/modules/pyserini.py index 94ce970ea2..5523a76e77 100644 --- a/dsp/modules/pyserini.py +++ b/dsp/modules/pyserini.py @@ -1,5 +1,6 @@ -from typing import Union import json +from typing import Union + from datasets import Dataset from dsp.utils import dotdict @@ -30,8 +31,8 @@ def __init__(self, """ # Keep pyserini as an optional dependency + from pyserini.prebuilt_index_info import FAISS_INDEX_INFO, IMPACT_INDEX_INFO, TF_INDEX_INFO from pyserini.search import FaissSearcher - from pyserini.prebuilt_index_info import TF_INDEX_INFO, FAISS_INDEX_INFO, IMPACT_INDEX_INFO self.encoder = FaissSearcher._init_encoder_from_str(query_encoder) self.dataset = dataset diff --git a/dsp/primitives/__init__.py b/dsp/primitives/__init__.py index 3798444a1e..ef278efa6b 100644 --- a/dsp/primitives/__init__.py +++ b/dsp/primitives/__init__.py @@ -1,6 +1,6 @@ -from .primitives import * +from .compiler import * from .demonstrate import * -from .search import * -from .predict import * from .inspect import * -from .compiler import * +from .predict import * +from .primitives import * +from .search import * diff --git a/dsp/primitives/compiler.py b/dsp/primitives/compiler.py index 996919e856..625ae23ac7 100644 --- a/dsp/primitives/compiler.py +++ b/dsp/primitives/compiler.py @@ -1,12 +1,13 @@ import os +import random +import subprocess import time + import tqdm import ujson -import random -import subprocess +from datasets.fingerprint import Hasher import dsp -from datasets.fingerprint import Hasher if os.environ.get('DSP_NOTEBOOK_CACHEDIR'): training_data_directory = os.path.join(os.environ.get('DSP_NOTEBOOK_CACHEDIR'), 'compiler') @@ -24,7 +25,7 @@ def openai_check_finetune(jobname): command = 
f"""openai api fine_tunes.get -i {jobname}""" print(command) - result = subprocess.run(command.split(), stdout=subprocess.PIPE) + result = subprocess.run(command.split(), stdout=subprocess.PIPE, check=False) output = result.stdout.decode("utf-8").strip() try: diff --git a/dsp/primitives/demonstrate.py b/dsp/primitives/demonstrate.py index ae08e28bae..7dfe126b53 100644 --- a/dsp/primitives/demonstrate.py +++ b/dsp/primitives/demonstrate.py @@ -1,5 +1,5 @@ import random -from typing import Callable, Any +from typing import Any, Callable import numpy as np diff --git a/dsp/primitives/inspect.py b/dsp/primitives/inspect.py index 3fbb05c55a..ebcd3b7872 100644 --- a/dsp/primitives/inspect.py +++ b/dsp/primitives/inspect.py @@ -1,8 +1,9 @@ import inspect -import string +import json import random +import string + import requests -import json class FuncInspector: diff --git a/dsp/primitives/predict.py b/dsp/primitives/predict.py index c89256b916..41edf6c51c 100644 --- a/dsp/primitives/predict.py +++ b/dsp/primitives/predict.py @@ -1,11 +1,11 @@ from collections import Counter -from typing import Callable, Any, Optional +from typing import Any, Callable, Optional import dsp -from dsp.utils import zipstar, normalize_text -from dsp.utils.utils import dotdict -from dsp.templates.template_v3 import Template from dsp.primitives.demonstrate import Example +from dsp.templates.template_v3 import Template +from dsp.utils import normalize_text, zipstar +from dsp.utils.utils import dotdict class Completions: diff --git a/dsp/primitives/primitives.py b/dsp/primitives/primitives.py index a7851ea33d..2e4c4bfc07 100644 --- a/dsp/primitives/primitives.py +++ b/dsp/primitives/primitives.py @@ -1,6 +1,8 @@ -import dsp from functools import wraps +import dsp + + # applied right to left (innermost first, like function calls) def compose_decorators(*decorators): def decorator(func): diff --git a/dsp/templates/__init__.py b/dsp/templates/__init__.py index b4aed76438..5eaae95f75 100644 --- a/dsp/templates/__init__.py +++ b/dsp/templates/__init__.py @@ -1,4 +1,4 @@ -from .utils import * from .template_v2 import * from .template_v3 import * +from .utils import * diff --git a/dsp/templates/template_v2.py b/dsp/templates/template_v2.py index d6e61642ea..236fd1d64e 100644 --- a/dsp/templates/template_v2.py +++ b/dsp/templates/template_v2.py @@ -1,9 +1,11 @@ -from collections import namedtuple import re -from typing import Union, Any +from collections import namedtuple +from typing import Any, Union + import dsp from dsp.primitives.demonstrate import Example -from .utils import passages2text, format_answers + +from .utils import format_answers, passages2text Field = namedtuple("Field", "name separator input_variable output_variable description") diff --git a/dsp/templates/template_v3.py b/dsp/templates/template_v3.py index fb097f8770..605705c153 100644 --- a/dsp/templates/template_v3.py +++ b/dsp/templates/template_v3.py @@ -1,5 +1,6 @@ from typing import Callable -from dsp.templates import TemplateV2, passages2text, format_answers, Field + +from dsp.templates import Field, TemplateV2, format_answers, passages2text class Type: diff --git a/dsp/templates/utils.py b/dsp/templates/utils.py index 0b0d4891f3..53624ad2d8 100644 --- a/dsp/templates/utils.py +++ b/dsp/templates/utils.py @@ -1,4 +1,4 @@ -from typing import Union, Optional +from typing import Optional, Union def passages2text(passages: Union[str, list, tuple]) -> str: diff --git a/dsp/utils/__init__.py b/dsp/utils/__init__.py index c41f748328..63624350a7 100644 --- 
a/dsp/utils/__init__.py +++ b/dsp/utils/__init__.py @@ -1,4 +1,4 @@ +from dsp.utils.dpr import * +from dsp.utils.metrics import * from dsp.utils.settings import * from dsp.utils.utils import * -from dsp.utils.metrics import * -from dsp.utils.dpr import * diff --git a/dsp/utils/dpr.py b/dsp/utils/dpr.py index 2174ea4da6..d4d18f84ff 100644 --- a/dsp/utils/dpr.py +++ b/dsp/utils/dpr.py @@ -4,9 +4,10 @@ Original license: https://github.com/facebookresearch/DPR/blob/main/LICENSE """ -import regex import unicodedata +import regex + class Tokens: """A class to represent a list of tokenized text.""" diff --git a/dsp/utils/metrics.py b/dsp/utils/metrics.py index f0d69f00f6..ddbd51ae94 100644 --- a/dsp/utils/metrics.py +++ b/dsp/utils/metrics.py @@ -1,8 +1,8 @@ import re import string import unicodedata - from collections import Counter + from dsp.utils.utils import print_message diff --git a/dsp/utils/settings.py b/dsp/utils/settings.py index 462f99d429..1de0ada7ec 100644 --- a/dsp/utils/settings.py +++ b/dsp/utils/settings.py @@ -1,6 +1,7 @@ +import threading from contextlib import contextmanager + from dsp.utils.utils import dotdict -import threading class Settings: diff --git a/dsp/utils/settings_v2.py b/dsp/utils/settings_v2.py index 2998ca7cd4..6652474d3d 100644 --- a/dsp/utils/settings_v2.py +++ b/dsp/utils/settings_v2.py @@ -1,7 +1,8 @@ +import copy import threading -from contextlib import contextmanager from concurrent.futures import ThreadPoolExecutor, as_completed -import copy +from contextlib import contextmanager + class Settings: def __init__(self): diff --git a/dsp/utils/utils.py b/dsp/utils/utils.py index aeba5c9700..d5a30b0e39 100644 --- a/dsp/utils/utils.py +++ b/dsp/utils/utils.py @@ -1,10 +1,10 @@ -import os -import tqdm import datetime import itertools - +import os from collections import defaultdict +import tqdm + def print_message(*s, condition=True, pad=False, sep=None): s = " ".join([str(x) for x in s]) @@ -77,6 +77,7 @@ def batch(group, bsize, provide_offset=False): import copy + class dotdict(dict): def __getattr__(self, key): if key.startswith('__') and key.endswith('__'): diff --git a/dspy/__init__.py b/dspy/__init__.py index 98fe6fba89..b61ea8a017 100644 --- a/dspy/__init__.py +++ b/dspy/__init__.py @@ -1,18 +1,12 @@ -from dsp.modules.hf_client import ChatModuleClient -from dsp.modules.hf_client import HFServerTGI, HFClientVLLM, HFClientSGLang -from .signatures import * - -from .retrieve import * -from .predict import * -from .primitives import * - # from .evaluation import * - - # FIXME: - - import dsp +from dsp.modules.hf_client import ChatModuleClient, HFClientSGLang, HFClientVLLM, HFServerTGI + +from .predict import * +from .primitives import * +from .retrieve import * +from .signatures import * settings = dsp.settings diff --git a/dspy/datasets/__init__.py b/dspy/datasets/__init__.py index 62409d6ed4..bf6e3dd019 100644 --- a/dspy/datasets/__init__.py +++ b/dspy/datasets/__init__.py @@ -1,4 +1,4 @@ +from .colors import Colors +from .dataloader import DataLoader from .dataset import Dataset from .hotpotqa import HotPotQA -from .colors import Colors -from .dataloader import DataLoader \ No newline at end of file diff --git a/dspy/datasets/colors.py b/dspy/datasets/colors.py index 265badf270..edf2e41697 100644 --- a/dspy/datasets/colors.py +++ b/dspy/datasets/colors.py @@ -1,4 +1,5 @@ import random + from dspy.datasets.dataset import Dataset ### A bunch of colors, originally from matplotlib diff --git a/dspy/datasets/dataloader.py b/dspy/datasets/dataloader.py index 
8900c107f5..c9da0270db 100644 --- a/dspy/datasets/dataloader.py +++ b/dspy/datasets/dataloader.py @@ -1,10 +1,12 @@ -import dspy import random -from dspy.datasets import Dataset +from collections.abc import Mapping +from typing import List, Tuple, Union from datasets import load_dataset -from typing import Union, List, Tuple -from collections.abc import Mapping + +import dspy +from dspy.datasets.dataset import Dataset + class DataLoader(Dataset): def __init__(self,): diff --git a/dspy/datasets/dataset.py b/dspy/datasets/dataset.py index 9cd279199f..c66c5edc23 100644 --- a/dspy/datasets/dataset.py +++ b/dspy/datasets/dataset.py @@ -1,8 +1,9 @@ -import uuid import random +import uuid -from dspy import Example from dsp.utils import dotdict +from dspy import Example + class Dataset: def __init__(self, train_seed=0, train_size=None, eval_seed=0, dev_size=None, test_size=None): diff --git a/dspy/datasets/gsm8k.py b/dspy/datasets/gsm8k.py index 3b3514862d..328d1aee8d 100644 --- a/dspy/datasets/gsm8k.py +++ b/dspy/datasets/gsm8k.py @@ -1,8 +1,9 @@ -import tqdm import random +import tqdm from datasets import load_dataset + class GSM8K: def __init__(self) -> None: super().__init__() diff --git a/dspy/datasets/hotpotqa.py b/dspy/datasets/hotpotqa.py index f1cc734824..9fbf086179 100644 --- a/dspy/datasets/hotpotqa.py +++ b/dspy/datasets/hotpotqa.py @@ -1,6 +1,7 @@ import random from datasets import load_dataset + from dspy.datasets.dataset import Dataset diff --git a/dspy/evaluate/__init__.py b/dspy/evaluate/__init__.py index eea386c599..2526c2be07 100644 --- a/dspy/evaluate/__init__.py +++ b/dspy/evaluate/__init__.py @@ -1,4 +1,5 @@ +from dsp.utils import EM, normalize_text + +from .auto_evaluation import * from .evaluate import Evaluate from .metrics import * -from .auto_evaluation import * -from dsp.utils import EM, normalize_text diff --git a/dspy/evaluate/auto_evaluation.py b/dspy/evaluate/auto_evaluation.py index 1ec659540c..569a5b16c8 100644 --- a/dspy/evaluate/auto_evaluation.py +++ b/dspy/evaluate/auto_evaluation.py @@ -1,4 +1,5 @@ -import dspy +import dspy + class AnswerCorrectnessSignature(dspy.Signature): """Verify that the predicted answer matches the gold answer.""" diff --git a/dspy/evaluate/evaluate.py b/dspy/evaluate/evaluate.py index 0be330375b..f38bcff38f 100644 --- a/dspy/evaluate/evaluate.py +++ b/dspy/evaluate/evaluate.py @@ -1,11 +1,14 @@ -import dsp -import tqdm -import types import threading +import types + import pandas as pd +import tqdm + +import dsp try: - from IPython.display import display as ipython_display, HTML + from IPython.display import HTML + from IPython.display import display as ipython_display except ImportError: ipython_display = print HTML = lambda x: x diff --git a/dspy/evaluate/metrics.py b/dspy/evaluate/metrics.py index 79c6208af6..0e25b5a119 100644 --- a/dspy/evaluate/metrics.py +++ b/dspy/evaluate/metrics.py @@ -2,6 +2,7 @@ import dsp + def answer_exact_match(example, pred, trace=None, frac=1.0): assert(type(example.answer) is str or type(example.answer) is list) diff --git a/dspy/experimental/__init__.py b/dspy/experimental/__init__.py index b4385639db..36477304c3 100644 --- a/dspy/experimental/__init__.py +++ b/dspy/experimental/__init__.py @@ -1,2 +1,2 @@ -from .synthetic_data import * from .synthesizer import * +from .synthetic_data import * diff --git a/dspy/experimental/synthesizer.py b/dspy/experimental/synthesizer.py index e247d35f3b..a66bf4d2ba 100644 --- a/dspy/experimental/synthesizer.py +++ b/dspy/experimental/synthesizer.py @@ -1,8 +1,11 @@ 
-import dspy import random from typing import List -from tqdm import tqdm, trange + from datasets import Dataset +from tqdm import tqdm, trange + +import dspy + def format_examples(examples: List[dspy.Example]): if isinstance(examples, str): @@ -14,11 +17,11 @@ def format_examples(examples: List[dspy.Example]): input_keys = example.inputs().keys() label_keys = example.labels().keys() - formatted_example += f"Inputs:\n" + formatted_example += "Inputs:\n" for key in input_keys: formatted_example += f"{key}: {example[key]}\n" - formatted_example += f"Outputs:\n" + formatted_example += "Outputs:\n" for key in label_keys: formatted_example += f"{key}: {example[key]}\n" @@ -94,7 +97,7 @@ def _prepare_synthetic_data_predictors(self, input_keys: List[str], output_keys: self.generate_input_data = self.generate_input_data.insert( -1, field_name, - output_field + output_field, ) input_field = dspy.InputField( @@ -104,7 +107,7 @@ def _prepare_synthetic_data_predictors(self, input_keys: List[str], output_keys: self.generate_output_data = self.generate_output_data.insert( -1, field_name, - input_field + input_field, ) for key in tqdm(output_keys, desc="Preparing Output Fields"): @@ -123,7 +126,7 @@ def _prepare_synthetic_data_predictors(self, input_keys: List[str], output_keys: self.generate_output_data = self.generate_output_data.insert( -1, field_name, - output_field + output_field, ) return dspy.ChainOfThought(self.generate_input_data), dspy.Predict(self.generate_output_data) @@ -167,7 +170,7 @@ def export(self, data: List[dspy.Example], path: str, mode: str = None, **kwargs extention = mode or path.split(".")[-1] dataset = Dataset.from_list( - [example.toDict() for example in data] + [example.toDict() for example in data], ) if extention == "csv": diff --git a/dspy/experimental/synthetic_data.py b/dspy/experimental/synthetic_data.py index d5177fcace..37c1403522 100644 --- a/dspy/experimental/synthetic_data.py +++ b/dspy/experimental/synthetic_data.py @@ -1,8 +1,11 @@ -from pydantic import BaseModel -import dspy import random from typing import List, Optional +from pydantic import BaseModel + +import dspy + + class descriptionSignature(dspy.Signature): field_name = dspy.InputField(desc="name of a field") example = dspy.InputField(desc="an example value for the field") @@ -55,7 +58,7 @@ def _prepare_fields(self, properties) -> dict: '__doc__': f"Generates the following outputs: {{{', '.join(properties.keys())}}}.", 'sindex': dspy.InputField(desc="a random string"), **{field_name: dspy.OutputField(desc=properties[field_name].get('description', 'No description')) - for field_name in properties.keys()} + for field_name in properties.keys()}, } # # Usage example diff --git a/dspy/functional/__init__.py b/dspy/functional/__init__.py index 3ccced47b9..746ac93181 100644 --- a/dspy/functional/__init__.py +++ b/dspy/functional/__init__.py @@ -1 +1 @@ -from .functional import cot, predictor, FunctionalModule, TypedPredictor, TypedChainOfThought +from .functional import FunctionalModule, TypedChainOfThought, TypedPredictor, cot, predictor diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py index cb7b3669aa..97bbdb8d89 100644 --- a/dspy/functional/functional.py +++ b/dspy/functional/functional.py @@ -1,18 +1,15 @@ -from collections import defaultdict import inspect -import os -import openai -import dspy +import json import typing +from typing import Annotated, List, Tuple + import pydantic -from typing import Annotated, List, Tuple # noqa: UP035 -from dsp.templates import passages2text 
-import json -from dspy.primitives.prediction import Prediction +import dspy +from dsp.templates.utils import passages2text +from dspy.primitives.prediction import Prediction from dspy.signatures.signature import ensure_signature, make_signature - MAX_RETRIES = 3 @@ -199,7 +196,7 @@ def forward(self, **kwargs) -> dspy.Prediction: else: # If there are no errors, we return the parsed results return Prediction.from_completions( - {key: [r[key] for r in parsed_results] for key in signature.output_fields} + {key: [r[key] for r in parsed_results] for key in signature.output_fields}, ) raise ValueError( "Too many retries trying to get the correct output format. " + "Try simplifying the requirements.", @@ -326,8 +323,8 @@ def gold_passages_retrieved(example, pred, _trace=None) -> bool: def hotpot() -> None: - from dsp.utils import deduplicate import dspy.evaluate + from dsp.utils import deduplicate from dspy.datasets import HotPotQA from dspy.evaluate.evaluate import Evaluate from dspy.teleprompt.bootstrap import BootstrapFewShot diff --git a/dspy/predict/__init__.py b/dspy/predict/__init__.py index 8b0770150b..b176d2a0ed 100644 --- a/dspy/predict/__init__.py +++ b/dspy/predict/__init__.py @@ -1,9 +1,9 @@ -from .predict import Predict +from .aggregation import majority from .chain_of_thought import ChainOfThought -from .multi_chain_comparison import MultiChainComparison from .chain_of_thought_with_hint import ChainOfThoughtWithHint -from .react import ReAct -from .aggregation import majority +from .knn import KNN +from .multi_chain_comparison import MultiChainComparison +from .predict import Predict from .program_of_thought import ProgramOfThought +from .react import ReAct from .retry import Retry -from .knn import KNN \ No newline at end of file diff --git a/dspy/predict/aggregation.py b/dspy/predict/aggregation.py index ca4154aa2d..4cb6df3f91 100644 --- a/dspy/predict/aggregation.py +++ b/dspy/predict/aggregation.py @@ -1,6 +1,5 @@ -from dspy.primitives.prediction import Prediction, Completions from dsp.utils import normalize_text - +from dspy.primitives.prediction import Completions, Prediction default_normalize = lambda s: normalize_text(s) or None diff --git a/dspy/predict/chain_of_thought.py b/dspy/predict/chain_of_thought.py index f374aaabe5..70bab0a7ae 100644 --- a/dspy/predict/chain_of_thought.py +++ b/dspy/predict/chain_of_thought.py @@ -4,7 +4,6 @@ from .predict import Predict - # TODO: FIXME: Insert this right before the *first* output field. Also rewrite this to use the new signature system. # TODO: This shouldn't inherit from Predict. It should be a module that has one or two predictors. diff --git a/dspy/predict/chain_of_thought_with_hint.py b/dspy/predict/chain_of_thought_with_hint.py index 6f78566321..484a2a38a0 100644 --- a/dspy/predict/chain_of_thought_with_hint.py +++ b/dspy/predict/chain_of_thought_with_hint.py @@ -3,8 +3,6 @@ from .predict import Predict - - # TODO: FIXME: Insert this right before the *first* output field. Also rewrite this to use the new signature system. 
class ChainOfThoughtWithHint(Predict): diff --git a/dspy/predict/knn.py b/dspy/predict/knn.py index e0e49fe2b4..e275ccb6ac 100644 --- a/dspy/predict/knn.py +++ b/dspy/predict/knn.py @@ -1,7 +1,10 @@ from typing import List + import numpy as np + import dsp + class KNN: def __init__(self, k: int, trainset: List[dsp.Example]): self.k = k diff --git a/dspy/predict/langchain.py b/dspy/predict/langchain.py index 4be855e8db..86d439f50c 100644 --- a/dspy/predict/langchain.py +++ b/dspy/predict/langchain.py @@ -1,18 +1,17 @@ import copy import random +from langchain_core.pydantic_v1 import Extra +from langchain_core.runnables import Runnable + import dsp import dspy - from dspy.predict.parameter import Parameter from dspy.predict.predict import Predict from dspy.primitives.prediction import Prediction from dspy.signatures.field import InputField, OutputField from dspy.signatures.signature import infer_prefix -from langchain_core.pydantic_v1 import Extra -from langchain_core.runnables import Runnable - # TODO: This class is currently hard to test, because it hardcodes gpt-4 usage: # gpt4T = dspy.OpenAI(model='gpt-4-1106-preview', max_tokens=4000, model_type='chat') diff --git a/dspy/predict/multi_chain_comparison.py b/dspy/predict/multi_chain_comparison.py index ad69b646ee..37b66afadd 100644 --- a/dspy/predict/multi_chain_comparison.py +++ b/dspy/predict/multi_chain_comparison.py @@ -1,8 +1,8 @@ import dspy from dspy.signatures.signature import ensure_signature -from .predict import Predict -from ..primitives.program import Module +from ..primitives.program import Module +from .predict import Predict class MultiChainComparison(Module): diff --git a/dspy/predict/predict.py b/dspy/predict/predict.py index 60e668a836..fe6beb4483 100644 --- a/dspy/predict/predict.py +++ b/dspy/predict/predict.py @@ -1,11 +1,11 @@ -import dsp import random +import dsp from dspy.predict.parameter import Parameter from dspy.primitives.prediction import Prediction - from dspy.signatures.signature import ensure_signature, signature_to_template + class Predict(Parameter): def __init__(self, signature, **config): self.stage = random.randbytes(8).hex() diff --git a/dspy/predict/program_of_thought.py b/dspy/predict/program_of_thought.py index f7e80eed71..1cfb54bd99 100644 --- a/dspy/predict/program_of_thought.py +++ b/dspy/predict/program_of_thought.py @@ -1,8 +1,10 @@ +import re + import dspy from dspy.signatures.signature import ensure_signature + from ..primitives.program import Module from ..primitives.python_interpreter import CodePrompt, PythonInterpreter -import re class ProgramOfThought(Module): diff --git a/dspy/predict/react.py b/dspy/predict/react.py index cf3be8b73c..a465ee710b 100644 --- a/dspy/predict/react.py +++ b/dspy/predict/react.py @@ -1,6 +1,7 @@ import dsp import dspy from dspy.signatures.signature import ensure_signature + from ..primitives.program import Module from .predict import Predict diff --git a/dspy/predict/retry.py b/dspy/predict/retry.py index af1d37f98b..dcef1488f6 100644 --- a/dspy/predict/retry.py +++ b/dspy/predict/retry.py @@ -1,6 +1,7 @@ import copy -import dspy + import dsp +import dspy from .predict import Predict diff --git a/dspy/primitives/__init__.py b/dspy/primitives/__init__.py index b7baf229bf..e667837166 100644 --- a/dspy/primitives/__init__.py +++ b/dspy/primitives/__init__.py @@ -1,5 +1,5 @@ +from .assertions import * from .example import * -from .program import * from .prediction import * -from .assertions import * +from .program import * from .python_interpreter import * 
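The hf_client.py and compiler.py hunks earlier in this commit add `check=False` to `subprocess.run` calls, presumably to satisfy Ruff's subprocess-run-without-check rule from the already-selected `PLW` group: the argument makes the tolerate-nonzero-exit behavior explicit instead of implicit. A self-contained sketch of the two behaviors (the commands are illustrative and assume `git` is on PATH):

    import subprocess

    # check=False documents that a non-zero exit status is handled by the
    # caller through returncode, not through an exception.
    result = subprocess.run(["git", "status"], capture_output=True, check=False)
    print(result.returncode)

    # check=True instead raises CalledProcessError on a non-zero exit status.
    try:
        subprocess.run(["git", "checkout", "no-such-branch"], capture_output=True, check=True)
    except subprocess.CalledProcessError as err:
        print("command failed with code", err.returncode)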
diff --git a/dspy/primitives/assertions.py b/dspy/primitives/assertions.py index 140f8bf6a2..19f1aea3a1 100644 --- a/dspy/primitives/assertions.py +++ b/dspy/primitives/assertions.py @@ -1,9 +1,10 @@ import inspect +import logging +import uuid from typing import Any + import dsp import dspy -import logging -import uuid #################### Assertion Helpers #################### diff --git a/dspy/primitives/module.py b/dspy/primitives/module.py index 1b4a342f71..bee80338f5 100644 --- a/dspy/primitives/module.py +++ b/dspy/primitives/module.py @@ -1,4 +1,5 @@ import copy + import ujson diff --git a/dspy/primitives/program.py b/dspy/primitives/program.py index 5e063bfb00..aa499d9085 100644 --- a/dspy/primitives/program.py +++ b/dspy/primitives/program.py @@ -1,8 +1,9 @@ -from dspy.primitives.module import BaseModule -from dspy.primitives.assertions import * import re +from dspy.primitives.assertions import * +from dspy.primitives.module import BaseModule + class ProgramMeta(type): pass diff --git a/dspy/primitives/python_interpreter.py b/dspy/primitives/python_interpreter.py index fd6166db5e..1b7456c7b0 100644 --- a/dspy/primitives/python_interpreter.py +++ b/dspy/primitives/python_interpreter.py @@ -12,10 +12,12 @@ # limitations under the License. # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== import ast +import builtins import difflib import importlib import re import typing +from collections.abc import Mapping from typing import ( Any, Dict, @@ -24,8 +26,6 @@ Set, Tuple, ) -from collections.abc import Mapping -import builtins class InterpreterError(ValueError): diff --git a/dspy/retrieve/chromadb_rm.py b/dspy/retrieve/chromadb_rm.py index b4fc0bcf98..07ef407d1d 100644 --- a/dspy/retrieve/chromadb_rm.py +++ b/dspy/retrieve/chromadb_rm.py @@ -2,10 +2,12 @@ Retriever model for chromadb """ -from typing import Optional, List, Union +from typing import List, Optional, Union + +import backoff import openai + import dspy -import backoff from dsp.utils import dotdict try: @@ -16,13 +18,13 @@ try: import chromadb - from chromadb.config import Settings - from chromadb.utils import embedding_functions + import chromadb.utils.embedding_functions as ef from chromadb.api.types import ( Embeddable, EmbeddingFunction, ) - import chromadb.utils.embedding_functions as ef + from chromadb.config import Settings + from chromadb.utils import embedding_functions except ImportError: chromadb = None diff --git a/dspy/retrieve/databricks_rm.py b/dspy/retrieve/databricks_rm.py index 74066ec5b9..c275bdddc2 100644 --- a/dspy/retrieve/databricks_rm.py +++ b/dspy/retrieve/databricks_rm.py @@ -1,10 +1,13 @@ -import dspy import os -import requests -from typing import Union, List from collections import defaultdict +from typing import List, Union + +import requests + +import dspy from dspy.primitives.prediction import Prediction + class DatabricksRM(dspy.Retrieve): """ A retrieval module that uses Databricks Vector Search Endpoint to return the top-k embeddings for a given query. 
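The chromadb_rm hunk above, like the retriever modules that follow, keeps its backend imports inside a try/except ImportError block so that a missing optional dependency fails with an actionable install hint rather than a bare traceback. A minimal sketch of the idiom (the package and extra names are made up for illustration):

    try:
        import some_vector_backend  # optional dependency shipped as an extra
    except ImportError as exc:
        raise ImportError(
            "The 'backend' extra is required to use this retriever. "
            "Install it with `pip install dspy-ai[backend]`",
        ) from exc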
diff --git a/dspy/retrieve/deeplake_rm.py b/dspy/retrieve/deeplake_rm.py index 87beb48565..235108a912 100644 --- a/dspy/retrieve/deeplake_rm.py +++ b/dspy/retrieve/deeplake_rm.py @@ -2,10 +2,12 @@ Retriever model for deeplake """ -from typing import Optional, List, Union +from collections import defaultdict +from typing import List, Optional, Union + import openai + import dspy -from collections import defaultdict from dsp.utils import dotdict try: diff --git a/dspy/retrieve/marqo_rm.py b/dspy/retrieve/marqo_rm.py index a090e47f52..29c52fdb46 100644 --- a/dspy/retrieve/marqo_rm.py +++ b/dspy/retrieve/marqo_rm.py @@ -1,5 +1,6 @@ from collections import defaultdict from typing import List, Union + import dspy from dspy import dotdict diff --git a/dspy/retrieve/mongodb_atlas_rm.py b/dspy/retrieve/mongodb_atlas_rm.py index 70e7847303..3cafb05f65 100644 --- a/dspy/retrieve/mongodb_atlas_rm.py +++ b/dspy/retrieve/mongodb_atlas_rm.py @@ -1,23 +1,25 @@ -from typing import List, Any -import dspy import os +from typing import Any, List + +import backoff from openai import ( - OpenAI, APITimeoutError, InternalServerError, + OpenAI, RateLimitError, UnprocessableEntityError, ) -import backoff + +import dspy try: from pymongo import MongoClient from pymongo.errors import ( - ConnectionFailure, ConfigurationError, - ServerSelectionTimeoutError, + ConnectionFailure, InvalidURI, OperationFailure, + ServerSelectionTimeoutError, ) except ImportError: raise ImportError( diff --git a/dspy/retrieve/pgvector_rm.py b/dspy/retrieve/pgvector_rm.py index 8403efb2e3..cf1773f171 100644 --- a/dspy/retrieve/pgvector_rm.py +++ b/dspy/retrieve/pgvector_rm.py @@ -1,10 +1,12 @@ -import dspy -import openai from typing import List, Optional +import openai + +import dspy + try: - from pgvector.psycopg2 import register_vector import psycopg2 + from pgvector.psycopg2 import register_vector from psycopg2 import sql except ImportError: raise ImportError( diff --git a/dspy/retrieve/pinecone_rm.py b/dspy/retrieve/pinecone_rm.py index b520ed5fac..0328bc6ba1 100644 --- a/dspy/retrieve/pinecone_rm.py +++ b/dspy/retrieve/pinecone_rm.py @@ -3,11 +3,13 @@ Author: Dhar Rawal (@drawal1) """ -from dsp.utils import dotdict -from typing import Optional, List, Union -import dspy +from typing import List, Optional, Union + import backoff +import dspy +from dsp.utils import dotdict + try: import pinecone except ImportError: @@ -19,6 +21,7 @@ ) import openai + try: OPENAI_LEGACY = int(openai.version.__version__[0]) == 0 except Exception: diff --git a/dspy/retrieve/qdrant_rm.py b/dspy/retrieve/qdrant_rm.py index 46977a2524..5c2af050b9 100644 --- a/dspy/retrieve/qdrant_rm.py +++ b/dspy/retrieve/qdrant_rm.py @@ -1,11 +1,12 @@ from collections import defaultdict -from typing import List, Union, Optional +from typing import List, Optional, Union + import dspy from dsp.utils import dotdict try: - from qdrant_client import QdrantClient import fastembed + from qdrant_client import QdrantClient except ImportError: raise ImportError( "The 'qdrant' extra is required to use QdrantRM. 
Install it with `pip install dspy-ai[qdrant]`", diff --git a/dspy/retrieve/retrieve.py b/dspy/retrieve/retrieve.py index 9108582414..71b4f57349 100644 --- a/dspy/retrieve/retrieve.py +++ b/dspy/retrieve/retrieve.py @@ -1,8 +1,7 @@ -from typing import List, Union, Optional - -import dsp import random +from typing import List, Optional, Union +import dsp from dspy.predict.parameter import Parameter from dspy.primitives.prediction import Prediction diff --git a/dspy/retrieve/vectara_rm.py b/dspy/retrieve/vectara_rm.py index 4d72ed7e87..047c70d6c9 100644 --- a/dspy/retrieve/vectara_rm.py +++ b/dspy/retrieve/vectara_rm.py @@ -1,11 +1,11 @@ -from collections import defaultdict -from typing import List, Union -import dspy -from typing import Optional import json import os +from collections import defaultdict +from typing import List, Optional, Union + import requests +import dspy from dsp.utils import dotdict START_SNIPPET = "<%START%>" diff --git a/dspy/retrieve/weaviate_rm.py b/dspy/retrieve/weaviate_rm.py index 1ef7950c1f..61e8d1ef06 100644 --- a/dspy/retrieve/weaviate_rm.py +++ b/dspy/retrieve/weaviate_rm.py @@ -1,7 +1,7 @@ -from typing import List, Union +from typing import List, Optional, Union + import dspy from dsp.utils import dotdict -from typing import Optional try: import weaviate diff --git a/dspy/retrieve/you_rm.py b/dspy/retrieve/you_rm.py index 7321e67bd3..25a62ccea8 100644 --- a/dspy/retrieve/you_rm.py +++ b/dspy/retrieve/you_rm.py @@ -1,9 +1,10 @@ -import dspy import os +from typing import List, Optional, Union + import requests -from dsp.utils import dotdict -from typing import Union, List, Optional +import dspy +from dsp.utils import dotdict class YouRM(dspy.Retrieve): diff --git a/dspy/signatures/signature.py b/dspy/signatures/signature.py index 8c1f161642..22bc6475d7 100644 --- a/dspy/signatures/signature.py +++ b/dspy/signatures/signature.py @@ -1,12 +1,13 @@ import ast -from copy import deepcopy +import re import typing -import dsp +from copy import deepcopy +from typing import Any, Dict, Tuple, Type, Union # noqa: UP035 + from pydantic import BaseModel, Field, create_model from pydantic.fields import FieldInfo -from typing import Any, Type, Union, Dict, Tuple # noqa: UP035 -import re +import dsp from dspy.signatures.field import InputField, OutputField, new_to_old_field diff --git a/dspy/teleprompt/__init__.py b/dspy/teleprompt/__init__.py index c4454725bd..61425e5b9b 100644 --- a/dspy/teleprompt/__init__.py +++ b/dspy/teleprompt/__init__.py @@ -1,9 +1,9 @@ -from .teleprompt import * from .bootstrap import * -from .vanilla import * -from .random_search import * from .finetune import * -from .teleprompt_optuna import * from .knn_fewshot import * +from .random_search import * from .signature_opt import SignatureOptimizer -from .signature_opt_bayesian import BayesianSignatureOptimizer \ No newline at end of file +from .signature_opt_bayesian import BayesianSignatureOptimizer +from .teleprompt import * +from .teleprompt_optuna import * +from .vanilla import * diff --git a/dspy/teleprompt/bootstrap.py b/dspy/teleprompt/bootstrap.py index 5aaefa271f..6fc9dd2ef7 100644 --- a/dspy/teleprompt/bootstrap.py +++ b/dspy/teleprompt/bootstrap.py @@ -1,16 +1,15 @@ -import dsp -import tqdm import random import threading -import dspy +import tqdm +import dsp +import dspy from dspy.primitives import Example from .teleprompt import Teleprompter from .vanilla import LabeledFewShot - # TODO: metrics should return an object with __bool__ basically, but fine if they're more complex. 
# They can also be sortable. diff --git a/dspy/teleprompt/finetune.py b/dspy/teleprompt/finetune.py index 29b7887ab5..8400fe7749 100644 --- a/dspy/teleprompt/finetune.py +++ b/dspy/teleprompt/finetune.py @@ -1,17 +1,18 @@ import os -import time -import dsp import random +import time import ujson from datasets.fingerprint import Hasher +import dsp from dspy.signatures.signature import signature_to_template -# from dspy.primitives import Example +from .bootstrap import BootstrapFewShot +# from dspy.primitives import Example from .teleprompt import Teleprompter -from .bootstrap import BootstrapFewShot + # from .vanilla import LabeledFewShot # from dspy.evaluate.evaluate import Evaluate diff --git a/dspy/teleprompt/knn_fewshot.py b/dspy/teleprompt/knn_fewshot.py index 4bc447abd3..b999447078 100644 --- a/dspy/teleprompt/knn_fewshot.py +++ b/dspy/teleprompt/knn_fewshot.py @@ -1,9 +1,11 @@ -from typing import List import types +from typing import List + import dsp +from dspy.teleprompt import BootstrapFewShot from .teleprompt import Teleprompter -from dspy.teleprompt import BootstrapFewShot + class KNNFewShot(Teleprompter): def __init__(self, KNN, k: int, trainset: List[dsp.Example]): diff --git a/dspy/teleprompt/random_search.py b/dspy/teleprompt/random_search.py index 3f7f2cd636..bc0e5ef61b 100644 --- a/dspy/teleprompt/random_search.py +++ b/dspy/teleprompt/random_search.py @@ -1,13 +1,11 @@ import random +from dspy.evaluate.evaluate import Evaluate from dspy.teleprompt.teleprompt import Teleprompter from .bootstrap import BootstrapFewShot from .vanilla import LabeledFewShot -from dspy.evaluate.evaluate import Evaluate - - # TODO: Don't forget dealing with the raw demos. # TODO: Deal with the (pretty common) case of having a metric for filtering and a separate metric for eval. # The metric itself may tell though by the presence of trace. 
diff --git a/dspy/teleprompt/signature_opt.py b/dspy/teleprompt/signature_opt.py index 3163acf3f2..d71aa69a7d 100644 --- a/dspy/teleprompt/signature_opt.py +++ b/dspy/teleprompt/signature_opt.py @@ -1,9 +1,10 @@ +from collections import defaultdict + import dsp import dspy -from dspy.teleprompt.teleprompt import Teleprompter -from dspy.signatures import Signature from dspy.evaluate.evaluate import Evaluate -from collections import defaultdict +from dspy.signatures import Signature +from dspy.teleprompt.teleprompt import Teleprompter """ USAGE SUGGESTIONS: diff --git a/dspy/teleprompt/signature_opt_bayesian.py b/dspy/teleprompt/signature_opt_bayesian.py index 85298276ab..e316462c94 100644 --- a/dspy/teleprompt/signature_opt_bayesian.py +++ b/dspy/teleprompt/signature_opt_bayesian.py @@ -1,14 +1,16 @@ +import math +import random +from collections import defaultdict + +import optuna + import dsp import dspy -from dspy.signatures.signature import signature_to_template -from dspy.teleprompt.teleprompt import Teleprompter -from dspy.signatures import Signature from dspy.evaluate.evaluate import Evaluate -from collections import defaultdict -import random +from dspy.signatures import Signature +from dspy.signatures.signature import signature_to_template from dspy.teleprompt import BootstrapFewShot -import optuna -import math +from dspy.teleprompt.teleprompt import Teleprompter """ USAGE SUGGESTIONS: diff --git a/dspy/teleprompt/teleprompt_optuna.py b/dspy/teleprompt/teleprompt_optuna.py index 3686e66b7d..501bd71fde 100644 --- a/dspy/teleprompt/teleprompt_optuna.py +++ b/dspy/teleprompt/teleprompt_optuna.py @@ -1,10 +1,10 @@ import optuna +from dspy.evaluate.evaluate import Evaluate from dspy.teleprompt.teleprompt import Teleprompter from .bootstrap import BootstrapFewShot -from dspy.evaluate.evaluate import Evaluate class BootstrapFewShotWithOptuna(Teleprompter): def __init__(self, metric, teacher_settings={}, max_bootstrapped_demos=4, max_labeled_demos=16, max_rounds=1, num_candidate_programs=16, num_threads=6): diff --git a/dspy/utils/dummies.py b/dspy/utils/dummies.py index 1f6390536a..d97d59b5a8 100644 --- a/dspy/utils/dummies.py +++ b/dspy/utils/dummies.py @@ -1,9 +1,11 @@ import random +import re from typing import Union -from dsp.modules import LM + import numpy as np + +from dsp.modules import LM from dsp.utils.utils import dotdict -import re class DummyLM(LM): diff --git a/examples/longformqa/utils.py b/examples/longformqa/utils.py index e4ebadc829..bdae81a243 100644 --- a/examples/longformqa/utils.py +++ b/examples/longformqa/utils.py @@ -1,8 +1,10 @@ -import regex as re import nltk +import regex as re + nltk.download('punkt') from nltk.tokenize import sent_tokenize + def extract_text_by_citation(paragraph): citation_regex = re.compile(r'(.*?)(\[\d+\]\.)', re.DOTALL) parts_with_citation = citation_regex.findall(paragraph) diff --git a/inspect-app/app.py b/inspect-app/app.py index 5ea50dd92a..14ea405510 100644 --- a/inspect-app/app.py +++ b/inspect-app/app.py @@ -1,8 +1,9 @@ -from flask import Flask, request, jsonify -from flask_cors import CORS -import boto3 import time +import boto3 +from flask import Flask, jsonify, request +from flask_cors import CORS + app = Flask(__name__) CORS(app) diff --git a/pyproject.toml b/pyproject.toml index 1c0f759684..e2c719eaf2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -218,6 +218,8 @@ select = [ "ERA", # pandas-vet "PD", + # Import sort + "I", # avoid shadowing "PLW", ] diff --git a/setup.py b/setup.py index d31e9c7913..3f3bb18585 100644 --- 
a/setup.py +++ b/setup.py @@ -1,4 +1,4 @@ -from setuptools import setup, find_packages +from setuptools import find_packages, setup # Read the content of the README file with open('README.md', encoding='utf-8') as f: diff --git a/testing/optimizer_tester.py b/testing/optimizer_tester.py index e55f4dd4c4..68d3946019 100644 --- a/testing/optimizer_tester.py +++ b/testing/optimizer_tester.py @@ -1,18 +1,21 @@ -from .tasks.scone import ScoNeTask -from .tasks.hotpotqa import HotPotQATask -from .tasks.gsm8k import GSM8KTask -from .tasks.biodex import BioDexTask -from .tasks.tweet import TweetTask -from .tasks.tweet_metric import TweetMetricTask -from dspy.evaluate import Evaluate -from dotenv import load_dotenv -import openai -import os -import dspy import csv import datetime +import os from timeit import default_timer as timer +import openai +from dotenv import load_dotenv + +import dspy +from dspy.evaluate import Evaluate + +from .tasks.biodex import BioDexTask +from .tasks.gsm8k import GSM8KTask +from .tasks.hotpotqa import HotPotQATask +from .tasks.scone import ScoNeTask +from .tasks.tweet import TweetTask +from .tasks.tweet_metric import TweetMetricTask + datasets = ["ScoNe", "HotPotQA", "GSM8K", "BioDex", "Tweet"] class OptimizerTester: diff --git a/testing/tasks/__init__.py b/testing/tasks/__init__.py index f5034e36f8..0b1ee32ec9 100644 --- a/testing/tasks/__init__.py +++ b/testing/tasks/__init__.py @@ -1,7 +1,7 @@ from .base_task import BaseTask +from .biodex import BioDexTask from .gsm8k import GSM8KTask from .hotpotqa import HotPotQATask from .scone import ScoNeTask -from .biodex import BioDexTask from .tweet import TweetTask -from .tweet_metric import TweetMetricTask \ No newline at end of file +from .tweet_metric import TweetMetricTask diff --git a/testing/tasks/base_task.py b/testing/tasks/base_task.py index c7f8f42b68..089e6d7bee 100644 --- a/testing/tasks/base_task.py +++ b/testing/tasks/base_task.py @@ -1,5 +1,6 @@ from abc import ABC, abstractmethod + class BaseTask(ABC): def __init__(self): pass diff --git a/testing/tasks/biodex.py b/testing/tasks/biodex.py index f98da11415..6e51745b37 100644 --- a/testing/tasks/biodex.py +++ b/testing/tasks/biodex.py @@ -1,20 +1,24 @@ from __future__ import annotations -from .base_task import BaseTask -import dspy -from dspy.evaluate import Evaluate -from dsp.utils import deduplicate -import tqdm -import datasets + import math -from functools import lru_cache import os -import re import pickle +import re +from collections import defaultdict +from enum import Enum +from functools import lru_cache +from typing import DefaultDict, Dict, List, Optional, Union + +import datasets +import tqdm from pydantic import BaseModel from rapidfuzz import process -from typing import Union, Optional, Dict, DefaultDict, List -from enum import Enum -from collections import defaultdict + +import dspy +from dsp.utils import deduplicate +from dspy.evaluate import Evaluate + +from .base_task import BaseTask # Must point to a MedDra download with mdhier.asc data_dir = '/future/u/okhattab/data/2023/MedDraV2/meddra_23_0_english/MedAscii' # NOTE: EDIT THIS LINE @@ -529,6 +533,7 @@ def __call__(self, gold, pred, trace=None): metricR = CompilationMetric(field='reactions', metric='recall') from collections import Counter + def reduce_grounded_reactions(grounded_reactions): scores = {} for score, reaction in grounded_reactions: diff --git a/testing/tasks/gsm8k.py b/testing/tasks/gsm8k.py index 6724ebdcd8..04f20d177c 100644 --- a/testing/tasks/gsm8k.py +++ 
b/testing/tasks/gsm8k.py @@ -1,11 +1,14 @@ -import dspy -from .base_task import BaseTask -from dspy.datasets.gsm8k import gsm8k_metric import random -import tqdm +import tqdm from datasets import load_dataset +import dspy +from dspy.datasets.gsm8k import gsm8k_metric + +from .base_task import BaseTask + + class CoT(dspy.Module): def __init__(self): super().__init__() diff --git a/testing/tasks/hotpotqa.py b/testing/tasks/hotpotqa.py index 19a5075fc9..f0bf17078e 100644 --- a/testing/tasks/hotpotqa.py +++ b/testing/tasks/hotpotqa.py @@ -1,7 +1,9 @@ import dspy +from dspy.datasets import HotPotQA from dspy.evaluate import Evaluate + from .base_task import BaseTask -from dspy.datasets import HotPotQA + class MultiHop(dspy.Module): def __init__(self,passages_per_hop): diff --git a/testing/tasks/scone.py b/testing/tasks/scone.py index 0ee5f0391f..27e8691acb 100644 --- a/testing/tasks/scone.py +++ b/testing/tasks/scone.py @@ -1,9 +1,13 @@ import glob import os +import random + import pandas as pd + import dspy + from .base_task import BaseTask -import random + def load_scone(dirname): dfs = [] diff --git a/testing/tasks/tweet.py b/testing/tasks/tweet.py index 30bdef68a5..73870a0591 100644 --- a/testing/tasks/tweet.py +++ b/testing/tasks/tweet.py @@ -1,10 +1,14 @@ -import dspy -from dspy.datasets import HotPotQA -from .base_task import BaseTask +import os from functools import lru_cache + import openai from dotenv import load_dotenv -import os + +import dspy +from dspy.datasets import HotPotQA + +from .base_task import BaseTask + class TweetSignature(dspy.Signature): ("""Given context and a question, answer with a tweet""") diff --git a/testing/tasks/tweet_metric.py b/testing/tasks/tweet_metric.py index baa7024ab7..b91458c48c 100644 --- a/testing/tasks/tweet_metric.py +++ b/testing/tasks/tweet_metric.py @@ -1,14 +1,18 @@ -import dspy -from dspy.datasets import HotPotQA -from .base_task import BaseTask -from dspy import Example +import os +import uuid from functools import lru_cache + import openai from dotenv import load_dotenv -import os -import uuid from tqdm import tqdm +import dspy +from dspy import Example +from dspy.datasets import HotPotQA + +from .base_task import BaseTask + + class TweetSignature(dspy.Signature): ("""Given context and a question, answer with a tweet""") From 2bd6fdaced86def090cbe4661fdaa07ee8a3bd03 Mon Sep 17 00:00:00 2001 From: Isaac Miller <17116851+isaacbmiller@users.noreply.github.com> Date: Sun, 3 Mar 2024 23:09:41 -0600 Subject: [PATCH 065/243] ci(dspy): Fix tests failing - remove --no-root flag (#541) * Add Ruff linting workflow * Update GitHub Actions workflows * Make ruff apply but not fail * Style fixes by Ruff * Combine Workflows * Add caching * Add import related rules * Automatic Style fixes * Try and fix caching * Fix import * Use natve python dep caching * Update actions/checkout to v4 * Use autofix in tests * Try to cache "install poetry" * Empty-Commit to test caching * Remove workflow deps * Rename action * Update workflow name * Fix Docker stop command and subprocess run check * Fix functional imports * Remove --no-root flag --------- Co-authored-by: isaacbmiller --- .github/workflows/run_tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 52ac21e3c5..6b4221ade7 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -50,7 +50,7 @@ jobs: python-version: ${{ matrix.python-version }} cache: "poetry" - name: Install dependencies - 
run: poetry install --no-interaction
       - name: Run lint with tests
         uses: chartboost/ruff-action@v1
         with:

From 171c204cbb8717b8ddcb4b3f5906476c1fc07d17 Mon Sep 17 00:00:00 2001
From: Herumb Shandilya
Date: Mon, 4 Mar 2024 19:31:23 +0530
Subject: [PATCH 066/243] Batched Generation, Signature Based Generation

---
 dspy/experimental/synthesizer.py | 111 ++++++++++++++++++++++----------
 1 file changed, 78 insertions(+), 33 deletions(-)

diff --git a/dspy/experimental/synthesizer.py b/dspy/experimental/synthesizer.py
index e247d35f3b..71490185ea 100644
--- a/dspy/experimental/synthesizer.py
+++ b/dspy/experimental/synthesizer.py
@@ -1,10 +1,11 @@
 import dspy
 import random
-from typing import List
-from tqdm import tqdm, trange
+
 from datasets import Dataset
+from tqdm import tqdm, trange
+from typing import List, Union, Tuple, Mapping
 
-def format_examples(examples: List[dspy.Example]):
+def format_examples(examples: List[dspy.Example]) -> str:
     if isinstance(examples, str):
         return examples
 
@@ -24,8 +25,20 @@ def format_examples(examples: List[dspy.Example]) -> str:
 
     return formatted_example
 
+class UnderstandTask(dspy.Signature):
+    """I'll be providing you with a task description; your task is to prepare a concise, comprehensible summary that captures the broad essence and purpose of the task this description aims to address. Your summary should illuminate the general objective and the type of problem being solved, offering a clear picture of what the task entails at a high level. Avoid getting into the nuances of individual datapoints, specifics about models, examples, algorithms, or any intricate technicalities. Your explanation should serve to clarify the task's overall goal and its basic premise, without touching on methodologies or solutions."""
+
+    task_description = dspy.InputField(
+        prefix="Task Description:",
+        desc="Description of the task.",
+    )
+    explanation = dspy.OutputField(
+        prefix="Explanation:",
+        desc="Explanation of the task.",
+    )
+
 class ExplainTask(dspy.Signature):
-    """Analyze the provided set of datapoints carefully, and prepare a concise, comprehensible summary that captures the essence and purpose of the task these datapoints aim to address. Your summary should illuminate the general objective and the type of problem being solved, offering a clear picture of what the task entails at a high level. Avoid getting into the nuances of individual datapoints, specifics about models, examples, algorithms, or any intricate technicalities. Your explanation should serve to clarify the task's overall goal and its basic premise, without touching on methodologies or solutions."""
+    """Analyze the provided set of datapoints carefully, and prepare a concise, comprehensible summary that captures the broad essence and purpose of the task these datapoints aim to address. Your summary should illuminate the general objective and the type of problem being solved, offering a clear picture of what the task entails at a high level. Avoid getting into the nuances of individual datapoints, specifics about models, examples, algorithms, or any intricate technicalities. 
Your explanation should serve to clarify the task's overall goal and its basic premise, without touching on methodologies or solutions.""" examples = dspy.InputField( prefix="Examples Datapoints:-", @@ -54,8 +67,8 @@ class GenerateFieldDescription(dspy.Signature): ) class GenerateInputFieldsData(dspy.Signature): - """Generate synthetic data based on the task description and the given knowledge seed.""" - + """Create synthetic data using the task description and the provided knowledge seed. Your task is to generate diverse and imaginative data that aligns with the given task description and knowledge seed. You are encouraged to be creative and not limit yourself, allowing for a wide range of synthetic data that reflects the characteristics and details provided in the task description. The data should be unique and varied, showcasing originality and creativity while maintaining relevance to the task and knowledge seed.""" + knowledge_seed = dspy.InputField( prefix="Knowledge Seed:", desc="Seed for the knowledge base search to base the inputs around.", @@ -77,16 +90,28 @@ def __init__(self): self.generate_input_data = GenerateInputFieldsData self.generate_output_data = GenerateOutputFieldsData - def _prepare_synthetic_data_predictors(self, input_keys: List[str], output_keys: List[str], task_description: str): - for key in tqdm(input_keys, desc="Preparing Input Fields"): + def _get_field_data(self, key: str, keys_dict: Mapping[str, str]): + if key.startswith("$"): field_details = self.generate_field_description( - task_description=task_description, + task_description=keys_dict["task_description"], field_name=key, ) field_name = key field_description = field_details.field_description + return field_name, field_description + + else: + field_name = key + field_description = keys_dict[key] + + return field_name, field_description + + def _prepare_synthetic_data_predictors(self, input_keys: Mapping[str, str], output_keys: Mapping[str, str], task_description: str): + for key in tqdm(input_keys, desc="Preparing Input Fields"): + field_name, field_description = self._get_field_data(key, input_keys) + output_field = dspy.OutputField( prefix=f"{field_name}:", desc=field_description, @@ -108,13 +133,7 @@ def _prepare_synthetic_data_predictors(self, input_keys: List[str], output_keys: ) for key in tqdm(output_keys, desc="Preparing Output Fields"): - field_details = self.generate_field_description( - task_description=task_description, - field_name=key, - ) - - field_name = key - field_description = field_details.field_description + field_name, field_description = self._get_field_data(key, output_keys) output_field = dspy.OutputField( prefix=f"{field_name}:", @@ -128,12 +147,34 @@ def _prepare_synthetic_data_predictors(self, input_keys: List[str], output_keys: return dspy.ChainOfThought(self.generate_input_data), dspy.Predict(self.generate_output_data) - def generate(self, examples: List[dspy.Example], num_data: int, task_description: str = None, input_keys: str = None, output_keys: str = None) -> List[dspy.Example]: - task_description = task_description or self.explain_task(examples=examples).explanation - self.generate_output_data.__doc__ = task_description + def _get_dataset_metadata(self, ground_source: Union[List[dspy.Example], dspy.Signature]): + if isinstance(ground_source, dspy.SignatureMeta): + task_description = self.explain_task(examples=ground_source.__doc__).explanation + input_keys = {k:v.json_schema_extra["desc"] for k,v in ground_source.input_fields.items()} + output_keys = 
{k:v.json_schema_extra["desc"] for k,v in ground_source.output_fields.items()} + + return task_description, input_keys, output_keys + + elif isinstance(ground_source, list) and isinstance(ground_source[0], dspy.Example): + task_description = self.explain_task(examples=ground_source).explanation + input_keys = {key:f"${{{key}}}" for key in ground_source[0].inputs()} + output_keys = {key:f"${{{key}}}" for key in ground_source[0].labels()} + + return task_description, input_keys, output_keys - input_keys = input_keys or [key for key in examples[0].inputs()] - output_keys = output_keys or [key for key in examples[0].labels()] + else: + raise ValueError("Ground source must be either a list of examples or a signature.") + + def generate( + self, + ground_source: Union[List[dspy.Example], dspy.Signature], + num_data: int, + batch_size: int = None, + ): + batch_size = batch_size or 1 + task_description, input_keys, output_keys = self._get_dataset_metadata(ground_source) + + self.generate_output_data.__doc__ = task_description self.input_predictor, self.output_predictor = self._prepare_synthetic_data_predictors( input_keys=input_keys, @@ -143,22 +184,26 @@ def generate(self, examples: List[dspy.Example], num_data: int, task_description data = [] - for idx in trange(num_data, desc="Generating Synthetic Data"): - inputs = self.input_predictor(task_description=task_description, knowledge_seed=random.randint(0, 1000000), config=dict(temperature=0.7+0.01*idx)) + for idx in trange(0, num_data, batch_size, desc="Generating Synthetic Data"): + iter_temperature = 0.7+0.01*idx + iter_seed = random.randint(0, 1000000) + + inputs = self.input_predictor(task_description=task_description, knowledge_seed=iter_seed, config=dict(temperature=iter_temperature, n=batch_size)) - input_kwargs = { - key: getattr(inputs, key) + input_kwargs = [{ + key: getattr(completions, key) for key in input_keys - } + } for completions in inputs.completions] - outputs = self.output_predictor(**input_kwargs, config=dict(temperature=0.7+0.01*idx)) + for kwargs in input_kwargs: + outputs = self.output_predictor(**kwargs, config=dict(temperature=iter_temperature)) - output_kwargs = { - key: getattr(outputs, key) - for key in output_keys - } + output_kwargs = { + key: getattr(outputs, key) + for key in output_keys + } - data.append(dspy.Example(**input_kwargs, **output_kwargs).with_inputs(*input_keys)) + data.append(dspy.Example(**kwargs, **output_kwargs).with_inputs(*input_keys)) return data @@ -177,4 +222,4 @@ def export(self, data: List[dspy.Example], path: str, mode: str = None, **kwargs dataset.to_json(path_or_buf=path, **kwargs) elif extention == "arrow" or extention == "hf": - dataset.save_to_disk(path) \ No newline at end of file + dataset.save_to_disk(path) From 98d59f690370cb8908ac9505508aec4a499c57a8 Mon Sep 17 00:00:00 2001 From: krypticmouse Date: Mon, 4 Mar 2024 14:03:30 +0000 Subject: [PATCH 067/243] Automatic Style fixes --- dspy/experimental/synthesizer.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/dspy/experimental/synthesizer.py b/dspy/experimental/synthesizer.py index f409c65305..d44597476a 100644 --- a/dspy/experimental/synthesizer.py +++ b/dspy/experimental/synthesizer.py @@ -1,9 +1,12 @@ -import dspy import random +from collections.abc import Mapping +from typing import List, Union from datasets import Dataset from tqdm import tqdm, trange -from typing import List, Union, Mapping + +import dspy + def format_examples(examples: List[dspy.Example]) -> str: if isinstance(examples, str): From 
c2737f641220f8342fb830266da63b3e6e584969 Mon Sep 17 00:00:00 2001
From: ragul-kachiappan-dev
Date: Mon, 4 Mar 2024 19:38:17 +0530
Subject: [PATCH 068/243] fix: printing signature fields in verbose mode for signature_opt

---
 dspy/teleprompt/signature_opt.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dspy/teleprompt/signature_opt.py b/dspy/teleprompt/signature_opt.py
index f38eccf1a2..439ecc0740 100644
--- a/dspy/teleprompt/signature_opt.py
+++ b/dspy/teleprompt/signature_opt.py
@@ -95,7 +95,7 @@ def _print_signature(self, predictor):
         else:
             signature = predictor.extended_signature1
         print(f"i: {signature.instructions}")
-        print(f"p: {list(signature.fields().values())[-1].json_schema_extra['prefix']}")
+        print(f"p: {list(signature.fields.values())[-1].json_schema_extra['prefix']}")
         print()
 

From 7a20578795193b4dcc4211d22f8ba337eddcff64 Mon Sep 17 00:00:00 2001
From: Thomas Dybdahl Ahle
Date: Mon, 4 Mar 2024 06:53:29 -0800
Subject: [PATCH 069/243] Update README.md

A few more typed examples

---
 README.md | 31 ++++++++++++++++++++++++++++++-
 1 file changed, 30 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index bc0cc2ed6a..421bacd3e5 100644
--- a/README.md
+++ b/README.md
@@ -283,6 +283,7 @@ Assume, for example, you need to find

 ```python
 from pydantic import BaseModel, Field
+from dspy.functional import TypedPredictor

 class TravelInformation(BaseModel):
     origin: str = Field(pattern=r"^[A-Z]{3}$")
@@ -295,12 +296,40 @@ class TravelSignature(Signature):
     email: str = InputField()
     flight_information: list[TravelInformation] = OutputField()

-predictor = dspy.TypedPredictor(TravelSignature)
+predictor = TypedPredictor(TravelSignature)
 predictor(email='...')
 ```

 Which will output a list of `TravelInformation` objects.

+There are other ways to create typed signatures too, such as
+```python
+predictor = TypedChainOfThought("question:str -> answer:int")
+```
+which applies chain of thought and is guaranteed to return an int.
+
+There's even an approach inspired by [tanuki.py](https://github.com/Tanuki/tanuki.py), which can be convenient when defining modules:
+```python
+from dspy.functional import FunctionalModule, predictor, cot
+
+class MyModule(FunctionalModule):
+    @predictor
+    def hard_question(self, possible_topics: list[str]) -> str:
+        """Write a hard question based on one of the topics. It should be answerable by a number."""
+
+    @cot
+    def answer(self, question: str) -> float:
+        pass
+
+    def forward(self, possible_topics: list[str]):
+        q = self.hard_question(possible_topics=possible_topics)
+        a = self.answer(question=q)
+        return (q, a)
+```
+
+For more examples, see [the list above](https://github.com/stanfordnlp/dspy#:~:text=Typed%20DSPy),
+as well as [the unit tests](https://github.com/stanfordnlp/dspy/blob/main/tests/functional/test_functional.py) for the module.
+
 ## 6) FAQ: Is DSPy right for me?

 The **DSPy** philosophy and abstraction differ significantly from other libraries and frameworks, so it's usually straightforward to decide when **DSPy** is (or isn't) the right framework for your usecase. 
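
To make the typed-predictor pattern in the README change above concrete, here is a minimal end-to-end sketch. It is illustrative only: the `Booking` model, the sample email, and the `gpt-3.5-turbo` model choice are assumptions made for this example, and it presumes an OpenAI API key is configured in the environment.

```python
import dspy
from pydantic import BaseModel, Field
from dspy.functional import TypedPredictor

class Booking(BaseModel):
    # pydantic validates the parsed output; a malformed confirmation code raises
    # a ValidationError, which TypedPredictor feeds back to the LM on a retry.
    confirmation: str = Field(pattern=r"^[A-Z0-9]{6}$")
    destination: str

class ExtractBooking(dspy.Signature):
    """Extract the booking details from a confirmation email."""

    email: str = dspy.InputField()
    booking: Booking = dspy.OutputField()

dspy.settings.configure(lm=dspy.OpenAI(model="gpt-3.5-turbo"))
pred = TypedPredictor(ExtractBooking)(email="Your trip to Lisbon is confirmed, code AB12CD.")
print(pred.booking.destination)  # `booking` is a parsed Booking instance, not raw text
```

The design point is that validation failures never surface as unparsed text: the predictor retries a few times, appending the pydantic error (and a schema example) to the prompt, before giving up.
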
From 29a8f048dece1645b1b8c440e5055c67ed6f4ef0 Mon Sep 17 00:00:00 2001 From: Herumb Shandilya Date: Mon, 4 Mar 2024 21:17:55 +0530 Subject: [PATCH 070/243] import fix --- dspy/experimental/synthesizer.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/dspy/experimental/synthesizer.py b/dspy/experimental/synthesizer.py index d44597476a..af58888a5c 100644 --- a/dspy/experimental/synthesizer.py +++ b/dspy/experimental/synthesizer.py @@ -1,12 +1,9 @@ +import dspy import random -from collections.abc import Mapping -from typing import List, Union from datasets import Dataset from tqdm import tqdm, trange - -import dspy - +from typing import List, Union, Mapping def format_examples(examples: List[dspy.Example]) -> str: if isinstance(examples, str): @@ -225,4 +222,4 @@ def export(self, data: List[dspy.Example], path: str, mode: str = None, **kwargs dataset.to_json(path_or_buf=path, **kwargs) elif extention == "arrow" or extention == "hf": - dataset.save_to_disk(path) + dataset.save_to_disk(path) \ No newline at end of file From 1913b8301064efe5eac047e40eb09cedf54d03a5 Mon Sep 17 00:00:00 2001 From: krypticmouse Date: Mon, 4 Mar 2024 15:48:21 +0000 Subject: [PATCH 071/243] Automatic Style fixes --- dspy/experimental/synthesizer.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/dspy/experimental/synthesizer.py b/dspy/experimental/synthesizer.py index af58888a5c..d23aa30592 100644 --- a/dspy/experimental/synthesizer.py +++ b/dspy/experimental/synthesizer.py @@ -1,9 +1,12 @@ -import dspy import random +from collections.abc import Mapping +from typing import List, Union from datasets import Dataset from tqdm import tqdm, trange -from typing import List, Union, Mapping + +import dspy + def format_examples(examples: List[dspy.Example]) -> str: if isinstance(examples, str): From 7980a2bbd3df77455c12a8d12496ce5875e44f4b Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Mon, 4 Mar 2024 09:26:56 -0800 Subject: [PATCH 072/243] First try at a new optimizer/teleprompt --- dspy/functional/functional.py | 79 +- dspy/predict/predict.py | 8 +- dspy/primitives/module.py | 18 +- dspy/primitives/program.py | 11 +- dspy/teleprompt/signature_opt2.py | 248 ++++ examples/generation.py | 28 + examples/nli/scone/ScoNe | 1 + examples/quiz/DSPy_QuizGen_Cache | 1 + examples/signature_opt2.ipynb | 1717 +++++++++++++++++++++++ examples/tweets/DSPy_TweetGen_Cache | 1 + tests/functional/test_functional.py | 68 +- tests/functional/test_signature_opt2.py | 165 +++ tests/predict/test_predict.py | 36 +- 13 files changed, 2325 insertions(+), 56 deletions(-) create mode 100644 dspy/teleprompt/signature_opt2.py create mode 100644 examples/generation.py create mode 160000 examples/nli/scone/ScoNe create mode 160000 examples/quiz/DSPy_QuizGen_Cache create mode 100644 examples/signature_opt2.ipynb create mode 160000 examples/tweets/DSPy_TweetGen_Cache create mode 100644 tests/functional/test_signature_opt2.py diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py index da5e52f94c..3d6c471414 100644 --- a/dspy/functional/functional.py +++ b/dspy/functional/functional.py @@ -1,4 +1,3 @@ -from collections import defaultdict import inspect import os import openai @@ -14,7 +13,8 @@ from dspy.signatures.signature import ensure_signature, make_signature -MAX_RETRIES = 3 +# Some improvement ideas: +# - Increase the temperature on error def predictor(func) -> dspy.Module: @@ -56,7 +56,7 @@ def __init__(self): self.__dict__[name] = attr.copy() -def 
TypedChainOfThought(signature) -> dspy.Module: # noqa: N802
+def TypedChainOfThought(signature, max_retries=3) -> dspy.Module: # noqa: N802
     """Just like TypedPredictor, but adds a ChainOfThought OutputField."""
     signature = ensure_signature(signature)
     output_keys = ", ".join(signature.output_fields.keys())
@@ -68,17 +68,19 @@ def TypedChainOfThought(signature) -> dspy.Module: # noqa: N802
             desc="${produce the " + output_keys + "}. We ...",
         ),
     ),
+        max_retries=max_retries,
     )
 
 
 class TypedPredictor(dspy.Module):
-    def __init__(self, signature):
+    def __init__(self, signature, max_retries=3):
         super().__init__()
         self.signature = ensure_signature(signature)
         self.predictor = dspy.Predict(signature)
+        self.max_retries = max_retries
 
     def copy(self) -> "TypedPredictor":
-        return TypedPredictor(self.signature)
+        return TypedPredictor(self.signature, self.max_retries)
 
     @staticmethod
     def _make_example(type_) -> str:
@@ -140,6 +142,15 @@ def _prepare_signature(self) -> dspy.Signature:
                 format_ = passages2text
             elif inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel):
                 format_ = lambda x: x if isinstance(x, str) else x.model_dump_json()
+            # Special formatting for lists of known types. Maybe the output fields should have this too?
+            elif typing.get_origin(type_) in (List, list, Tuple, tuple):
+                (inner_type,) = typing.get_args(type_)
+                if inspect.isclass(inner_type) and issubclass(inner_type, pydantic.BaseModel):
+                    format_ = (
+                        lambda x: x if isinstance(x, str) else "[" + ",".join(i.model_dump_json() for i in x) + "]"
+                    )
+                else:
+                    format_ = lambda x: x if isinstance(x, str) else json.dumps(x)
             signature = signature.with_updated_fields(name, format=format_)
 
         return signature
@@ -149,40 +160,46 @@ def forward(self, **kwargs) -> dspy.Prediction:
         # We have to re-prepare the signature on every forward call, because the base
         # signature might have been modified by an optimizer or something like that.
         signature = self._prepare_signature()
-        for try_i in range(MAX_RETRIES):
+        for try_i in range(self.max_retries):
             result = self.predictor(**modified_kwargs, new_signature=signature)
             errors = {}
             parsed_results = []
             # Parse the outputs
-            for i, completion in enumerate(result.completions):
-                try:
-                    parsed = {}
-                    for name, field in signature.output_fields.items():
+            for completion in result.completions:
+                parsed = {}
+                for name, field in signature.output_fields.items():
+                    try:
                         value = completion[name]
                         parser = field.json_schema_extra.get("parser", lambda x: x)
                         parsed[name] = parser(value)
-                    # Instantiate the actual signature with the parsed values.
-                    # This allow pydantic to validate the fields defined in the signature.
+                    except (pydantic.ValidationError, ValueError) as e:
+                        errors[name] = _format_error(e)
+                        # If we can, we add an example to the error message
+                        current_desc = field.json_schema_extra.get("desc", "")
+                        i = current_desc.find("JSON Schema: ")
+                        if i == -1:
+                            continue  # Only add examples to JSON objects
+                        suffix, current_desc = current_desc[i:], current_desc[:i]
+                        prefix = "You MUST use this format: "
+                        if (
+                            try_i + 1 < self.max_retries
+                            and prefix not in current_desc
+                            and (example := self._make_example(field.annotation))
+                        ):
+                            signature = signature.with_updated_fields(
+                                name,
+                                desc=current_desc + "\n" + prefix + example + "\n" + suffix,
+                            )
+                # No reason to try parsing the general signature, or to run more completions, if we already have errors
+                if errors:
+                    break
+                # Instantiate the actual signature with the parsed values.
+                # This allows pydantic to validate the fields defined in the signature. 
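+                # (The `_dummy` instance below exists only for this validation side
+                # effect: pydantic cross-checks the parsed outputs against the original
+                # typed signature, and the object itself is discarded.)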
+ try: _dummy = self.signature(**kwargs, **parsed) parsed_results.append(parsed) - except (pydantic.ValidationError, ValueError) as e: - errors[name] = _format_error(e) - # If we can, we add an example to the error message - current_desc = field.json_schema_extra.get("desc", "") - i = current_desc.find("JSON Schema: ") - if i == -1: - continue # Only add examples to JSON objects - suffix, current_desc = current_desc[i:], current_desc[:i] - prefix = "You MUST use this format: " - if ( - try_i + 1 < MAX_RETRIES - and prefix not in current_desc - and (example := self._make_example(field.annotation)) - ): - signature = signature.with_updated_fields( - name, - desc=current_desc + "\n" + prefix + example + "\n" + suffix, - ) + except pydantic.ValidationError as e: + errors["general"] = _format_error(e) if errors: # Add new fields for each error for name, error in errors.items(): @@ -209,7 +226,7 @@ def _format_error(error: Exception): if isinstance(error, pydantic.ValidationError): errors = [] for e in error.errors(): - fields = ", ".join(e["loc"]) + fields = ", ".join(map(str, e["loc"])) errors.append(f"{e['msg']}: {fields} (error type: {e['type']})") return "; ".join(errors) else: diff --git a/dspy/predict/predict.py b/dspy/predict/predict.py index 60e668a836..d0a5f6010e 100644 --- a/dspy/predict/predict.py +++ b/dspy/predict/predict.py @@ -6,6 +6,7 @@ from dspy.signatures.signature import ensure_signature, signature_to_template + class Predict(Parameter): def __init__(self, signature, **config): self.stage = random.randbytes(8).hex() @@ -27,7 +28,7 @@ def dump_state(self): state["signature_instructions"] = self.signature.instructions *_, last_key = self.signature.fields.keys() - state["signature_prefix"] = self.signature.fields[last_key].json_schema_extra['prefix'] + state["signature_prefix"] = self.signature.fields[last_key].json_schema_extra["prefix"] return state @@ -85,6 +86,8 @@ def forward(self, **kwargs): # Switch to legacy format for dsp.generate template = signature_to_template(signature) + # print("Created template", template) + # print("From Signature", signature) if self.lm is None: x, C = dsp.generate(template, **config)(x, stage=self.stage) @@ -103,7 +106,8 @@ def forward(self, **kwargs): for field in template.fields: if field.output_variable not in kwargs.keys(): completions[-1][field.output_variable] = getattr( - c, field.output_variable, + c, + field.output_variable, ) pred = Prediction.from_completions(completions, signature=signature) diff --git a/dspy/primitives/module.py b/dspy/primitives/module.py index 1b4a342f71..59ec8a09f8 100644 --- a/dspy/primitives/module.py +++ b/dspy/primitives/module.py @@ -8,7 +8,7 @@ def __init__(self): def named_parameters(self): """ - Unlike PyTorch, handles (non-recursive) lists of parameters too. + Unlike PyTorch, handles (non-recursive) lists of parameters too. """ from dspy.predict.parameter import Parameter @@ -27,10 +27,10 @@ def add_parameter(param_name, param_value): elif isinstance(value, BaseModule): # When a sub-module is pre-compiled, keep it frozen. 
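            # (Its parameters are then left out of named_parameters, so optimizers
            # do not re-tune a sub-module that has already been compiled.)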
-            if not getattr(value, '_compiled', False):
+            if not getattr(value, "_compiled", False):
                 for sub_name, param in value.named_parameters():
                     add_parameter(f"{name}.{sub_name}", param)
-
+
         elif isinstance(value, (list, tuple)):
             for idx, item in enumerate(value):
                 add_parameter(f"{name}[{idx}]", item)
@@ -49,23 +49,23 @@ def deepcopy(self):
 
     def reset_copy(self):
         obj = copy.deepcopy(self)
-
+
         for param in obj.parameters():
             param.reset()
-
+
         return obj
-
+
     def dump_state(self):
         return {name: param.dump_state() for name, param in self.named_parameters()}
-
+
     def load_state(self, state):
         for name, param in self.named_parameters():
             param.load_state(state[name])
-
+
     def save(self, path):
         with open(path, "w") as f:
             f.write(ujson.dumps(self.dump_state(), indent=2))
-
+
     def load(self, path):
         with open(path) as f:
             self.load_state(ujson.loads(f.read()))
diff --git a/dspy/primitives/program.py b/dspy/primitives/program.py
index 5e063bfb00..512e7ce67a 100644
--- a/dspy/primitives/program.py
+++ b/dspy/primitives/program.py
@@ -1,4 +1,3 @@
-
 from dspy.primitives.module import BaseModule
 from dspy.primitives.assertions import *
 import re
@@ -16,7 +15,6 @@ class ProgramMeta(type):
 
 
 class Module(BaseModule, metaclass=ProgramMeta):
-
     def _base_init(self):
         self._compiled = False
 
@@ -29,12 +27,7 @@ def __call__(self, *args, **kwargs):
     def named_predictors(self):
         from dspy.predict.predict import Predict
 
-        named_parameters = self.named_parameters()
-        return [
-            (name, param)
-            for name, param in named_parameters
-            if isinstance(param, Predict)
-        ]
+        return [(name, param) for name, param in self.named_parameters() if isinstance(param, Predict)]
 
     def predictors(self):
         return [param for _, param in self.named_predictors()]
@@ -52,7 +45,7 @@ def map_named_predictors(self, func):
         for name, predictor in self.named_predictors():
             set_attribute_by_name(self, name, func(predictor))
         return self
-
+
     def activate_assertions(self, handler=backtrack_handler, **handler_args):
         """
         Activates assertions for the module.
diff --git a/dspy/teleprompt/signature_opt2.py b/dspy/teleprompt/signature_opt2.py
new file mode 100644
index 0000000000..1a9045ae23
--- /dev/null
+++ b/dspy/teleprompt/signature_opt2.py
@@ -0,0 +1,248 @@
+import random
+from typing import Generic, Literal, TypeVar, Type
+
+import pydantic
+import dspy
+from dspy.functional.functional import TypedChainOfThought
+from dspy.signatures import Signature
+from dspy import BaseModel
+
+# TODO: Consider using the prompt optimizer to optimize the prompt optimizer :O
+
+
+def make_info(signature: type[Signature]) -> BaseModel:
+    """Creates a SignatureInfo pydantic type that describes the Signature.
+
+    Returns an instance of this type, with the instructions and field descriptions of the input type. 
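+    The instance can be edited and then converted back into a working Signature through
+    the to_signature method attached below.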
+ """ + # First, create the SignatureInfo type + fields = { + "instructions": (str, pydantic.Field(description="The instructions for the task")), + } + for name in signature.fields: + fields[name + "_prefix"] = (str, pydantic.Field(description=f"The prefix for {name}")) + fields[name + "_desc"] = (str, pydantic.Field(description=f"The description for {name}")) + SignatureInfo = pydantic.create_model( # noqa: N806 + f"SignatureInfo[{signature.__name__}]", + **fields, + ) + + # Add a method to convert the SignatureInfo back into a Signature + def to_signature(info): + new_signature = signature.with_instructions(info.instructions) + for name in signature.fields: + new_signature = new_signature.with_updated_fields( + name, + prefix=getattr(info, name + "_prefix"), + desc=getattr(info, name + "_desc"), + ) + return new_signature + + SignatureInfo.to_signature = to_signature + + # Finally, make an instance of the SignatureInfo type with the signature's + # default instructions and field descriptions + values = {"instructions": signature.instructions} + for name, field in signature.fields.items(): + values[name + "_prefix"] = field.json_schema_extra["prefix"] + values[name + "_desc"] = field.json_schema_extra["desc"] + return SignatureInfo(**values) + + +T = TypeVar("T", bound=BaseModel) + + +# Note: This function wouldn't be necessary if we could make the number of prompts a generic parameter of the class, +# but alas it seems like this isn't possible in Python right now. The main reason being that types and generics only +# live inside the type system, and can't be used to generate code at runtime. +def make_initial_signature(n_prompts: int) -> Type[Signature]: + """Creates a GenerateInstructionInitial signature with the given number of initial prompts.""" + + class GenerateInstructionInitial(Signature, Generic[T]): + """You are a creative instruction optimizer for large language models. + + I will give you a ``signature`` of fields (inputs and outputs) in English. + Your task is to propose variations of the signature that will lead a good language model. + + Be very creative and think out of the box. Consider using inspiration such as: + Openers: + # You are as smart as ChatGPT. + # You are highly intelligent. + # You are an expert mathematician. + # You are a professor of mathematics. + Task Descriptions: + # Solve the following math question. + # Answer the following math question. + Closers: + # This will be fun! + # Take a deep breath and think carefully. + # I really need your help! + """ + + basic_signature: T = dspy.InputField() + proposed_signatures: list[T] = dspy.OutputField( + desc=f"A list of {n_prompts} very different variations of the basic signature", + min_items=n_prompts, + max_items=n_prompts, + ) + + return GenerateInstructionInitial + + +class ScoredSignature(BaseModel, Generic[T]): + signature: T + score: float = dspy.Field(gt=0, lt=100) + + +class GenerateInstructionGivenAttempts(dspy.Signature, Generic[T]): + """You are an instruction optimizer for large language models. + + I will give some task instructions I've tried, along with their corresponding validation scores. + - The instructions are arranged in increasing order based on their scores, where higher scores indicate better quality. + - Your task is to propose a new instruction that will lead a good language model to perform the task even better. + - Be creative, and think out of the box. + - Don't repeat instructions, descriptions and prefixes that have already been attempted. 
+ """ + + attempted_signatures: list[ScoredSignature[T]] = dspy.InputField() + proposed_signature: T = dspy.OutputField(desc="Next signature to try") + # expected_score: float = dspy.OutputField(desc="The expected score for the new signature") + + +def optimize_signature( + student, + evaluator, + n_iterations=10, + strategy: Literal["best", "last"] = "best", + # Formerly part of the constructor + prompt_model=None, + initial_prompts=2, + temperature=1.4, + verbose=False, +) -> dspy.Program: + """Create a new program that is optimized for the given task. + + `student` is a program that needs to be optimized, + note that it may be zero-shot or already pre-optimized for demos != []. + + Parameters + ---------- + student : dspy.Program + The program to optimize. + evaluator : dspy.Evaluator + The evaluator to use to score the program. + n_iterations : int, optional + The number of iterations to run, by default 10 + strategy : Literal["best", "last"], optional + The strategy to use to select the final program, by default "best" + prompt_model : dspy.LanguageModel, optional + The language model to use to generate prompts, by default None + initial_prompts : int, optional + The number of initial prompts to generate, by default 2. + Note that we also use the "plain" signature as a prompt, so the total number of prompts is initial_prompts + 1. + temperature : float, optional + The temperature to use when generating new prompts, by default 1.4 + verbose : bool, optional + Whether to print debug information, by default False + """ + prompt_model = prompt_model or dspy.settings.lm + MyGenerateInstructionInitial = make_initial_signature(initial_prompts) # noqa: N806 + + module = student.deepcopy() + # For some reason named_predictors sometimes returns an empty list, so we use named_parameters instead + named_predictors = module.named_parameters() + if verbose: + print("All predictors:") + print(f"{named_predictors=}") + + candidates = {} + scores = [] + + # First round, just use initial prompts + for name, p in named_predictors: + candidates[name] = [make_info(p.signature)] + + # Make some initial candidates + with dspy.settings.context(lm=prompt_model): + # TODO: Parallelize this + for name, p in named_predictors: + if verbose: + print(f"Generating new signature for {p}...") + info = candidates[name][0] # Use initial info, to make sure types are identical + generator = TypedChainOfThought(MyGenerateInstructionInitial[type(info)]) + candidates[name] += generator( + basic_signature=info, + config={"temperature": temperature}, + ).proposed_signatures + assert len(candidates[name]) == initial_prompts + 1 # Basic signature + initial prompts + + candidates[name] = [ + info.model_copy(update={"instructions": info.instructions + f"({i})"}) + for i, info in enumerate(candidates[name]) + ] + + for i, c in enumerate(candidates[name]): + print(f"Generated candidate {i}:") + print(c.to_signature()) + + # Main loop of scoring + generating new candidates + for i in range(n_iterations): + if verbose: + print("\n" + "=" * 80) + print(f"Running eval iteration {i}...") + + # Test candidate i + for p in module.predictors(): + print(f"Installing signature {i}: ") + print(candidates[name][i].to_signature()) + p.signature = candidates[name][i].to_signature() + + score = evaluator(module) + score += random.random() * 10 + scores.append(score) + + if verbose: + print(f"Scores for iteration {i}: {score}") + + # If we are still testing initial prompts, continue + if i + 1 < len(next(iter(candidates.values()))): + continue + + 
# If we are done, there's no need to generate new candidates + if i + 1 == n_iterations: + break + + # Otherwise generate the next candidate + with dspy.settings.context(lm=prompt_model): + # TODO: Parallelize this + for name, p in named_predictors: + SignatureInfo = type(candidates[name][0]) # noqa: N806 + generator = TypedChainOfThought(GenerateInstructionGivenAttempts[SignatureInfo]) + attempted_signatures = [ + ScoredSignature[SignatureInfo](signature=info, score=sc) + for info, sc in zip(candidates[name], scores) + ] + attempted_signatures.sort(key=lambda x: x.score) + if verbose: + print( + f"Generating new signature for {name} based on {len(attempted_signatures)} previous signatures..." + ) + new_signature = generator( + attempted_signatures=attempted_signatures, + config={"temperature": temperature}, + ).proposed_signature + if verbose: + print("Generated candidate:") + print(new_signature.to_signature()) + candidates[name].append(new_signature) + + if strategy == "last": + return module + + if strategy == "best": + i = scores.index(max(scores)) + for name, p in named_predictors: + p.signature = candidates[name][i].to_signature() + return module + + raise ValueError(f"Invalid strategy: {strategy}") diff --git a/examples/generation.py b/examples/generation.py new file mode 100644 index 0000000000..36d3d1c921 --- /dev/null +++ b/examples/generation.py @@ -0,0 +1,28 @@ +from pydantic import BaseModel, Field +from dspy.teleprompt import LabeledFewShot +from dspy.functional import TypedPredictor + +import dspy +turbo = dspy.OpenAI(model='gpt-3.5-turbo') +colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') +dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts) + +class SyntheticFact(BaseModel): + fact: str = Field(..., description="a statement") + varacity: bool = Field(..., description="is the statement true or false") + +class ExampleSignature(dspy.Signature): + """Generate an example of a synthetic fact.""" + fact: SyntheticFact = dspy.OutputField() + +generator = TypedPredictor(ExampleSignature) +examples = generator(config=dict(n=10)) + +# If you have examples and want more +existing_examples = [ + dspy.Example(fact="The sky is blue", varacity=True), + dspy.Example(fact="The sky is green", varacity=False), +] +trained = LabeledFewShot().compile(student=generator, trainset=existing_examples) + +augmented_examples = trained(config=dict(n=10)) diff --git a/examples/nli/scone/ScoNe b/examples/nli/scone/ScoNe new file mode 160000 index 0000000000..b02532a2f4 --- /dev/null +++ b/examples/nli/scone/ScoNe @@ -0,0 +1 @@ +Subproject commit b02532a2f4185c6118a57a148455e0750592d8c8 diff --git a/examples/quiz/DSPy_QuizGen_Cache b/examples/quiz/DSPy_QuizGen_Cache new file mode 160000 index 0000000000..27d6d433e7 --- /dev/null +++ b/examples/quiz/DSPy_QuizGen_Cache @@ -0,0 +1 @@ +Subproject commit 27d6d433e73b91d3cf677ecf1d757813fcbd611d diff --git a/examples/signature_opt2.ipynb b/examples/signature_opt2.ipynb new file mode 100644 index 0000000000..9edc0d4c4e --- /dev/null +++ b/examples/signature_opt2.ipynb @@ -0,0 +1,1717 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The autoreload extension is already loaded. 
To reload it, use:\n",
      "  %reload_ext autoreload\n"
     ]
    }
   ],
   "source": [
    "%load_ext autoreload\n",
    "%autoreload 2\n",
    "import dspy\n",
    "import os\n",
    "os.environ['OPENAI_API_KEY'] = 'sk-...'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=4000)\n",
    "gpt4 = dspy.OpenAI(model='gpt-4', max_tokens=4000)\n",
    "dspy.settings.configure(lm=turbo)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(20, 50)"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from dspy.datasets import HotPotQA\n",
    "\n",
    "# Load the dataset.\n",
    "dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0)\n",
    "\n",
    "# Tell DSPy that the 'question' field is the input. Any other fields are labels and/or metadata.\n",
    "trainset = [x.with_inputs('question') for x in dataset.train]\n",
    "devset = [x.with_inputs('question') for x in dataset.dev]\n",
    "\n",
    "len(trainset), len(devset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "class BasicQA(dspy.Signature):\n",
    "    \"\"\"Answer questions with short factoid answers.\"\"\"\n",
    "\n",
    "    question = dspy.InputField()\n",
    "    answer = dspy.OutputField(desc=\"often between 1 and 5 words\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "All predictors:\n",
      "named_predictors=[('predictor', Predict(BasicQA(question -> answer\n",
      "    instructions='Answer questions with short factoid answers.'\n",
      "    question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}'})\n",
      "    answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:'})\n",
      ")))]\n",
      "Generating new signature for Predict(BasicQA(question -> answer\n",
      "    instructions='Answer questions with short factoid answers.'\n",
      "    question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}'})\n",
      "    answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:'})\n",
      "))...\n",
      "Generated candidate 0:\n",
      "StringSignature(question -> answer\n",
      "    instructions='Answer questions with short factoid answers.(0)'\n",
      "    question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}'})\n",
      "    answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:'})\n",
      ")\n",
      "Generated candidate 1:\n",
      "StringSignature(question -> answer\n",
      "    instructions='Answer series of questions where answers must share a common theme.(1)'\n",
      "    question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Q1,', 'desc': '${question}'})\n",
      "    answer = Field(annotation=str required=True json_schema_extra={'desc': 'charge amplifier designed 
based by Moog on stripline technology', '__dspy_field_type': 'output', 'prefix': 'A1 Wolfgang Amplifier,'})\n", + ")\n", + "Generated candidate 2:\n", + "StringSignature(question -> answer\n", + " instructions='Simulate exploratory dialogue between two people one Questioner and other Provider, Respond to each Q- below.(2)'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Q-What Computability study ranges?', 'desc': 'may also involve strings produced by non-deterministic computation'})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'answer DescHash Pure stainless Aberdeen stimulating Victoria names central whiskey article promise twitch Ohio Amber how statements board!', '__dspy_field_type': 'output', 'prefix': 'A-The explain phase-leading out question onset sound crawled fundingProblem in such separated syllabled band phrase assist reduction Haus mutual widse phoneme runtime shruproperholderstoInt valuepirizeThunder'})\n", + ")\n", + "\n", + "================================================================================\n", + "Running eval iteration 0...\n", + "Installing signature 0: \n", + "StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.(0)'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}'})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:'})\n", + ")\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 5 / 20 (25.0): 100%|██████████| 20/20 [00:00<00:00, 5144.18it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 5 / 20 (25.0%)\n", + "Scores for iteration 0: 28.55243378134378\n", + "\n", + "================================================================================\n", + "Running eval iteration 1...\n", + "Installing signature 1: \n", + "StringSignature(question -> answer\n", + " instructions='Answer series of questions where answers must share a common theme.(1)'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Q1,', 'desc': '${question}'})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'charge amplifier designed based by Moog on stripline technology', '__dspy_field_type': 'output', 'prefix': 'A1 Wolfgang Amplifier,'})\n", + ")\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 5 / 20 (25.0): 100%|██████████| 20/20 [00:00<00:00, 4207.98it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 5 / 20 (25.0%)\n", + "Scores for iteration 1: 29.911449495835697\n", + "\n", + "================================================================================\n", + "Running eval iteration 2...\n", + "Installing signature 2: \n", + "StringSignature(question -> answer\n", + " instructions='Simulate exploratory dialogue between two people one Questioner and other Provider, Respond to each Q- below.(2)'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Q-What Computability study ranges?', 'desc': 'may also involve strings produced by non-deterministic computation'})\n", + " answer = Field(annotation=str 
required=True json_schema_extra={'desc': 'answer DescHash Pure stainless Aberdeen stimulating Victoria names central whiskey article promise twitch Ohio Amber how statements board!', '__dspy_field_type': 'output', 'prefix': 'A-The explain phase-leading out question onset sound crawled fundingProblem in such separated syllabled band phrase assist reduction Haus mutual widse phoneme runtime shruproperholderstoInt valuepirizeThunder'})\n", + ")\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 5 / 20 (25.0): 100%|██████████| 20/20 [00:00<00:00, 4577.93it/s]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 5 / 20 (25.0%)\n", + "Scores for iteration 2: 34.58455944537296\n", + "Generating new signature for predictor based on 3 previous signatures...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "from dspy.evaluate import Evaluate\n", + "from dspy.evaluate.metrics import answer_exact_match\n", + "from dspy.functional import TypedPredictor\n", + "from dspy.teleprompt.signature_opt2 import optimize_signature\n", + "\n", + "evaluator = Evaluate(devset=devset, metric=answer_exact_match, num_threads=10, display_progress=True)\n", + "\n", + "program = optimize_signature(\n", + " student=TypedPredictor(BasicQA),\n", + " evaluator=Evaluate(devset=trainset, metric=answer_exact_match, num_threads=10, display_progress=True),\n", + " initial_prompts=2,\n", + " n_iterations=8,\n", + " verbose=True,\n", + " prompt_model=gpt4,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\n", + "\n", + "Given the fields `attempted_signatures`, produce the fields `proposed_signature`.\n", + "\n", + "---\n", + "\n", + "Follow the following format.\n", + "\n", + "Attempted Signatures: ${attempted_signatures}\n", + "Reasoning: Let's think step by step in order to ${produce the proposed_signature}. We ...\n", + "Proposed Signature: The improved signature for the language model. Respond with a single JSON object. 
JSON Schema: {\"properties\": {\"instructions\": {\"description\": \"The instructions for the task\", \"title\": \"Instructions\", \"type\": \"string\"}, \"question_prefix\": {\"description\": \"The prefix for question\", \"title\": \"Question Prefix\", \"type\": \"string\"}, \"question_desc\": {\"description\": \"The description for question\", \"title\": \"Question Desc\", \"type\": \"string\"}, \"answer_prefix\": {\"description\": \"The prefix for answer\", \"title\": \"Answer Prefix\", \"type\": \"string\"}, \"answer_desc\": {\"description\": \"The description for answer\", \"title\": \"Answer Desc\", \"type\": \"string\"}}, \"required\": [\"instructions\", \"question_prefix\", \"question_desc\", \"answer_prefix\", \"answer_desc\"], \"title\": \"SignatureInfo[BasicQA]\", \"type\": \"object\"}\n", + "\n", + "---\n", + "\n", + "Attempted Signatures: [{\"signature\":{\"instructions\":\"Answer questions with short factoid answers.(0)\",\"question_prefix\":\"Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 5 words\"},\"score\":25.0},{\"signature\":{\"instructions\":\"Answer series of questions where answers must share a common theme.(1)\",\"question_prefix\":\"Q1,\",\"question_desc\":\"${question}\",\"answer_prefix\":\"A1 Wolfgang Amplifier,\",\"answer_desc\":\"charge amplifier designed based by Moog on stripline technology\"},\"score\":25.0},{\"signature\":{\"instructions\":\"Simulate exploratory dialogue between two people one Questioner and other Provider, Respond to each Q- below.(2)\",\"question_prefix\":\"Q-What Computability study ranges?\",\"question_desc\":\"may also involve strings produced by non-deterministic computation\",\"answer_prefix\":\"A-The explain phase-leading out question onset sound crawled fundingProblem in such separated syllabled band phrase assist reduction Haus mutual widse phoneme runtime shruproperholderstoInt valuepirizeThunder\",\"answer_desc\":\"answer DescHash Pure stainless Aberdeen stimulating Victoria names central whiskey article promise twitch Ohio Amber how statements board!\"},\"score\":25.0}]\n", + "Reasoning: Let's think step by step in order to\u001b[32m produce the proposed_signature. We can see that the attempted signatures are quite varied and complex, with different instructions, prefixes, and descriptions for both questions and answers. However, they all share a common structure: they all have instructions, a question prefix, a question description, an answer prefix, and an answer description. Therefore, we can propose a signature that includes these common elements, but with more general descriptions to accommodate the variety of tasks. \n", + "Proposed Signature: The improved signature for the language model. Respond with a single JSON object. 
JSON Schema: {\"properties\": {\"instructions\": {\"description\": \"The instructions for the task\", \"title\": \"Instructions\", \"type\": \"string\"}, \"question_prefix\": {\"description\": \"The prefix for question\", \"title\": \"Question Prefix\", \"type\": \"string\"}, \"question_desc\": {\"description\": \"The description for question\", \"title\": \"Question Desc\", \"type\": \"string\"}, \"answer_prefix\": {\"description\": \"The prefix for answer\", \"title\": \"Answer Prefix\", \"type\": \"string\"}, \"answer_desc\": {\"description\": \"The description for answer\", \"title\": \"Answer Desc\", \"type\": \"string\"}}, \"required\": [\"instructions\", \"question_prefix\", \"question_desc\", \"answer_prefix\", \"answer_desc\"], \"title\": \"SignatureInfo[BasicQA]\", \"type\": \"object\"}\u001b[0m\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Make a very succinct json object that validates with the following schema\n", + "\n", + "---\n", + "\n", + "Follow the following format.\n", + "\n", + "Json Schema: ${json_schema}\n", + "Json Object: ${json_object}\n", + "\n", + "---\n", + "\n", + "Json Schema: {\"properties\": {\"instructions\": {\"description\": \"The instructions for the task\", \"title\": \"Instructions\", \"type\": \"string\"}, \"question_prefix\": {\"description\": \"The prefix for question\", \"title\": \"Question Prefix\", \"type\": \"string\"}, \"question_desc\": {\"description\": \"The description for question\", \"title\": \"Question Desc\", \"type\": \"string\"}, \"answer_prefix\": {\"description\": \"The prefix for answer\", \"title\": \"Answer Prefix\", \"type\": \"string\"}, \"answer_desc\": {\"description\": \"The description for answer\", \"title\": \"Answer Desc\", \"type\": \"string\"}}, \"required\": [\"instructions\", \"question_prefix\", \"question_desc\", \"answer_prefix\", \"answer_desc\"], \"title\": \"SignatureInfo[BasicQA]\", \"type\": \"object\"}\n", + "Json Object:\u001b[32m {\"instructions\": \"Complete the task\", \"question_prefix\": \"Q:\", \"question_desc\": \"What is the capital of France?\", \"answer_prefix\": \"A:\", \"answer_desc\": \"Paris\"}\u001b[0m\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Given the fields `attempted_signatures`, produce the fields `proposed_signature`.\n", + "\n", + "---\n", + "\n", + "Follow the following format.\n", + "\n", + "Attempted Signatures: ${attempted_signatures}\n", + "\n", + "Past Error (proposed_signature): An error to avoid in the future\n", + "\n", + "Reasoning: Let's think step by step in order to ${produce the proposed_signature}. We ...\n", + "\n", + "Proposed Signature:\n", + "The improved signature for the language model. Respond with a single JSON object. 
\n", + "You MUST use this format: {\"instructions\": \"Complete the task\", \"question_prefix\": \"Q:\", \"question_desc\": \"What is the capital of France?\", \"answer_prefix\": \"A:\", \"answer_desc\": \"Paris\"}\n", + "JSON Schema: {\"properties\": {\"instructions\": {\"description\": \"The instructions for the task\", \"title\": \"Instructions\", \"type\": \"string\"}, \"question_prefix\": {\"description\": \"The prefix for question\", \"title\": \"Question Prefix\", \"type\": \"string\"}, \"question_desc\": {\"description\": \"The description for question\", \"title\": \"Question Desc\", \"type\": \"string\"}, \"answer_prefix\": {\"description\": \"The prefix for answer\", \"title\": \"Answer Prefix\", \"type\": \"string\"}, \"answer_desc\": {\"description\": \"The description for answer\", \"title\": \"Answer Desc\", \"type\": \"string\"}}, \"required\": [\"instructions\", \"question_prefix\", \"question_desc\", \"answer_prefix\", \"answer_desc\"], \"title\": \"SignatureInfo[BasicQA]\", \"type\": \"object\"}\n", + "\n", + "---\n", + "\n", + "Attempted Signatures: [{\"signature\":{\"instructions\":\"Answer questions with short factoid answers.(0)\",\"question_prefix\":\"Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 5 words\"},\"score\":25.0},{\"signature\":{\"instructions\":\"Answer series of questions where answers must share a common theme.(1)\",\"question_prefix\":\"Q1,\",\"question_desc\":\"${question}\",\"answer_prefix\":\"A1 Wolfgang Amplifier,\",\"answer_desc\":\"charge amplifier designed based by Moog on stripline technology\"},\"score\":25.0},{\"signature\":{\"instructions\":\"Simulate exploratory dialogue between two people one Questioner and other Provider, Respond to each Q- below.(2)\",\"question_prefix\":\"Q-What Computability study ranges?\",\"question_desc\":\"may also involve strings produced by non-deterministic computation\",\"answer_prefix\":\"A-The explain phase-leading out question onset sound crawled fundingProblem in such separated syllabled band phrase assist reduction Haus mutual widse phoneme runtime shruproperholderstoInt valuepirizeThunder\",\"answer_desc\":\"answer DescHash Pure stainless Aberdeen stimulating Victoria names central whiskey article promise twitch Ohio Amber how statements board!\"},\"score\":25.0}]\n", + "\n", + "Past Error (proposed_signature): ValueError('json output should start and end with { and }')\n", + "\n", + "Reasoning: Let's think step by step in order to\u001b[32m produce the proposed_signature. We need to ensure that the JSON object is properly formatted and contains all the required fields. The attempted signatures provided are not in the correct format. They contain additional fields such as \"score\" and the structure of the JSON object is not as expected. The JSON object should start and end with { and }, and should not contain any additional fields or arrays. 
\n", + "\n", + "Proposed Signature:\n", + "{\"instructions\": \"Answer questions with short factoid answers.\", \"question_prefix\": \"Question:\", \"question_desc\": \"${question}\", \"answer_prefix\": \"Answer:\", \"answer_desc\": \"often between 1 and 5 words\"}\u001b[0m\n", + "\n", + "\n", + "\n" + ] + } + ], + "source": [ + "gpt4.inspect_history(n=3)" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "ename": "NameError", + "evalue": "name 'tp' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[19], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mtp\u001b[49m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__dict__\u001b[39m\n", + "\u001b[0;31mNameError\u001b[0m: name 'tp' is not defined" + ] + } + ], + "source": [ + "tp.__dict__" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "module.predictors()=[]\n", + "\n", + "================================================================================\n", + "Running eval iteration 0...\n", + "list: []\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x16aba8900>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159a5fe20>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159a5fa60>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159a5ef20>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x10632f240>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . 
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 5483.04it/s]"
+      ]
+     },
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Average Metric: 16 / 50 (32.0%)\n",
+       "Scores for iteration 0: 32.0\n",
+       "\n",
+       "================================================================================\n",
+       "Running eval iteration 1...\n",
+       "list: []\n",
+       "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n",
+       "From Signature StringSignature(question -> answer\n",
+       "    instructions='Answer questions with short factoid answers.'\n",
+       "    question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x1585367a0>})\n",
+       "    answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13c1391c0>, 'parser': })\n",
+       ")\n"
+      ]
+     },
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 4371.34it/s]"
+      ]
+     },
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Average Metric: 16 / 50 (32.0%)\n",
+       "Scores for iteration 1: 32.0\n",
+       "\n",
+       "================================================================================\n",
+       "Running eval iteration 2...\n",
+       "list: []\n",
+       "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n",
+       "From Signature StringSignature(question -> answer\n",
+       "    instructions='Answer questions with short factoid answers.'\n",
+       "    question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': .
at 0x13c138b80>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13c13b240>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x15848bc40>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13c13b600>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13f4a5080>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x1584e0360>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13bbafa60>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x1585b68e0>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13bbafec0>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x1584e0b80>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13c13afc0>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . 
at 0x13c138040>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13a002160>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x158488ea0>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13c13af20>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13f4a5d00>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x16aba89a0>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13bcb4220>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe2160>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13bcb47c0>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x1584e0b80>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13f4a6020>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . 
at 0x1584887c0>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe0180>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x1584885e0>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13c13b9c0>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13f4a6e80>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13bbafe20>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe2200>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe3380>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x16aba8400>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13f4a5080>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13c1389a0>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . 
at 0x159fe3d80>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x1584e0540>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13c138c20>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13f4a6e80>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13bbafc40>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe1ee0>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe2340>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe2ca0>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x16aba89a0>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13f4a5d00>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13c1394e0>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . 
at 0x159fe0680>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe1a80>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe2c00>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x16aba89a0>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x1585b7ba0>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x158489260>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13bcb47c0>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13c138b80>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x158488ea0>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x1585b42c0>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13a001da0>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . 
at 0x13f4a5a80>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13c139580>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe02c0>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13c13b240>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13f4a5940>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13a002660>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x15848b9c0>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x15848a3e0>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe0360>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x15848b9c0>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13bbafd80>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . 
at 0x13f4a5940>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13c13b600>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe3ba0>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13c1394e0>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13f4a6e80>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13bbafa60>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13bcb7920>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe13a0>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe2340>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x1585b47c0>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13a002660>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . 
at 0x13f4a7240>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13c138040>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe2d40>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe32e0>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13c13bba0>, 'parser': })\n", + ")\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x16aba82c0>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x1585b4680>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x158488f40>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe2d40>, 'parser': })\n", + ")\n", + "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", + "From Signature StringSignature(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x1585b6b60>})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . 
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 5230.07it/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Average Metric: 16 / 50 (32.0%)\n",
"Scores for iteration 2: 32.0\n",
"\n",
"================================================================================\n",
"Running eval iteration 3...\n",
"list: []\n",
"Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n",
"From Signature StringSignature(question -> answer\n",
" instructions='Answer questions with short factoid answers.'\n",
" question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': .
at 0x1584e0cc0>})\n",
+        "    answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13c138040>, 'parser': })\n",
+        ")\n",
+        "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n",
+        "From Signature StringSignature(question -> answer\n",
+        "    instructions='Answer questions with short factoid answers.'\n",
+        "    question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe0680>})\n",
+        "    answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13c13b240>, 'parser': })\n",
+        ")\n"
+       ]
+      },
+      {
+       "name": "stderr",
+       "output_type": "stream",
+       "text": [
+        "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 4131.18it/s]"
+       ]
+      },
+      {
+       "name": "stdout",
+       "output_type": "stream",
+       "text": [
+        "Average Metric: 16 / 50 (32.0%)\n",
+        "Scores for iteration 3: 32.0\n"
+       ]
+      },
+      {
+       "name": "stderr",
+       "output_type": "stream",
+       "text": [
+        "\n"
+       ]
+      },
+      {
+       "data": {
+        "text/plain": []
+       },
+       "execution_count": 63,
+       "metadata": {},
+       "output_type": "execute_result"
+      }
+     ],
+     "source": [
+      "optimizer.compile(TypedPredictor(BasicQA), evaluator, n_iterations=4)"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "execution_count": null,
+     "metadata": {},
+     "outputs": [],
+     "source": [
+      "gpt4.inspect_history(n=4)"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Feel free to try any other queries you like.",
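+      "\n",
+      "\n",
+      "For example, you can inspect the final optimized prompt with `gpt4.inspect_history(n=1)`, or rerun `evaluator` on the compiled program to double-check the iteration scores reported above."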
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "py39", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.8" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/tweets/DSPy_TweetGen_Cache b/examples/tweets/DSPy_TweetGen_Cache new file mode 160000 index 0000000000..22186fd4d4 --- /dev/null +++ b/examples/tweets/DSPy_TweetGen_Cache @@ -0,0 +1 @@ +Subproject commit 22186fd4d4fa940256ca8c4ab70f165276e5c834 diff --git a/tests/functional/test_functional.py b/tests/functional/test_functional.py index cabbc7cd09..600e35b63d 100644 --- a/tests/functional/test_functional.py +++ b/tests/functional/test_functional.py @@ -8,7 +8,7 @@ import pytest import dspy -from dspy.functional import predictor, cot, FunctionalModule, TypedPredictor, functional +from dspy.functional import predictor, cot, FunctionalModule, TypedPredictor, TypedChainOfThought from dspy.primitives.example import Example from dspy.teleprompt.bootstrap import BootstrapFewShot from dspy.teleprompt.vanilla import LabeledFewShot @@ -122,7 +122,7 @@ def forward(self, **kwargs): qa = QA() assert isinstance(qa, FunctionalModule) - assert isinstance(qa.answer, functional._StripOutput) + assert isinstance(qa.answer, dspy.Module) question, answer = qa(topic="Physics") @@ -407,6 +407,7 @@ def get_user_details() -> UserDetails: with pytest.raises(ValueError): get_user_details() + print(lm.get_convo(-1)) assert lm.get_convo(-1) == textwrap.dedent( """\ Given the fields , produce the fields `get_user_details`. @@ -467,6 +468,23 @@ class TestSignature(dspy.Signature): assert output == [0, 1, 2] +def test_multiple_outputs_int_cot(): + # Note: Multiple outputs only work when the language model "speculatively" generates all the outputs in one go. + lm = DummyLM( + [ + "thoughts 0\nOutput: 0\n", + "thoughts 1\nOutput: 1\n", + "thoughts 2\nOutput: 2\n", + ] + ) + dspy.settings.configure(lm=lm) + + test = TypedChainOfThought("input:str -> output:int") + + output = test(input="8", config=dict(n=3)).completions.output + assert output == [0, 1, 2] + + def test_parse_type_string(): lm = DummyLM([str(i) for i in range(100)]) dspy.settings.configure(lm=lm) @@ -532,3 +550,49 @@ class ExampleSignature(dspy.Signature): augmented_examples = trained(config=dict(n=3)) for ex in augmented_examples.completions.fact: assert isinstance(ex, SyntheticFact) + + +def test_list_input2(): + # Inspired by the Signature Optimizer + + class ScoredString(pydantic.BaseModel): + string: str + score: float + + class ScoredSignature(dspy.Signature): + attempted_signatures: list[ScoredString] = dspy.InputField() + proposed_signature: str = dspy.OutputField() + + program = TypedChainOfThought(ScoredSignature) + + lm = DummyLM(["Thoughts", "Output"]) + dspy.settings.configure(lm=lm) + + output = program( + attempted_signatures=[ + ScoredString(string="string 1", score=0.5), + ScoredString(string="string 2", score=0.4), + ScoredString(string="string 3", score=0.3), + ] + ).proposed_signature + + print(lm.get_convo(-1)) + + assert output == "Output" + + assert lm.get_convo(-1) == textwrap.dedent("""\ + Given the fields `attempted_signatures`, produce the fields `proposed_signature`. + + --- + + Follow the following format. 
+ + Attempted Signatures: ${attempted_signatures} + Reasoning: Let's think step by step in order to ${produce the proposed_signature}. We ... + Proposed Signature: ${proposed_signature} + + --- + + Attempted Signatures: [{"string":"string 1","score":0.5},{"string":"string 2","score":0.4},{"string":"string 3","score":0.3}] + Reasoning: Let's think step by step in order to Thoughts + Proposed Signature: Output""") diff --git a/tests/functional/test_signature_opt2.py b/tests/functional/test_signature_opt2.py new file mode 100644 index 0000000000..469bf3f252 --- /dev/null +++ b/tests/functional/test_signature_opt2.py @@ -0,0 +1,165 @@ +import json +import dspy +from dspy.evaluate import Evaluate +from dspy.functional import TypedPredictor +from dspy.teleprompt.signature_opt2 import ( + GenerateInstructionGivenAttempts, + ScoredSignature, + make_info, + optimize_signature, +) +from dspy.utils import DummyLM + +from dspy.evaluate import Evaluate +from dspy.evaluate.metrics import answer_exact_match +from dspy.functional import TypedPredictor + + +class BasicQA(dspy.Signature): + question: str = dspy.InputField() + answer: str = dspy.OutputField() + + +hotpotqa = [ + ex.with_inputs("question") + for ex in [ + dspy.Example( + question="At My Window was released by which American singer-songwriter?", + answer="John Townes Van Zandt", + ), + dspy.Example( + question="which American actor was Candace Kita guest starred with ", + answer="Bill Murray", + ), + dspy.Example( + question="Which of these publications was most recently published, Who Put the Bomp or Self?", + answer="Self", + ), + dspy.Example( + question="The Victorians - Their Story In Pictures is a documentary series written by an author born in what year?", + answer="1950", + ), + dspy.Example( + question="Which magazine has published articles by Scott Shaw, Tae Kwon Do Times or Southwest Art?", + answer="Tae Kwon Do Times", + ), + dspy.Example( + question="In what year was the club founded that played Manchester City in the 1972 FA Charity Shield", + answer="1874", + ), + dspy.Example( + question="Which is taller, the Empire State Building or the Bank of America Tower?", + answer="The Empire State Building", + ), + dspy.Example( + question='Which American actress who made their film debut in the 1995 teen drama "Kids" was the co-founder of Voto Latino?', + answer="Rosario Dawson", + ), + dspy.Example( + question="Tombstone stared an actor born May 17, 1955 known as who?", + answer="Bill Paxton", + ), + dspy.Example( + question="What is the code name for the German offensive that started this Second World War engagement on the Eastern Front (a few hundred kilometers from Moscow) between Soviet and German forces, which included 102nd Infantry Division?", + answer="Operation Citadel", + ), + dspy.Example( + question='Who acted in the shot film The Shore and is also the youngest actress ever to play Ophelia in a Royal Shakespeare Company production of "Hamlet." ?', + answer="Kerry Condon", + ), + dspy.Example( + question="Which company distributed this 1977 American animated film produced by Walt Disney Productions for which Sherman Brothers wrote songs?", + answer="Buena Vista Distribution", + ), + dspy.Example( + question="Samantha Cristoforetti and Mark Shuttleworth are both best known for being first in their field to go where? ", + answer="space", + ), + dspy.Example( + question="Having the combination of excellent foot speed and bat speed helped Eric Davis, create what kind of outfield for the Los Angeles Dodgers? 
", + answer="Outfield of Dreams", + ), + dspy.Example( + question="Which Pakistani cricket umpire who won 3 consecutive ICC umpire of the year awards in 2009, 2010, and 2011 will be in the ICC World Twenty20?", + answer="Aleem Sarwar Dar", + ), + dspy.Example( + question="The Organisation that allows a community to influence their operation or use and to enjoy the benefits arisingwas founded in what year?", + answer="2010", + ), + dspy.Example( + question='"Everything Has Changed" is a song from an album released under which record label ?', + answer="Big Machine Records", + ), + dspy.Example( + question="Who is older, Aleksandr Danilovich Aleksandrov or Anatoly Fomenko?", + answer="Aleksandr Danilovich Aleksandrov", + ), + dspy.Example( + question="On the coast of what ocean is the birthplace of Diogal Sakho?", + answer="Atlantic", + ), + dspy.Example( + question="This American guitarist best known for her work with the Iron Maidens is an ancestor of a composer who was known as what?", + answer="The Waltz King", + ), + ] +] + + +def test_signature_info(): + info = make_info(BasicQA) + SignatureInfo = type(info) + + devset = [ + dspy.Example( + instructions="Answer the following questions", + question_desc="Some question to answer", + question_prefix="Q: ", + answer_desc="A short answer to the question", + answer_prefix="A: ", + ), + ] + + lm = DummyLM( + [ + json.dumps(dict(devset[0])), # Proposed signature + ] + ) + dspy.settings.configure(lm=lm) + + generator = TypedPredictor(GenerateInstructionGivenAttempts[SignatureInfo]) + + res = generator(attempted_signatures=[ScoredSignature[SignatureInfo](signature=info, score=50)]) + assert res.proposed_signature == SignatureInfo(**devset[0]) + + # Test the "to_signature" method + + class OutputSignature(dspy.Signature): + """Answer the following questions""" + + question: str = dspy.InputField(desc="Some question to answer", prefix="Q: ") + answer: str = dspy.OutputField(desc="A short answer to the question", prefix="A: ") + + assert res.proposed_signature.to_signature().equals(OutputSignature) + + +def test_opt(): + qa_model = DummyLM([]) + prompt_model = DummyLM( + [ + # Seed prompts + "some thoughts", + '{"value": [{"instructions": "I", "question_desc": "$q", "question_prefix": "Q:", "answer_desc": "$a", "answer_prefix": "A:"}]}', + ] + ) + dspy.settings.configure(lm=qa_model) + + program = optimize_signature( + student=TypedPredictor(BasicQA), + evaluator=Evaluate(devset=hotpotqa, metric=answer_exact_match, num_threads=1), + initial_prompts=1, + n_iterations=1, + verbose=True, + prompt_model=prompt_model, + ) diff --git a/tests/predict/test_predict.py b/tests/predict/test_predict.py index e44b3a135c..2ded1e9f12 100644 --- a/tests/predict/test_predict.py +++ b/tests/predict/test_predict.py @@ -1,14 +1,13 @@ import dspy from dspy import Predict, Signature from dspy.utils.dummies import DummyLM +import copy def test_initialization_with_string_signature(): signature_string = "input1, input2 -> output" predict = Predict(signature_string) - expected_instruction = ( - "Given the fields `input1`, `input2`, produce the fields `output`." - ) + expected_instruction = "Given the fields `input1`, `input2`, produce the fields `output`." 
assert predict.signature.instructions == expected_instruction assert predict.signature.instructions == Signature(signature_string).instructions @@ -89,3 +88,34 @@ def test_multi_output(): results = program(question="What is 1+1?") assert results.completions.answer[0] == "my first answer" assert results.completions.answer[1] == "my second answer" + + +def test_multi_output2(): + program = Predict("question -> answer1, answer2", n=2) + dspy.settings.configure( + lm=DummyLM( + [ + "my 0 answer\nAnswer 2: my 2 answer", + "my 1 answer\nAnswer 2: my 3 answer", + ], + ) + ) + results = program(question="What is 1+1?") + assert results.completions.answer1[0] == "my 0 answer" + assert results.completions.answer1[1] == "my 1 answer" + assert results.completions.answer2[0] == "my 2 answer" + assert results.completions.answer2[1] == "my 3 answer" + + +def test_named_predictors(): + class MyModule(dspy.Module): + def __init__(self): + super().__init__() + self.inner = Predict("question -> answer") + + program = MyModule() + assert program.named_predictors() == [("inner", program.inner)] + + # Check that it also works the second time. + program2 = copy.deepcopy(program) + assert program2.named_predictors() == [("inner", program2.inner)] From c81e973f6decb127c87501fa54d1e9b1d3265a83 Mon Sep 17 00:00:00 2001 From: Tomaz Bratanic Date: Mon, 4 Mar 2024 20:15:08 +0100 Subject: [PATCH 073/243] Add support for neo4j vector index --- dspy/retrieve/neo4j_rm.py | 104 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 104 insertions(+) create mode 100644 dspy/retrieve/neo4j_rm.py diff --git a/dspy/retrieve/neo4j_rm.py b/dspy/retrieve/neo4j_rm.py new file mode 100644 index 0000000000..20a5b7b339 --- /dev/null +++ b/dspy/retrieve/neo4j_rm.py @@ -0,0 +1,104 @@ +import os +from typing import Any, List, Optional, Union + +import backoff +from openai import ( + APITimeoutError, + InternalServerError, + OpenAI, + RateLimitError, + UnprocessableEntityError, +) + +import dspy +from dsp.utils import dotdict + +try: + from neo4j import GraphDatabase + from neo4j.exceptions import ( + AuthError, + ServiceUnavailable, + ) +except ImportError: + raise ImportError( + "Please install the neo4j package by running `pip install dspy-ai[neo4j]`", + ) + + +class Embedder: + def __init__(self, provider: str, model: str): + if provider == "openai": + api_key = os.getenv("OPENAI_API_KEY") + if not api_key: + raise ValueError("Environment variable OPENAI_API_KEY must be set") + self.client = OpenAI() + self.model = model + + @backoff.on_exception( + backoff.expo, + ( + APITimeoutError, + InternalServerError, + RateLimitError, + UnprocessableEntityError, + ), + max_time=15, + ) + def __call__(self, queries) -> Any: + embedding = self.client.embeddings.create(input=queries, model=self.model) + return [result.embedding for result in embedding.data] + + +DEFAULT_INDEX_QUERY = "CALL db.index.vector.queryNodes($index, $k, $embedding) YIELD node, score " + + +class Neo4jRM(dspy.Retrieve): + def __init__( + self, + index_name: str, + text_node_property: str, + k: int = 5, + retrieval_query: str = None, + embedding_provider: str = "openai", + embedding_model: str = "text-embedding-ada-002", + ): + super().__init__(k=k) + self.index_name = index_name + self.username = os.getenv("NEO4J_USERNAME") + self.password = os.getenv("NEO4J_PASSWORD") + self.uri = os.getenv("NEO4J_URI") + self.database = os.getenv("NEO4J_DATABASE") + self.k = k + self.retrieval_query = retrieval_query + self.text_node_property = text_node_property + if not self.username: 
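+            # Required Neo4j credential is missing; raise a descriptive configuration error.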
+ raise ValueError("Environment variable NEO4J_USERNAME must be set") + if not self.password: + raise ValueError("Environment variable NEO4J_PASSWORD must be set") + if not self.uri: + raise ValueError("Environment variable NEO4J_URI must be set") + try: + self.driver = GraphDatabase.driver(self.uri, auth=(self.username, self.password)) + self.driver.verify_connectivity() + + except ( + ServiceUnavailable, + AuthError, + ) as e: + raise ConnectionError("Failed to connect to Neo4j database") from e + + self.embedder = Embedder(provider=embedding_provider, model=embedding_model) + + def forward(self, query_or_queries: Union[str, List[str]], k: Optional[int]) -> dspy.Prediction: + if not isinstance(query_or_queries, list): + query_or_queries = [query_or_queries] + query_vectors = self.embedder(query_or_queries) + contents = [] + retrieval_query = self.retrieval_query or f"RETURN node.{self.text_node_property} AS output" + for vector in query_vectors: + records, _, _ = self.driver.execute_query( + DEFAULT_INDEX_QUERY + retrieval_query, + {"embedding": vector, "index": self.index_name, "k": k or self.k}, + ) + contents.extend([dotdict({"long_text": r["output"]}) for r in records]) + return contents From c33197585cb88c44c5ee88c49a13a0d27b6bd8ab Mon Sep 17 00:00:00 2001 From: Tomaz Bratanic Date: Mon, 4 Mar 2024 20:23:31 +0100 Subject: [PATCH 074/243] switch to text --- dspy/retrieve/neo4j_rm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dspy/retrieve/neo4j_rm.py b/dspy/retrieve/neo4j_rm.py index 20a5b7b339..2c5998c492 100644 --- a/dspy/retrieve/neo4j_rm.py +++ b/dspy/retrieve/neo4j_rm.py @@ -94,11 +94,11 @@ def forward(self, query_or_queries: Union[str, List[str]], k: Optional[int]) -> query_or_queries = [query_or_queries] query_vectors = self.embedder(query_or_queries) contents = [] - retrieval_query = self.retrieval_query or f"RETURN node.{self.text_node_property} AS output" + retrieval_query = self.retrieval_query or f"RETURN node.{self.text_node_property} AS text" for vector in query_vectors: records, _, _ = self.driver.execute_query( DEFAULT_INDEX_QUERY + retrieval_query, {"embedding": vector, "index": self.index_name, "k": k or self.k}, ) - contents.extend([dotdict({"long_text": r["output"]}) for r in records]) + contents.extend([dotdict({"long_text": r["text"]}) for r in records]) return contents From 1754cbd37d84daa6b87429d8d5fa413c5f396c43 Mon Sep 17 00:00:00 2001 From: Tomaz Bratanic Date: Mon, 4 Mar 2024 20:38:42 +0100 Subject: [PATCH 075/243] Add docs & some improvements --- docs/api/retrieval_model_clients/Neo4jRM.md | 80 +++++++++++++++++++++ dspy/retrieve/neo4j_rm.py | 7 +- 2 files changed, 85 insertions(+), 2 deletions(-) create mode 100644 docs/api/retrieval_model_clients/Neo4jRM.md diff --git a/docs/api/retrieval_model_clients/Neo4jRM.md b/docs/api/retrieval_model_clients/Neo4jRM.md new file mode 100644 index 0000000000..dc4530f09f --- /dev/null +++ b/docs/api/retrieval_model_clients/Neo4jRM.md @@ -0,0 +1,80 @@ + +# retrieve.neo4j_rm + +### Constructor + +Initialize an instance of the `Neo4jRM` class. + +```python +Neo4jRM( + index_name: str, + text_node_property: str, + k: int = 5, + retrieval_query: str = None, + embedding_provider: str = "openai", + embedding_model: str = "text-embedding-ada-002", +) +``` + +**Environment Variables:** + +You need to define the credentials as environment variables: + +- `NEO4J_USERNAME` (_str_): Specifies the username required for authenticating with the Neo4j database. 
This is a crucial security measure to ensure that only authorized users can access the database.
+
+- `NEO4J_PASSWORD` (_str_): Defines the password associated with the `NEO4J_USERNAME` for authentication purposes. This password should be kept secure to prevent unauthorized access to the database.
+
+- `NEO4J_URI` (_str_): Indicates the Uniform Resource Identifier (URI) used to connect to the Neo4j database. This URI typically includes the protocol, hostname, and port, providing the necessary information to establish a connection to the database.
+
+- `NEO4J_DATABASE` (_str_, optional): Specifies the name of the database to connect to within the Neo4j instance. If not set, the system defaults to using `"neo4j"` as the database name. This allows for flexibility in connecting to different databases within a single Neo4j server.
+
+**Parameters:**
+- `index_name` (_str_): Specifies the name of the vector index to be used within Neo4j for organizing and querying data.
+- `text_node_property` (_str_, _optional_): Defines the specific property of nodes that will be returned.
+- `k` (_int_, _optional_): The number of top results to return from the retrieval operation. It defaults to 5 if not explicitly specified.
+- `retrieval_query` (_str_, _optional_): A custom query string provided for retrieving data. If not provided, a default query tailored to the `text_node_property` will be used.
+- `embedding_provider` (_str_, _optional_): The name of the service provider for generating embeddings. Defaults to "openai" if not specified.
+- `embedding_model` (_str_, _optional_): The specific embedding model to use from the provider. By default, it uses the "text-embedding-ada-002" model from OpenAI.
+
+
+### Methods
+
+#### `forward(self, query_or_queries: Union[str, List[str]], k: Optional[int] = None) -> dspy.Prediction`
+
+Search the Neo4j vector index for the top `k` passages matching the given query or queries, using embeddings generated via the specified `embedding_model`.
+
+**Parameters:**
+- `query_or_queries` (_Union[str, List[str]]_): The query or queries to search for.
+- `k` (_Optional[int]_, _optional_): The number of results to retrieve. If not specified, defaults to the value set during initialization.
+
+**Returns:**
+- `dspy.Prediction`: Contains the retrieved passages as a list of strings, exposed via the prediction's `passages` attribute.
+
+Example:
+```python
+Prediction(
+    passages=['Passage 1 Lorem Ipsum awesome', 'Passage 2 Lorem Ipsum Youppidoo', 'Passage 3 Lorem Ipsum Yassssss']
+)
+```
+
+### Quick example of how to use Neo4j in a local environment
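+
+The snippet below assumes a Neo4j instance reachable at `bolt://localhost:7687`, with an existing vector index named `vector` over nodes that store their text in a `text` property: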
+
+
+```python
+from dspy.retrieve.neo4j_rm import Neo4jRM
+import os
+
+os.environ["NEO4J_URI"] = 'bolt://localhost:7687'
+os.environ["NEO4J_USERNAME"] = 'neo4j'
+os.environ["NEO4J_PASSWORD"] = 'password'
+
+retriever_model = Neo4jRM(
+    index_name="vector",
+    text_node_property="text"
+)
+
+results = retriever_model("Explore the significance of quantum computing", k=3)
+
+for passage in results.passages:
+    print("Document:", passage, "\n")
+```
diff --git a/dspy/retrieve/neo4j_rm.py b/dspy/retrieve/neo4j_rm.py
index 2c5998c492..db0c671adc 100644
--- a/dspy/retrieve/neo4j_rm.py
+++ b/dspy/retrieve/neo4j_rm.py
@@ -56,7 +56,7 @@ class Neo4jRM(dspy.Retrieve):
     def __init__(
         self,
         index_name: str,
-        text_node_property: str,
+        text_node_property: str = None,
         k: int = 5,
         retrieval_query: str = None,
         embedding_provider: str = "openai",
@@ -67,7 +67,7 @@ def __init__(
         self.username = os.getenv("NEO4J_USERNAME")
         self.password = os.getenv("NEO4J_PASSWORD")
         self.uri = os.getenv("NEO4J_URI")
-        self.database = os.getenv("NEO4J_DATABASE")
+        self.database = os.getenv("NEO4J_DATABASE", "neo4j")
         self.k = k
         self.retrieval_query = retrieval_query
         self.text_node_property = text_node_property
@@ -77,6 +77,8 @@ def __init__(
             raise ValueError("Environment variable NEO4J_PASSWORD must be set")
         if not self.uri:
             raise ValueError("Environment variable NEO4J_URI must be set")
+        if not self.text_node_property and not self.retrieval_query:
+            raise ValueError("Either `text_node_property` or `retrieval_query` parameters must be defined")
         try:
             self.driver = GraphDatabase.driver(self.uri, auth=(self.username, self.password))
             self.driver.verify_connectivity()
@@ -99,6 +101,7 @@ def forward(self, query_or_queries: Union[str, List[str]], k: Optional[int]) ->
             records, _, _ = self.driver.execute_query(
                 DEFAULT_INDEX_QUERY + retrieval_query,
                 {"embedding": vector, "index": self.index_name, "k": k or self.k},
+                database_=self.database,
             )
             contents.extend([dotdict({"long_text": r["text"]}) for r in records])
         return contents

From ff26c02e051681238f142fa602347755769266dc Mon Sep 17 00:00:00 2001
From: Tomaz Bratanic
Date: Mon, 4 Mar 2024 22:12:47 +0100
Subject: [PATCH 076/243] Fixes

---
 docs/api/retrieval_model_clients/Neo4jRM.md |  3 ++
 dspy/retrieve/neo4j_rm.py                   | 57 +++++++++++++++++++--
 2 files changed, 57 insertions(+), 3 deletions(-)

diff --git a/docs/api/retrieval_model_clients/Neo4jRM.md b/docs/api/retrieval_model_clients/Neo4jRM.md
index dc4530f09f..2bb2ddd2f1 100644
--- a/docs/api/retrieval_model_clients/Neo4jRM.md
+++ b/docs/api/retrieval_model_clients/Neo4jRM.md
@@ -28,6 +28,8 @@ You need to define the credentials as environment variables:
 
 - `NEO4J_DATABASE` (_str_, optional): Specifies the name of the database to connect to within the Neo4j instance. If not set, the system defaults to using `"neo4j"` as the database name. This allows for flexibility in connecting to different databases within a single Neo4j server.
 
+- `OPENAI_API_KEY` (_str_): Specifies the API key required for authenticating with OpenAI's services.
+
 **Parameters:**
 - `index_name` (_str_): Specifies the name of the vector index to be used within Neo4j for organizing and querying data.
 - `text_node_property` (_str_, _optional_): Defines the specific property of nodes that will be returned.
@@ -67,6 +69,7 @@ import os
 os.environ["NEO4J_URI"] = 'bolt://localhost:7687'
 os.environ["NEO4J_USERNAME"] = 'neo4j'
 os.environ["NEO4J_PASSWORD"] = 'password'
+os.environ["OPENAI_API_KEY"] = 'sk-'
 
 retriever_model = Neo4jRM(
     index_name="vector",
     text_node_property="text"
 )
diff --git a/dspy/retrieve/neo4j_rm.py b/dspy/retrieve/neo4j_rm.py
index db0c671adc..bb5ad25231 100644
--- a/dspy/retrieve/neo4j_rm.py
+++ b/dspy/retrieve/neo4j_rm.py
@@ -53,6 +53,52 @@ def __call__(self, queries) -> Any:
 
 
 class Neo4jRM(dspy.Retrieve):
+    """
+    Implements a retriever that utilizes Neo4j for retrieving passages.
+    This class manages a connection to a Neo4j database using official Neo4j Python drivers and requires
+    the database credentials (username, password, URI, and optionally the database name) to be set as environment variables.
+    Additionally, it utilizes an embedding provider (defaulting to OpenAI's services) to compute query embeddings,
+    which are then used to find the most relevant nodes in the Neo4j graph based on the specified node property or custom retrieval query.
+
+    Returns a list of passages in the form of `dspy.Prediction` objects.
+
+    Args:
+        index_name (str): The name of the vector index in the Neo4j database to query against.
+        text_node_property (Optional[str]): The property of the node containing the text. Required if `retrieval_query` is not set.
+        k (Optional[int]): The default number of top passages to retrieve. Defaults to 5.
+        retrieval_query (Optional[str]): Custom Cypher query for retrieving passages. Required if `text_node_property` is not set.
+        embedding_provider (str): The provider of the embedding service. Defaults to "openai".
+        embedding_model (str): The model identifier for generating embeddings. Defaults to "text-embedding-ada-002".
+
+    Examples:
+        Below is a code snippet showcasing how to initialize Neo4jRM with environment variables for the database connection and OpenAI as the embedding provider:
+
+        ```python
+        import os
+
+        import dspy
+        import openai
+
+        os.environ["NEO4J_URI"] = "bolt://localhost:7687"
+        os.environ["NEO4J_USERNAME"] = "neo4j"
+        os.environ["NEO4J_PASSWORD"] = "password"
+        os.environ["OPENAI_API_KEY"] = "sk-"
+
+        neo4j_retriever = Neo4jRM(
+            index_name="myIndex",
+            text_node_property="text",
+            k=10,
+            embedding_provider="openai",
+            embedding_model="text-embedding-ada-002",
+        )
+
+        dspy.settings.configure(rm=neo4j_retriever)
+        ```
+
+    In this example, `Neo4jRM` is configured to retrieve nodes based on the "text" property from an index named "myIndex",
+    using embeddings computed by OpenAI's "text-embedding-ada-002" model.
+ """ + def __init__( self, index_name: str, @@ -96,12 +142,17 @@ def forward(self, query_or_queries: Union[str, List[str]], k: Optional[int]) -> query_or_queries = [query_or_queries] query_vectors = self.embedder(query_or_queries) contents = [] - retrieval_query = self.retrieval_query or f"RETURN node.{self.text_node_property} AS text" + retrieval_query = self.retrieval_query or f"RETURN node.{self.text_node_property} AS text, score" for vector in query_vectors: records, _, _ = self.driver.execute_query( DEFAULT_INDEX_QUERY + retrieval_query, {"embedding": vector, "index": self.index_name, "k": k or self.k}, database_=self.database, ) - contents.extend([dotdict({"long_text": r["text"]}) for r in records]) - return contents + contents.extend([{"passage": dotdict({"long_text": r["text"]}), "score": r["score"]} for r in records]) + sorted_passages = sorted( + contents, + key=lambda x: x["score"], + reverse=True, + )[: k or self.k] + return [el["passage"] for el in sorted_passages] From 7b5f769b7a1fd3e0e4e7e558ddbee3a26dae90f5 Mon Sep 17 00:00:00 2001 From: Thomas Dybdahl Ahle Date: Mon, 4 Mar 2024 13:58:38 -0800 Subject: [PATCH 077/243] Update README.md Added Franck SN's medium post on chess with typed dspy --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 421bacd3e5..3537a937fc 100644 --- a/README.md +++ b/README.md @@ -141,6 +141,7 @@ You can find other examples tweeted by [@lateinteraction](https://twitter.com/la - [Using DSPy, "The Unreasonable Effectiveness of Eccentric Automatic Prompts" (paper) by VMware's Rick Battle & Teja Gollapudi, and interview at TheRegister](https://www.theregister.com/2024/02/22/prompt_engineering_ai_models/) - Typed DSPy (contributed by [@normal-computing](https://github.com/normal-computing)) - [Using DSPy to train Gpt 3.5 on HumanEval by @thomasahle](https://github.com/stanfordnlp/dspy/blob/main/examples/functional/functional.ipynb) + - [Building a chess playing agent using DSPy](https://medium.com/thoughts-on-machine-learning/building-a-chess-playing-agent-using-dspy-9b87c868f71e) There are also recent cool examples at [Weaviate's DSPy cookbook](https://github.com/weaviate/recipes/tree/main/integrations/dspy) by Connor Shorten. [See tutorial on YouTube](https://www.youtube.com/watch?v=CEuUG4Umfxs). 
From e4edd5b73d2fe90a073875802b38aab8cf1f60ca Mon Sep 17 00:00:00 2001 From: Thomas Dybdahl Ahle Date: Mon, 4 Mar 2024 13:59:53 -0800 Subject: [PATCH 078/243] Update README.md Added credit --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 3537a937fc..3d96788cec 100644 --- a/README.md +++ b/README.md @@ -140,8 +140,8 @@ You can find other examples tweeted by [@lateinteraction](https://twitter.com/la - [Using Ollama with DSPy for Mistral (quantized) by @jrknox1977](https://gist.github.com/jrknox1977/78c17e492b5a75ee5bbaf9673aee4641) - [Using DSPy, "The Unreasonable Effectiveness of Eccentric Automatic Prompts" (paper) by VMware's Rick Battle & Teja Gollapudi, and interview at TheRegister](https://www.theregister.com/2024/02/22/prompt_engineering_ai_models/) - Typed DSPy (contributed by [@normal-computing](https://github.com/normal-computing)) - - [Using DSPy to train Gpt 3.5 on HumanEval by @thomasahle](https://github.com/stanfordnlp/dspy/blob/main/examples/functional/functional.ipynb) - - [Building a chess playing agent using DSPy](https://medium.com/thoughts-on-machine-learning/building-a-chess-playing-agent-using-dspy-9b87c868f71e) + - [Using DSPy to train Gpt 3.5 on HumanEval by Thomas Ahle](https://github.com/stanfordnlp/dspy/blob/main/examples/functional/functional.ipynb) + - [Building a chess playing agent using DSPy by Franck SN](https://medium.com/thoughts-on-machine-learning/building-a-chess-playing-agent-using-dspy-9b87c868f71e) There are also recent cool examples at [Weaviate's DSPy cookbook](https://github.com/weaviate/recipes/tree/main/integrations/dspy) by Connor Shorten. [See tutorial on YouTube](https://www.youtube.com/watch?v=CEuUG4Umfxs). From b87909500ba99b530950f82140ed76baa8eabc25 Mon Sep 17 00:00:00 2001 From: Nat Taylor Date: Mon, 4 Mar 2024 21:21:42 -0500 Subject: [PATCH 079/243] docs(dspy): add toy invocation of model to minimal-example --- docs/docs/quick-start/minimal-example.mdx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/docs/quick-start/minimal-example.mdx b/docs/docs/quick-start/minimal-example.mdx index fb02ae7c12..e6bf8af6c4 100644 --- a/docs/docs/quick-start/minimal-example.mdx +++ b/docs/docs/quick-start/minimal-example.mdx @@ -85,6 +85,8 @@ This example showcases how to set up your environment, define a custom module, c Feel free to adapt and expand upon this example to suit your specific use case while exploring the extensive capabilities of DSPy. +If you want to try what you just built, run `optimized_cot.forward('Your Question Here')`. 
+ *** From ee646535c232c5ced9e1e948754843e5af5f0d46 Mon Sep 17 00:00:00 2001 From: Isaac Miller <17116851+isaacbmiller@users.noreply.github.com> Date: Mon, 4 Mar 2024 21:03:06 -0600 Subject: [PATCH 080/243] Move ensemble import to teleprompt (#542) --- dspy/teleprompt/__init__.py | 1 + tests/teleprompt/test_ensemble.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/dspy/teleprompt/__init__.py b/dspy/teleprompt/__init__.py index 61425e5b9b..a1088f2df3 100644 --- a/dspy/teleprompt/__init__.py +++ b/dspy/teleprompt/__init__.py @@ -1,4 +1,5 @@ from .bootstrap import * +from .ensemble import * from .finetune import * from .knn_fewshot import * from .random_search import * diff --git a/tests/teleprompt/test_ensemble.py b/tests/teleprompt/test_ensemble.py index 292176af4f..54ca2708db 100644 --- a/tests/teleprompt/test_ensemble.py +++ b/tests/teleprompt/test_ensemble.py @@ -1,6 +1,6 @@ import pytest import dspy -from dspy.teleprompt.ensemble import Ensemble +from dspy.teleprompt import Ensemble class MockProgram(dspy.Module): From 0f39a433c351b0fc3f9d5593374c57324eefb79e Mon Sep 17 00:00:00 2001 From: quajak Date: Mon, 4 Mar 2024 23:39:14 -0500 Subject: [PATCH 081/243] Fix return_all_scores in evaluate --- dspy/evaluate/evaluate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dspy/evaluate/evaluate.py b/dspy/evaluate/evaluate.py index 27358ed4fa..7bdb88ef72 100644 --- a/dspy/evaluate/evaluate.py +++ b/dspy/evaluate/evaluate.py @@ -174,7 +174,7 @@ def wrapped_program(example_idx, example): ipython_display(HTML(message)) if return_all_scores and return_outputs: - return round(100 * ncorrect / ntotal, 2), results + return round(100 * ncorrect / ntotal, 2), results, [score for *_, score in reordered_devset] elif return_all_scores: return round(100 * ncorrect / ntotal, 2), [score for *_, score in reordered_devset] elif return_outputs: From 188694a55a2cb3f6e595e7a6664aa2fe954ab19d Mon Sep 17 00:00:00 2001 From: Raja Rajendran Date: Tue, 5 Mar 2024 17:58:18 +0530 Subject: [PATCH 082/243] add 'k' as argument to FaissRM.forward() --- dspy/retrieve/faiss_rm.py | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) mode change 100644 => 100755 dspy/retrieve/faiss_rm.py diff --git a/dspy/retrieve/faiss_rm.py b/dspy/retrieve/faiss_rm.py old mode 100644 new mode 100755 index 7a74ec4b91..37460f8951 --- a/dspy/retrieve/faiss_rm.py +++ b/dspy/retrieve/faiss_rm.py @@ -3,7 +3,7 @@ """ import logging -from typing import Union +from typing import Union, Optional import numpy as np @@ -107,8 +107,8 @@ def _dump_raw_results(self, queries, index_list, distance_list) -> None: logging.debug(f" Hit {j} = {indices[j]}/{distances[j]}: {self._document_chunks[indices[j]]}") return - def forward(self, query_or_queries: Union[str, list[str]]) -> dspy.Prediction: - """Search the faiss index for self.k top passages for query. + def forward(self, query_or_queries: Union[str, list[str]], k: Optional[int] = None) -> dspy.Prediction: + """Search the faiss index for k or self.k top passages for query. Args: query_or_queries (Union[str, List[str]]): The query or queries to search for. 
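+            k (Optional[int], optional): The number of top passages to retrieve per query; falls back to self.k when omitted.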
@@ -122,13 +122,12 @@ def forward(self, query_or_queries: Union[str, list[str]]) -> dspy.Prediction: emb_npa = np.array(embeddings) # For single query, just look up the top k passages if len(queries) == 1: - distance_list, index_list = self._faiss_index.search(emb_npa, self.k) + distance_list, index_list = self._faiss_index.search(emb_npa, k or self.k) # self._dump_raw_results(queries, index_list, distance_list) passages = [(self._document_chunks[ind], ind) for ind in index_list[0]] - passages = [dotdict({"long_text": passage[0], "index": passage[1]}) for passage in passages] - return dspy.Prediction(passages=passages) + return [dotdict({"long_text": passage[0], "index": passage[1]}) for passage in passages] - distance_list, index_list = self._faiss_index.search(emb_npa, self.k * 3) + distance_list, index_list = self._faiss_index.search(emb_npa, (k or self.k) * 3) # self._dump_raw_results(queries, index_list, distance_list) passage_scores = {} for emb in range(len(embeddings)): @@ -136,7 +135,7 @@ def forward(self, query_or_queries: Union[str, list[str]]) -> dspy.Prediction: distances = distance_list[ emb ] # distances of neighbors for embeddings[emb] - this is an array of k*3 floating point numbers - for res in range(self.k * 3): + for res in range((k or self.k) * 3): neighbor = indices[res] distance = distances[res] if neighbor in passage_scores: @@ -147,10 +146,5 @@ def forward(self, query_or_queries: Union[str, list[str]]) -> dspy.Prediction: # first degree sort: number of queries that got a hit with any particular document chunk. More # is a better match. This is len(queries)-len(x[1]) # second degree sort: sum of the distances of each hit returned by faiss. Smaller distance is a better match - sorted_passages = sorted(passage_scores.items(), key=lambda x: (len(queries) - len(x[1]), sum(x[1])))[: self.k] - return dspy.Prediction( - passages=[ - dotdict({"long_text": self._document_chunks[passage_index], "index": passage_index}) - for passage_index, _ in sorted_passages - ], - ) + sorted_passages = sorted(passage_scores.items(), key=lambda x: (len(queries) - len(x[1]), sum(x[1])))[: k or self.k] + return [ dotdict({"long_text": self._document_chunks[passage_index], "index": passage_index}) for passage_index, _ in sorted_passages ] From 08dd4e9a1b3192cf05121fde08925328f81ab073 Mon Sep 17 00:00:00 2001 From: Herumb Shandilya Date: Tue, 5 Mar 2024 18:24:48 +0530 Subject: [PATCH 083/243] Add search to docs --- docs/docusaurus.config.ts | 11 +- docs/package-lock.json | 366 ++++++++++++++++++++++++++++++++++++++ docs/package.json | 1 + 3 files changed, 377 insertions(+), 1 deletion(-) diff --git a/docs/docusaurus.config.ts b/docs/docusaurus.config.ts index dff76109cd..b4fa8159b4 100644 --- a/docs/docusaurus.config.ts +++ b/docs/docusaurus.config.ts @@ -154,7 +154,16 @@ const config: Config = { darkTheme: prismThemes.dracula, // Dark theme for code blocks }, }, + themes: [ + [ + require.resolve("@easyops-cn/docusaurus-search-local"), + /** @type {import("@easyops-cn/docusaurus-search-local").PluginOptions} */ + ({ + hashed: true, + }), + ], + ], }; -// Exporting the configuration object export default config; + diff --git a/docs/package-lock.json b/docs/package-lock.json index f352934cb6..76e0f56851 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -10,6 +10,7 @@ "dependencies": { "@docusaurus/core": "3.1.0", "@docusaurus/preset-classic": "3.1.0", + "@easyops-cn/docusaurus-search-local": "^0.40.1", "@mdx-js/react": "^3.0.0", "clsx": "^2.0.0", "prism-react-renderer": 
"^2.3.0", @@ -2758,6 +2759,85 @@ "node": ">=18.0" } }, + "node_modules/@easyops-cn/autocomplete.js": { + "version": "0.38.1", + "resolved": "https://registry.npmjs.org/@easyops-cn/autocomplete.js/-/autocomplete.js-0.38.1.tgz", + "integrity": "sha512-drg76jS6syilOUmVNkyo1c7ZEBPcPuK+aJA7AksM5ZIIbV57DMHCywiCr+uHyv8BE5jUTU98j/H7gVrkHrWW3Q==", + "dependencies": { + "cssesc": "^3.0.0", + "immediate": "^3.2.3" + } + }, + "node_modules/@easyops-cn/docusaurus-search-local": { + "version": "0.40.1", + "resolved": "https://registry.npmjs.org/@easyops-cn/docusaurus-search-local/-/docusaurus-search-local-0.40.1.tgz", + "integrity": "sha512-4HMFZMpKKdd5qq1nFB8cvrAkgzZ1kNxphVciI64YHtmDYGIthVGZVG6+Ci7AAhzCR+ixLJkYwtVekvuMLjr2ZQ==", + "dependencies": { + "@docusaurus/plugin-content-docs": "^2 || ^3", + "@docusaurus/theme-translations": "^2 || ^3", + "@docusaurus/utils": "^2 || ^3", + "@docusaurus/utils-common": "^2 || ^3", + "@docusaurus/utils-validation": "^2 || ^3", + "@easyops-cn/autocomplete.js": "^0.38.1", + "@node-rs/jieba": "^1.6.0", + "cheerio": "^1.0.0-rc.3", + "clsx": "^1.1.1", + "debug": "^4.2.0", + "fs-extra": "^10.0.0", + "klaw-sync": "^6.0.0", + "lunr": "^2.3.9", + "lunr-languages": "^1.4.0", + "mark.js": "^8.11.1", + "tslib": "^2.4.0" + }, + "engines": { + "node": ">=12" + }, + "peerDependencies": { + "@docusaurus/theme-common": "^2 || ^3", + "react": "^16.14.0 || ^17 || ^18", + "react-dom": "^16.14.0 || 17 || ^18" + } + }, + "node_modules/@easyops-cn/docusaurus-search-local/node_modules/clsx": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-1.2.1.tgz", + "integrity": "sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/@easyops-cn/docusaurus-search-local/node_modules/fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@emnapi/core": { + "version": "0.45.0", + "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-0.45.0.tgz", + "integrity": "sha512-DPWjcUDQkCeEM4VnljEOEcXdAD7pp8zSZsgOujk/LGIwCXWbXJngin+MO4zbH429lzeC3WbYLGjE2MaUOwzpyw==", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@emnapi/runtime": { + "version": "0.45.0", + "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-0.45.0.tgz", + "integrity": "sha512-Txumi3td7J4A/xTTwlssKieHKTGl3j4A1tglBx72auZ49YK7ePY6XZricgIg9mnZT4xPfA+UPCUdnhRuEFDL+w==", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, "node_modules/@hapi/hoek": { "version": "9.3.0", "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", @@ -2905,6 +2985,255 @@ "react": ">=16" } }, + "node_modules/@napi-rs/wasm-runtime": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-0.1.1.tgz", + "integrity": "sha512-ATj9ua659JgrkICjJscaeZdmPr44cb/KFjNWuD0N6pux0SpzaM7+iOuuK11mAnQM2N9q0DT4REu6NkL8ZEhopw==", + "optional": true, + "dependencies": { + "@emnapi/core": "^0.45.0", + "@emnapi/runtime": "^0.45.0", + "@tybys/wasm-util": "^0.8.1" + } + }, + "node_modules/@node-rs/jieba": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@node-rs/jieba/-/jieba-1.10.0.tgz", + 
"integrity": "sha512-9oZMCvZVnrAMeWTSnEjJ0OSw7YcV4dJJKSioqq80oUNf3eYLGdEXsgYwCe1AYEMcfUfNVgvjznItJKrsoud0IA==", + "engines": { + "node": ">= 10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Brooooooklyn" + }, + "optionalDependencies": { + "@node-rs/jieba-android-arm-eabi": "1.10.0", + "@node-rs/jieba-android-arm64": "1.10.0", + "@node-rs/jieba-darwin-arm64": "1.10.0", + "@node-rs/jieba-darwin-x64": "1.10.0", + "@node-rs/jieba-freebsd-x64": "1.10.0", + "@node-rs/jieba-linux-arm-gnueabihf": "1.10.0", + "@node-rs/jieba-linux-arm64-gnu": "1.10.0", + "@node-rs/jieba-linux-arm64-musl": "1.10.0", + "@node-rs/jieba-linux-x64-gnu": "1.10.0", + "@node-rs/jieba-linux-x64-musl": "1.10.0", + "@node-rs/jieba-wasm32-wasi": "1.10.0", + "@node-rs/jieba-win32-arm64-msvc": "1.10.0", + "@node-rs/jieba-win32-ia32-msvc": "1.10.0", + "@node-rs/jieba-win32-x64-msvc": "1.10.0" + } + }, + "node_modules/@node-rs/jieba-android-arm-eabi": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-android-arm-eabi/-/jieba-android-arm-eabi-1.10.0.tgz", + "integrity": "sha512-bzusJSLHm7I0qL8aQXGLt7IQ51Px35yGGEcQ/Ps4SEt0AxRSJ2/rxNET/8mlwBpOCZ5xiKE3BOBRfQajiPiI3g==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@node-rs/jieba-android-arm64": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-android-arm64/-/jieba-android-arm64-1.10.0.tgz", + "integrity": "sha512-g89Oq5U2RPmtlvuQhjNj8YZc5Gq033ODb7Ot4Z/OdIHvg2WMxi2M1GQhcdKu60dO79/tazc53W6I8/y691DUfQ==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@node-rs/jieba-darwin-arm64": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-darwin-arm64/-/jieba-darwin-arm64-1.10.0.tgz", + "integrity": "sha512-IhR5r+XxFcfhVsF93zQ3uCJy8ndotRntXzoW/JCyKqOahUo/ITQRT6vTKHKMyD9xNmjl222OZonBSo2+mlI2fQ==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@node-rs/jieba-darwin-x64": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-darwin-x64/-/jieba-darwin-x64-1.10.0.tgz", + "integrity": "sha512-MBIs8ixKY4FPnifdZ7eTx6ht85TXE4kFBK4c8A/VDAbnmzBzpEyuV7tHUA2wAdfR0muC9j7/5FB4kQGZgYfc8g==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@node-rs/jieba-freebsd-x64": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-freebsd-x64/-/jieba-freebsd-x64-1.10.0.tgz", + "integrity": "sha512-MuY+1QEXONxo3I/uFLFju0/pSN5bzQORhJkIdP8CYv+jZaVB4Uz6rC7A5HrgjiAXOna6QsKlRgx2bYyHfaBUrA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@node-rs/jieba-linux-arm-gnueabihf": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-linux-arm-gnueabihf/-/jieba-linux-arm-gnueabihf-1.10.0.tgz", + "integrity": "sha512-QfSBnwISdVuTqsi4iThAO1LSbKRSqSsIWiIJgCduhYsTDDiG9+pHyfiZtcTwSf73SDXHZ400QuBNONWLQ/dSag==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@node-rs/jieba-linux-arm64-gnu": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-linux-arm64-gnu/-/jieba-linux-arm64-gnu-1.10.0.tgz", + "integrity": 
"sha512-vzA2tX/6dReEd/7tZ9927glWQmKDausM6R9S5CqZx4BA4NSaWAK0xFdWsz0K7np459FXqNavLdNB5FVFJb4zzA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@node-rs/jieba-linux-arm64-musl": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-linux-arm64-musl/-/jieba-linux-arm64-musl-1.10.0.tgz", + "integrity": "sha512-gxqoAVOQsn9sgYK6mFO9dsMZ/yOMvVecLZW5rGvLErjiugVvYUlESXIvCqxp2GSws8RtTqJj6p9u/lBmCCuvaw==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@node-rs/jieba-linux-x64-gnu": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-linux-x64-gnu/-/jieba-linux-x64-gnu-1.10.0.tgz", + "integrity": "sha512-rS5Shs8JITxJjFIjoIZ5a9O+GO21TJgKu03g2qwFE3QaN5ZOvXtz+/AqqyfT4GmmMhCujD83AGqfOGXDmItF9w==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@node-rs/jieba-linux-x64-musl": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-linux-x64-musl/-/jieba-linux-x64-musl-1.10.0.tgz", + "integrity": "sha512-BvSiF2rR8Birh2oEVHcYwq0WGC1cegkEdddWsPrrSmpKmukJE2zyjcxaOOggq2apb8fIRsjyeeUh6X3R5AgjvA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@node-rs/jieba-wasm32-wasi": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-wasm32-wasi/-/jieba-wasm32-wasi-1.10.0.tgz", + "integrity": "sha512-EzeAAbRrFTdYw61rd8Mfwdp/fA21d58z9vLY06CDbI+dqANfMFn1IUdwzKWi8S5J/MRhvbzonbbh3yHlz6F43Q==", + "cpu": [ + "wasm32" + ], + "optional": true, + "dependencies": { + "@napi-rs/wasm-runtime": "^0.1.1" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@node-rs/jieba-win32-arm64-msvc": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-win32-arm64-msvc/-/jieba-win32-arm64-msvc-1.10.0.tgz", + "integrity": "sha512-eZjRLFUAvq1/E5+xXfJRqIB99Gu6BA+6+EXf/rCLuvEjXrDQuUunhmrSoOL5MjmUXTtazS+bXq9PXV5EFYyOPw==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@node-rs/jieba-win32-ia32-msvc": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-win32-ia32-msvc/-/jieba-win32-ia32-msvc-1.10.0.tgz", + "integrity": "sha512-DrfbeCN7UcLN+MiocZabWo74XZIjfpQsJ/WMOItZzVbU2gDcJSkSyAhML9+OqId66DhGCMFFlGinocElM8iIAw==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@node-rs/jieba-win32-x64-msvc": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-win32-x64-msvc/-/jieba-win32-x64-msvc-1.10.0.tgz", + "integrity": "sha512-RjBkBmjjHmj+bofiq5/han8wzbCkDk24OAPJ+YX8PX20GFSHmdjCiWapv3AooN8/RiKqlBfgodjS1JUngNWo5g==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, "node_modules/@nodelib/fs.scandir": { "version": "2.1.5", "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", @@ -3298,6 +3627,15 @@ "node": ">=10.13.0" } }, + "node_modules/@tybys/wasm-util": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.8.1.tgz", + "integrity": "sha512-GSsTwyBl4pIzsxAY5wroZdyQKyhXk0d8PCRZtrSZ2WEB1cBdrp2EgGBwHOGCZtIIPun/DL3+AykCv+J6fyRH4Q==", + 
"optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, "node_modules/@types/acorn": { "version": "4.0.6", "resolved": "https://registry.npmjs.org/@types/acorn/-/acorn-4.0.6.tgz", @@ -7423,6 +7761,11 @@ "node": ">=16.x" } }, + "node_modules/immediate": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/immediate/-/immediate-3.3.0.tgz", + "integrity": "sha512-HR7EVodfFUdQCTIeySw+WDRFJlPcLOJbXfwwZ7Oom6tjsvZ3bOkCDJHehQC3nxJrv7+f9XecwazynjU8e4Vw3Q==" + }, "node_modules/immer": { "version": "9.0.21", "resolved": "https://registry.npmjs.org/immer/-/immer-9.0.21.tgz", @@ -7952,6 +8295,14 @@ "node": ">=0.10.0" } }, + "node_modules/klaw-sync": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/klaw-sync/-/klaw-sync-6.0.0.tgz", + "integrity": "sha512-nIeuVSzdCCs6TDPTqI8w1Yre34sSq7AkZ4B3sfOBbI2CgVSB4Du4aLQijFU2+lhAFCwt9+42Hel6lQNIv6AntQ==", + "dependencies": { + "graceful-fs": "^4.1.11" + } + }, "node_modules/kleur": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", @@ -8106,6 +8457,21 @@ "yallist": "^3.0.2" } }, + "node_modules/lunr": { + "version": "2.3.9", + "resolved": "https://registry.npmjs.org/lunr/-/lunr-2.3.9.tgz", + "integrity": "sha512-zTU3DaZaF3Rt9rhN3uBMGQD3dD2/vFQqnvZCDv4dl5iOzq2IZQqTxu90r4E5J+nP70J3ilqVCrbho2eWaeW8Ow==" + }, + "node_modules/lunr-languages": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/lunr-languages/-/lunr-languages-1.14.0.tgz", + "integrity": "sha512-hWUAb2KqM3L7J5bcrngszzISY4BxrXn/Xhbb9TTCJYEGqlR1nG67/M14sp09+PTIRklobrn57IAxcdcO/ZFyNA==" + }, + "node_modules/mark.js": { + "version": "8.11.1", + "resolved": "https://registry.npmjs.org/mark.js/-/mark.js-8.11.1.tgz", + "integrity": "sha512-1I+1qpDt4idfgLQG+BNWmrqku+7/2bi5nLf4YwF8y8zXvmfiTBY3PV3ZibfrjBueCByROpuBjLLFCajqkgYoLQ==" + }, "node_modules/markdown-extensions": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-2.0.0.tgz", diff --git a/docs/package.json b/docs/package.json index e11c453c5d..8a50a3c89f 100644 --- a/docs/package.json +++ b/docs/package.json @@ -17,6 +17,7 @@ "dependencies": { "@docusaurus/core": "3.1.0", "@docusaurus/preset-classic": "3.1.0", + "@easyops-cn/docusaurus-search-local": "^0.40.1", "@mdx-js/react": "^3.0.0", "clsx": "^2.0.0", "prism-react-renderer": "^2.3.0", From cab9eb47b7fdc6c485b12d4db252c3404877ab66 Mon Sep 17 00:00:00 2001 From: arnavsinghvi11 <54859892+arnavsinghvi11@users.noreply.github.com> Date: Tue, 5 Mar 2024 10:04:07 -0800 Subject: [PATCH 084/243] remove extra space --- dspy/experimental/synthesizer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dspy/experimental/synthesizer.py b/dspy/experimental/synthesizer.py index d23aa30592..771b12c981 100644 --- a/dspy/experimental/synthesizer.py +++ b/dspy/experimental/synthesizer.py @@ -41,7 +41,7 @@ class UnderstandTask(dspy.Signature): ) class ExplainTask(dspy.Signature): - """Analyze the provided set of datapoints carefully, and prepare a concise, comprehensible summary that captures the broad essence and purpose of the task these datapoints aim to address. Your summary should illuminate the general objective and the type of problem being solved, offering a clear picture of what the task entails at a high level. Avoid getting into the nuances of individual datapoints, specifics about models, examples, algorithms, or any intricate technicalities. 
Your explanation should serve to clarify the task's overall goal and its basic premise, without touching on methodologies or solutions.""" + """Analyze the provided set of datapoints carefully, and prepare a concise, comprehensible summary that captures the broad essence and purpose of the task these datapoints aim to address. Your summary should illuminate the general objective and the type of problem being solved, offering a clear picture of what the task entails at a high level. Avoid getting into the nuances of individual datapoints, specifics about models, examples, algorithms, or any intricate technicalities. Your explanation should serve to clarify the task's overall goal and its basic premise, without touching on methodologies or solutions.""" examples = dspy.InputField( prefix="Examples Datapoints:-", @@ -225,4 +225,4 @@ def export(self, data: List[dspy.Example], path: str, mode: str = None, **kwargs dataset.to_json(path_or_buf=path, **kwargs) elif extention == "arrow" or extention == "hf": - dataset.save_to_disk(path) \ No newline at end of file + dataset.save_to_disk(path) From 929213e7669958fcae9fd39098ba8fa563fa03ae Mon Sep 17 00:00:00 2001 From: arnavsinghvi11 <54859892+arnavsinghvi11@users.noreply.github.com> Date: Tue, 5 Mar 2024 10:08:52 -0800 Subject: [PATCH 085/243] Update minimal-example.mdx --- docs/docs/quick-start/minimal-example.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docs/quick-start/minimal-example.mdx b/docs/docs/quick-start/minimal-example.mdx index e6bf8af6c4..8d5a747c34 100644 --- a/docs/docs/quick-start/minimal-example.mdx +++ b/docs/docs/quick-start/minimal-example.mdx @@ -85,7 +85,7 @@ This example showcases how to set up your environment, define a custom module, c Feel free to adapt and expand upon this example to suit your specific use case while exploring the extensive capabilities of DSPy. -If you want to try what you just built, run `optimized_cot.forward('Your Question Here')`. +If you want to try what you just built, run `optimized_cot(question='Your Question Here')`. 
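For instance, a minimal sketch of that call (the question string is purely illustrative, and we assume the program's signature exposes an `answer` output field) could look like:

```python
# Hypothetical usage of the compiled program; swap in any question you like.
prediction = optimized_cot(question="What is the capital of France?")
print(prediction.answer)  # assumes the signature defines an `answer` output field
```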
*** From b2816c4a35e3144a06752423de1afb8e68e1005f Mon Sep 17 00:00:00 2001 From: Herumb Shandilya Date: Tue, 5 Mar 2024 23:33:11 +0530 Subject: [PATCH 086/243] Update code snippets in cheatsheet --- docs/docs/cheatsheet.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/docs/docs/cheatsheet.md b/docs/docs/cheatsheet.md index 31ed316d0b..c5f11e0c42 100644 --- a/docs/docs/cheatsheet.md +++ b/docs/docs/cheatsheet.md @@ -101,7 +101,7 @@ You can choose only selected columns from the csv by specifying them in the argu ```python dolly_100_dataset = dl.from_csv( "dolly_subset_100_rows.csv", - fields=["instruction", "context", "response"], + fields=("instruction", "context", "response"), input_keys=("instruction", "context") ) ``` @@ -301,13 +301,15 @@ Other custom configurations are similar to customizing the `dspy.BootstrapFewSho ### dspy.Ensemble ```python -from dspy.teleprompt import BootstrapFewShotWithRandomSearch, Ensemble +from dspy.teleprompt import BootstrapFewShotWithRandomSearch +from dspy.teleprompt.ensemble import Ensemble fewshot_optimizer = BootstrapFewShotWithRandomSearch(metric=your_defined_metric, max_bootstrapped_demos=2, num_candidate_programs=8, num_threads=NUM_THREADS) your_dspy_program_compiled = fewshot_optimizer.compile(student = your_dspy_program, trainset=trainset, valset=devset) -ensemble_optimizer = dspy.Ensemble(reduce_fn=dspy.majority) -your_dspy_program_compiled_ensemble = ensemble_optimizer.compile(your_dspy_program_compiled.programs[:3]) +ensemble_optimizer = Ensemble(reduce_fn=dspy.majority) +programs = [x[-1] for x in your_dspy_program_compiled.candidate_programs] +your_dspy_program_compiled_ensemble = ensemble_optimizer.compile(programs[:3]) ``` ### dspy.BootstrapFinetune From e145529ece0edc9bd3080e456041a7cec08f548c Mon Sep 17 00:00:00 2001 From: quajak Date: Tue, 5 Mar 2024 14:30:12 -0500 Subject: [PATCH 087/243] Fix applymap deprecation warning --- dspy/evaluate/evaluate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dspy/evaluate/evaluate.py b/dspy/evaluate/evaluate.py index 27358ed4fa..50c1bcc21b 100644 --- a/dspy/evaluate/evaluate.py +++ b/dspy/evaluate/evaluate.py @@ -140,7 +140,7 @@ def wrapped_program(example_idx, example): df = pd.DataFrame(data) # Truncate every cell in the DataFrame - df = df.applymap(truncate_cell) + df = df.map(truncate_cell) # Rename the 'correct' column to the name of the metric object assert(callable(metric)) From 3ee75d82ec6c66839326ddf7dd7e1dad1cbb81a8 Mon Sep 17 00:00:00 2001 From: Sumedha Kucherlapati Date: Tue, 5 Mar 2024 11:34:11 -0800 Subject: [PATCH 088/243] Add Claude wrapper --- dsp/modules/anthropic.py | 129 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 129 insertions(+) create mode 100644 dsp/modules/anthropic.py diff --git a/dsp/modules/anthropic.py b/dsp/modules/anthropic.py new file mode 100644 index 0000000000..99c9f225a6 --- /dev/null +++ b/dsp/modules/anthropic.py @@ -0,0 +1,129 @@ +import os +import backoff +import json +from typing import Optional, Any +from anthropic import Anthropic, RateLimitError + +from dsp.modules.lm import LM +import logging + + +logger = logging.getLogger(__name__) + +BASE_URL = "https://api.anthropic.com/v1/messages" + + +def backoff_hdlr(details): + """Handler from https://pypi.org/project/backoff/""" + print( + "Backing off {wait:0.1f} seconds after {tries} tries " + "calling function {target} with kwargs " + "{kwargs}".format(**details), + ) + + +def giveup_hdlr(details): + """wrapper function that decides when to give 
up on retry""" + if "rate limits" in details.message: + return False + return True + + +class Claude(LM): + """Wrapper around Anthropic's Claude API.""" + def __init__( + self, + model: str = "claude-instant-1.2", + api_key: Optional[str] = None, + api_base: Optional[str] = None, + **kwargs + ): + super().__init__(model) + self.provider = "anthropic" + self.api_key = api_key = os.environ.get("ANTHROPIC_API_KEY") if api_key is None else api_key + self.api_base = BASE_URL if api_base is None else api_base + + self.kwargs = { + "temperature": 0.0 if "temperature" not in kwargs else kwargs["temperature"], + "max_tokens": min(kwargs.get("max_tokens", 4096), 4096), + "top_p": 1.0 if "top_p" not in kwargs else kwargs["top_p"], + "top_k": 1 if "top_k" not in kwargs else kwargs["top_k"], + "n": kwargs.pop("n", kwargs.pop("num_generations", 1)), + **kwargs, + } + self.kwargs["model"] = model + self.history: list[dict[str, Any]] = [] + self.client = Anthropic(api_key=api_key) + + def log_usage(self, response): + """Log the total tokens from the Anthropic API response.""" + usage_data = response.usage + if usage_data: + total_tokens = usage_data.input_tokens + usage_data.output_tokens + logger.info(f'{total_tokens}') + + def basic_request(self, prompt: str, **kwargs): + raw_kwargs = kwargs + + kwargs = {**self.kwargs, **kwargs} + # caching mechanism requires hashable kwargs + kwargs["messages"] = [{"role": "user", "content": prompt}] + kwargs.pop("n") + response = self.client.messages.create(**kwargs) + + history = { + "prompt": prompt, + "response": response, + "kwargs": kwargs, + "raw_kwargs": raw_kwargs, + } + self.history.append(history) + + return response + + @backoff.on_exception( + backoff.expo, + (RateLimitError), + max_time=1000, + max_tries=8, + on_backoff=backoff_hdlr, + giveup=giveup_hdlr, + ) + def request(self, prompt: str, **kwargs): + """Retrieves completions from Anthropic while handling API errors.""" + return self.basic_request(prompt, **kwargs) + + def __call__(self, prompt, only_completed=True, return_sorted=False, **kwargs): + """Retrieves completions from Anthropic. + + Args: + prompt (str): prompt to send to Anthropic + only_completed (bool, optional): return only completed responses and ignore completions truncated due to length. Defaults to True. + return_sorted (bool, optional): sort the completion choices using the returned probabilities. Defaults to False. + + Returns: + list[str]: list of completion choices + """ + + assert only_completed, "for now" + assert return_sorted is False, "for now" + + + # per the example here: https://docs.anthropic.com/claude/reference/messages-examples + # max_tokens can be used as a proxy to request smaller responses, + # so it is not a reliable indicator of an incomplete response unless truncation was not the user's intent.
+        # if only_completed and response.stop_reason != "end_turn": +        #     choices = [] + +        n = kwargs.pop("n", 1) +        completions = [] +        for _ in range(n): +            response = self.request(prompt, **kwargs) +            # TODO: Log llm usage instead of hardcoded openai usage +            # if dsp.settings.log_openai_usage: +            #     self.log_usage(response) +            if only_completed and response.stop_reason == "max_tokens": +                continue +            # accumulate text from each of the n responses instead of overwriting previous completions +            completions.extend(c.text for c in response.content) +        return completions \ No newline at end of file From 017252c5ed347c88805dc7e3d1af4183e68104e4 Mon Sep 17 00:00:00 2001 From: Sumedha Kucherlapati Date: Tue, 5 Mar 2024 11:39:09 -0800 Subject: [PATCH 089/243] project dependencies --- pyproject.toml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index e2c719eaf2..878601f558 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "dspy-ai" -version = "2.3.6" +version = "2.3.7" description = "DSPy" readme = "README.md" authors = [{ name = "Omar Khattab", email = "okhattab@stanford.edu" }] @@ -20,6 +20,7 @@ classifiers = [ ] # We have both project and tool.poetry.dependencies. Should we remove one? dependencies = [ + "anthropic~=0.18.0", "backoff~=2.2.1", "joblib~=1.3.2", "openai>=0.28.1,<2.0.0", @@ -74,6 +75,7 @@ keywords = ["dspy", "ai", "language models", "llm", "openai"] [tool.poetry.dependencies] python = ">=3.9,<3.12" pydantic = "2.5.0" +anthropic = "^0.18.0" backoff = "^2.2.1" joblib = "^1.3.2" openai = "^0.28.1" From 0cffb585fb68645ad63b0a134c1f0a90ac13941c Mon Sep 17 00:00:00 2001 From: Sumedha Kucherlapati Date: Tue, 5 Mar 2024 11:46:51 -0800 Subject: [PATCH 090/243] Adds support to call Claude from dsp modules. --- dsp/modules/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/dsp/modules/__init__.py b/dsp/modules/__init__.py index 241a349c58..06e1d6f074 100644 --- a/dsp/modules/__init__.py +++ b/dsp/modules/__init__.py @@ -13,3 +13,4 @@ from .pyserini import * from .sbert import * from .sentence_vectorizer import * +from .anthropic import Claude From 1f6e0cbe3dc669da55be42448d8df44ec6cdb7f5 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Tue, 5 Mar 2024 13:00:20 -0800 Subject: [PATCH 091/243] Removed (deactivated) key --- examples/signature_opt2.ipynb | 1521 +-------------------------------- 1 file changed, 25 insertions(+), 1496 deletions(-) diff --git a/examples/signature_opt2.ipynb b/examples/signature_opt2.ipynb index 9edc0d4c4e..5fdbdf5d21 100644 --- a/examples/signature_opt2.ipynb +++ b/examples/signature_opt2.ipynb @@ -2,15 +2,15 @@ "cells": [ { "cell_type": "code", - "execution_count": 13, + "execution_count": 1, "metadata": {}, "outputs": [ { - "name": "stdout", + "name": "stderr", "output_type": "stream", "text": [ - "The autoreload extension is already loaded. To reload it, use:\n", - " %reload_ext autoreload\n" + "/opt/homebrew/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets.
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" ] } ], @@ -19,12 +19,12 @@ "%autoreload 2\n", "import dspy\n", "import os\n", - "os.environ['OPENAI_API_KEY'] = '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'" + "os.environ['OPENAI_API_KEY'] = 'sk-...'" ] }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -35,7 +35,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 3, "metadata": {}, "outputs": [ { @@ -44,7 +44,7 @@ "(20, 50)" ] }, - "execution_count": 15, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } @@ -64,7 +64,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -77,122 +77,36 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "All predictors:\n", - "named_predictors=[('predictor', Predict(BasicQA(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}'})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:'})\n", - ")))]\n", - "Generating new signature for Predict(BasicQA(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}'})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:'})\n", - "))...\n", - "Generated candidate 0:\n", - "StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.(0)'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}'})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:'})\n", - ")\n", - "Generated candidate 1:\n", - "StringSignature(question -> answer\n", - " instructions='Answer series of questions where answers must share a common theme.(1)'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Q1,', 'desc': '${question}'})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'charge amplifier designed based by Moog on stripline technology', '__dspy_field_type': 'output', 'prefix': 'A1 Wolfgang Amplifier,'})\n", - ")\n", - "Generated candidate 2:\n", - "StringSignature(question -> answer\n", - " instructions='Simulate exploratory dialogue between two people one Questioner and other Provider, Respond to each Q- below.(2)'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Q-What Computability study ranges?', 'desc': 'may also involve strings produced by non-deterministic computation'})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'answer DescHash Pure stainless Aberdeen stimulating 
Victoria names central whiskey article promise twitch Ohio Amber how statements board!', '__dspy_field_type': 'output', 'prefix': 'A-The explain phase-leading out question onset sound crawled fundingProblem in such separated syllabled band phrase assist reduction Haus mutual widse phoneme runtime shruproperholderstoInt valuepirizeThunder'})\n", - ")\n", - "\n", - "================================================================================\n", - "Running eval iteration 0...\n", - "Installing signature 0: \n", - "StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.(0)'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}'})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:'})\n", - ")\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 5 / 20 (25.0): 100%|██████████| 20/20 [00:00<00:00, 5144.18it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 5 / 20 (25.0%)\n", - "Scores for iteration 0: 28.55243378134378\n", - "\n", - "================================================================================\n", - "Running eval iteration 1...\n", - "Installing signature 1: \n", - "StringSignature(question -> answer\n", - " instructions='Answer series of questions where answers must share a common theme.(1)'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Q1,', 'desc': '${question}'})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'charge amplifier designed based by Moog on stripline technology', '__dspy_field_type': 'output', 'prefix': 'A1 Wolfgang Amplifier,'})\n", - ")\n" + "None\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 5 / 20 (25.0): 100%|██████████| 20/20 [00:00<00:00, 4207.98it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 5 / 20 (25.0%)\n", - "Scores for iteration 1: 29.911449495835697\n", - "\n", - "================================================================================\n", - "Running eval iteration 2...\n", - "Installing signature 2: \n", - "StringSignature(question -> answer\n", - " instructions='Simulate exploratory dialogue between two people one Questioner and other Provider, Respond to each Q- below.(2)'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Q-What Computability study ranges?', 'desc': 'may also involve strings produced by non-deterministic computation'})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'answer DescHash Pure stainless Aberdeen stimulating Victoria names central whiskey article promise twitch Ohio Amber how statements board!', '__dspy_field_type': 'output', 'prefix': 'A-The explain phase-leading out question onset sound crawled fundingProblem in such separated syllabled band phrase assist reduction Haus mutual widse phoneme runtime shruproperholderstoInt valuepirizeThunder'})\n", - ")\n" + "/opt/homebrew/lib/python3.11/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"signature\" shadows an attribute in parent \"Signature\"; \n", + " warnings.warn(\n" ] }, { - "name": "stderr", - "output_type": "stream", 
- "text": [ - "Average Metric: 5 / 20 (25.0): 100%|██████████| 20/20 [00:00<00:00, 4577.93it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 5 / 20 (25.0%)\n", - "Scores for iteration 2: 34.58455944537296\n", - "Generating new signature for predictor based on 3 previous signatures...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n" + "ename": "TypeError", + "evalue": "Field 'signature' in 'GenerateInstructionGivenAttempts' must be declared with InputField or OutputField. field.json_schema_extra=None", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[5], line 4\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdspy\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mevaluate\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mmetrics\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m answer_exact_match\n\u001b[1;32m 3\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdspy\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mfunctional\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m TypedPredictor\n\u001b[0;32m----> 4\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdspy\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mteleprompt\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01msignature_opt2\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m optimize_signature\n\u001b[1;32m 6\u001b[0m evaluator \u001b[38;5;241m=\u001b[39m Evaluate(devset\u001b[38;5;241m=\u001b[39mdevset, metric\u001b[38;5;241m=\u001b[39manswer_exact_match, num_threads\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m10\u001b[39m, display_progress\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[1;32m 8\u001b[0m program \u001b[38;5;241m=\u001b[39m optimize_signature(\n\u001b[1;32m 9\u001b[0m student\u001b[38;5;241m=\u001b[39mTypedPredictor(BasicQA),\n\u001b[1;32m 10\u001b[0m evaluator\u001b[38;5;241m=\u001b[39mEvaluate(devset\u001b[38;5;241m=\u001b[39mtrainset, metric\u001b[38;5;241m=\u001b[39manswer_exact_match, num_threads\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m10\u001b[39m, display_progress\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m),\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 14\u001b[0m prompt_model\u001b[38;5;241m=\u001b[39mgpt4,\n\u001b[1;32m 15\u001b[0m )\n", + "File \u001b[0;32m~/repos/dspy/dspy/teleprompt/signature_opt2.py:97\u001b[0m\n\u001b[1;32m 93\u001b[0m signature: T\n\u001b[1;32m 94\u001b[0m score: \u001b[38;5;28mfloat\u001b[39m \u001b[38;5;241m=\u001b[39m dspy\u001b[38;5;241m.\u001b[39mField(gt\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m, lt\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m100\u001b[39m)\n\u001b[0;32m---> 97\u001b[0m \u001b[38;5;28;43;01mclass\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;21;43;01mGenerateInstructionGivenAttempts\u001b[39;49;00m\u001b[43m(\u001b[49m\u001b[43mdspy\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mSignature\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mGeneric\u001b[49m\u001b[43m[\u001b[49m\u001b[43mT\u001b[49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\u001b[43m:\u001b[49m\n\u001b[1;32m 98\u001b[0m \u001b[38;5;250;43m \u001b[39;49m\u001b[38;5;124;43;03m\"\"\"You are an instruction optimizer for large language 
models.\u001b[39;49;00m\n\u001b[1;32m 99\u001b[0m \n\u001b[1;32m 100\u001b[0m \u001b[38;5;124;43;03m I will give some task instructions I've tried, along with their corresponding validation scores.\u001b[39;49;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 104\u001b[0m \u001b[38;5;124;43;03m - Don't repeat instructions, descriptions and prefixes that have already been attempted.\u001b[39;49;00m\n\u001b[1;32m 105\u001b[0m \u001b[38;5;124;43;03m \"\"\"\u001b[39;49;00m\n\u001b[1;32m 107\u001b[0m \u001b[43m \u001b[49m\u001b[43msignature\u001b[49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mT\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mdspy\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mOutputField\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdesc\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mProposed signature to try\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/repos/dspy/dspy/signatures/signature.py:48\u001b[0m, in \u001b[0;36mSignatureMeta.__new__\u001b[0;34m(mcs, signature_name, bases, namespace, **kwargs)\u001b[0m\n\u001b[1;32m 45\u001b[0m \u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__doc__\u001b[39m \u001b[38;5;241m=\u001b[39m _default_instructions(\u001b[38;5;28mcls\u001b[39m)\n\u001b[1;32m 47\u001b[0m \u001b[38;5;66;03m# Ensure all fields are declared with InputField or OutputField\u001b[39;00m\n\u001b[0;32m---> 48\u001b[0m \u001b[38;5;28;43mcls\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_validate_fields\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 50\u001b[0m \u001b[38;5;66;03m# Ensure all fields have a prefix\u001b[39;00m\n\u001b[1;32m 51\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m name, field \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39mmodel_fields\u001b[38;5;241m.\u001b[39mitems():\n", + "File \u001b[0;32m~/repos/dspy/dspy/signatures/signature.py:65\u001b[0m, in \u001b[0;36mSignatureMeta._validate_fields\u001b[0;34m(cls)\u001b[0m\n\u001b[1;32m 63\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m field_type \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m [\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124minput\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124moutput\u001b[39m\u001b[38;5;124m\"\u001b[39m]:\n\u001b[1;32m 64\u001b[0m \u001b[38;5;28mprint\u001b[39m(field\u001b[38;5;241m.\u001b[39mjson_schema_extra)\n\u001b[0;32m---> 65\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(\n\u001b[1;32m 66\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mField \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m in \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m must be declared with InputField or OutputField. \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mfield\u001b[38;5;241m.\u001b[39mjson_schema_extra\u001b[38;5;132;01m=}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 67\u001b[0m )\n", + "\u001b[0;31mTypeError\u001b[0m: Field 'signature' in 'GenerateInstructionGivenAttempts' must be declared with InputField or OutputField. 
field.json_schema_extra=None" ] } ], @@ -216,7 +130,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -305,1391 +219,6 @@ "source": [ "gpt4.inspect_history(n=3)" ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "ename": "NameError", - "evalue": "name 'tp' is not defined", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[19], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mtp\u001b[49m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__dict__\u001b[39m\n", - "\u001b[0;31mNameError\u001b[0m: name 'tp' is not defined" - ] - } - ], - "source": [ - "tp.__dict__" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "module.predictors()=[]\n", - "\n", - "================================================================================\n", - "Running eval iteration 0...\n", - "list: []\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x16aba8900>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159a5fe20>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159a5fa60>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159a5ef20>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x10632f240>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . 
at 0x13a001f80>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159a5e8e0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe3b00>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159a5f880>, 'parser': })\n", - ")\n", - "StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe0900>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe0400>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x1585b4900>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13c13afc0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe34c0>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe23e0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x1584e0b80>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . 
at 0x1585b47c0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159a5ec00>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159a5fd80>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe3560>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe07c0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159a5e700>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x158536840>, 'parser': })\n", - ")\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 5483.04it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 16 / 50 (32.0%)\n", - "Scores for iteration 0: 32.0\n", - "\n", - "================================================================================\n", - "Running eval iteration 1...\n", - "list: []\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x1585367a0>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13c1391c0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . 
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 4371.34it/s]"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Average Metric: 16 / 50 (32.0%)\n",
- "Scores for iteration 1: 32.0\n",
- "\n",
- "================================================================================\n",
- "Running eval iteration 2...\n",
- "list: []\n",
- "[... repeated 'Created template' / 'From Signature' debug output elided ...]\n",
- "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n",
- "From Signature StringSignature(question -> answer\n",
- " instructions='Answer questions with short factoid answers.'\n",
- " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x1585b6b60>})\n",
- " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . 
at 0x13a002020>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x1584e1760>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe3880>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe22a0>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe1e40>, 'parser': })\n", - ")\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 5230.07it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 16 / 50 (32.0%)\n", - "Scores for iteration 2: 32.0\n", - "\n", - "================================================================================\n", - "Running eval iteration 3...\n", - "list: []\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13c13aca0>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13f4a6e80>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13bbafe20>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x1585b62a0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . 
at 0x158488ea0>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x1584885e0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x16aba89a0>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13a001da0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x1584e1760>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13c138180>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe3d80>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13c13aca0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13a002520>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13bbafe20>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x158489f80>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . 
at 0x159fe1120>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x1585b62a0>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13a002020>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x1584e1760>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe1a80>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe19e0>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13f4a5940>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x16aba8900>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x1585b49a0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x158488f40>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x1584e0540>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . 
at 0x13c138c20>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x1584e0360>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x15848bce0>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x1585b62a0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13f4a4ea0>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13c138040>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13c138180>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13a002160>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x1585b47c0>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x158488ea0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x1584e1080>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . 
at 0x13bcb6c00>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x1584e0360>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x158489f80>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x1585b6b60>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13f4a6e80>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe0720>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13c138180>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13bbafb00>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x15848b9c0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x158488f40>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13bcb68e0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . 
at 0x159fe3d80>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x1584e0360>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x15848bc40>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x1585b4680>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13a002160>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe1da0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe1d00>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13c138180>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x16aba87c0>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x158488540>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x1584e1760>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . 
at 0x159fe1620>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x1584e0b80>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x15848b9c0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13bbaff60>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13c138ae0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe2340>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe0a40>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x16aba87c0>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x15848bc40>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x15848af20>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe2520>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . 
at 0x159fe1da0>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe23e0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x1584885e0>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13bbaff60>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x13c138ae0>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe05e0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe0fe0>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x16aba80e0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x1584e0540>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe3560>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe11c0>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . 
at 0x159fe34c0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe1b20>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x1585b6b60>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x16aba8900>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe3ce0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe0e00>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x159fe0220>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x159fe2840>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x1585b68e0>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . at 0x158488f40>})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:', 'format': . at 0x13f4a5080>, 'parser': })\n", - ")\n", - "Created template Template(Answer questions with short factoid answers., ['Question:', 'Answer:'])\n", - "From Signature StringSignature(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}', 'format': . 
 {
 "name": "stderr",
 "output_type": "stream",
 "text": [
 "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 4131.18it/s]"
 ]
 },
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
 "Average Metric: 16 / 50 (32.0%)\n",
 "Scores for iteration 3: 32.0\n"
 ]
 },
 {
 "name": "stderr",
 "output_type": "stream",
 "text": [
 "\n"
 ]
 },
 {
 "data": {
 "text/plain": []
 },
 "execution_count": 63,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
 "optimizer.compile(TypedPredictor(BasicQA), evaluator, n_iterations=4)"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "gpt4.inspect_history(n=4)"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "Feel free to try any other queries you like; one possible pattern is sketched below."
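 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "A minimal, hypothetical sketch: the compile call above was not assigned to a variable, so `compiled_qa` below is an assumed name for its return value, and the sample question is illustrative only. The `question` keyword and the `answer` output field come from the `BasicQA` signature (`question -> answer`) used throughout this notebook."
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "# Hypothetical usage sketch: assumes the compile call above was captured as\n",
 "# `compiled_qa = optimizer.compile(TypedPredictor(BasicQA), evaluator, n_iterations=4)`.\n",
 "# Call the optimized predictor with a new question and read the `answer` field\n",
 "# defined by the BasicQA signature.\n",
 "pred = compiled_qa(question=\"What is the capital of France?\")\n",
 "print(pred.answer)"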
- ] } ], "metadata": { From 687d4b0b6171f794b7ad493b7576d14c2f1d6ec7 Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Tue, 5 Mar 2024 15:59:23 -0600 Subject: [PATCH 092/243] Pull from original fork repo --- .github/workflows/run_tests.yml | 8 +-- dspy/evaluate/evaluate.py | 105 +++++++++++++++++++++----------- 2 files changed, 69 insertions(+), 44 deletions(-) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 6b4221ade7..76cea5cb60 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -16,7 +16,7 @@ jobs: permissions: contents: write steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v5 - uses: chartboost/ruff-action@v1 with: @@ -33,8 +33,6 @@ jobs: python-version: ["3.9"] steps: - uses: actions/checkout@v4 - with: - ref: ${{ github.head_ref }} - name: Load cached Poetry installation id: cached-poetry uses: actions/cache@v3 @@ -66,8 +64,6 @@ jobs: python-version: ["3.9"] steps: - uses: actions/checkout@v4 - with: - ref: ${{ github.head_ref }} - name: Load cached Poetry installation id: cached-poetry uses: actions/cache@v3 @@ -99,8 +95,6 @@ jobs: python-version: ["3.9"] steps: - uses: actions/checkout@v4 - with: - ref: ${{ github.head_ref }} - name: Load cached Poetry installation id: cached-poetry uses: actions/cache@v3 diff --git a/dspy/evaluate/evaluate.py b/dspy/evaluate/evaluate.py index 714a617dc7..d83574477c 100644 --- a/dspy/evaluate/evaluate.py +++ b/dspy/evaluate/evaluate.py @@ -23,8 +23,18 @@ class Evaluate: - def __init__(self, *, devset, metric=None, num_threads=1, display_progress=False, - display_table=False, display=True, max_errors=5, return_outputs=False): + def __init__( + self, + *, + devset, + metric=None, + num_threads=1, + display_progress=False, + display_table=False, + display=True, + max_errors=5, + return_outputs=False, + ): self.devset = devset self.metric = metric self.num_threads = num_threads @@ -40,7 +50,7 @@ def _execute_single_thread(self, wrapped_program, devset, display_progress): ncorrect = 0 ntotal = 0 reordered_devset = [] - + pbar = tqdm.tqdm(total=len(devset), dynamic_ncols=True, disable=not display_progress) for idx, arg in devset: example_idx, example, prediction, score = wrapped_program(idx, arg) @@ -49,14 +59,14 @@ def _execute_single_thread(self, wrapped_program, devset, display_progress): ntotal += 1 self._update_progress(pbar, ncorrect, ntotal) pbar.close() - + return reordered_devset, ncorrect, ntotal def _execute_multi_thread(self, wrapped_program, devset, num_threads, display_progress): ncorrect = 0 ntotal = 0 reordered_devset = [] - + with ThreadPoolExecutor(max_workers=num_threads) as executor: futures = {executor.submit(wrapped_program, idx, arg) for idx, arg in devset} pbar = tqdm.tqdm(total=len(devset), dynamic_ncols=True, disable=not display_progress) @@ -75,9 +85,18 @@ def _update_progress(self, pbar, ncorrect, ntotal): pbar.set_description(f"Average Metric: {ncorrect} / {ntotal} ({round(100 * ncorrect / ntotal, 1)})") pbar.update() - def __call__(self, program, metric=None, devset=None, num_threads=None, - display_progress=None, display_table=None, display=None, - return_all_scores=False, return_outputs=False): + def __call__( + self, + program, + metric=None, + devset=None, + num_threads=None, + display_progress=None, + display_table=None, + display=None, + return_all_scores=False, + return_outputs=False, + ): metric = metric if metric is not None else self.metric devset = devset if devset is not None else self.devset 
num_threads = num_threads if num_threads is not None else self.num_threads @@ -89,7 +108,7 @@ def __call__(self, program, metric=None, devset=None, num_threads=None, display_table = display_table if display else False return_outputs = return_outputs if return_outputs is not False else self.return_outputs results = [] - + def wrapped_program(example_idx, example): # NOTE: TODO: Won't work if threads create threads! creating_new_thread = threading.get_ident() not in dsp.settings.stack_by_thread @@ -101,14 +120,16 @@ def wrapped_program(example_idx, example): try: prediction = program(**example.inputs()) - score = metric(example, prediction) # FIXME: TODO: What's the right order? Maybe force name-based kwargs! - + score = metric( + example, prediction + ) # FIXME: TODO: What's the right order? Maybe force name-based kwargs! + # increment assert and suggest failures to program's attributes - if hasattr(program, '_assert_failures'): + if hasattr(program, "_assert_failures"): program._assert_failures += dsp.settings.assert_failures - if hasattr(program, '_suggest_failures'): + if hasattr(program, "_suggest_failures"): program._suggest_failures += dsp.settings.suggest_failures - + return example_idx, example, prediction, score except Exception as e: with self.error_lock: @@ -127,7 +148,9 @@ def wrapped_program(example_idx, example): if num_threads == 1: reordered_devset, ncorrect, ntotal = self._execute_single_thread(wrapped_program, devset, display_progress) else: - reordered_devset, ncorrect, ntotal = self._execute_multi_thread(wrapped_program, devset, num_threads, display_progress) + reordered_devset, ncorrect, ntotal = self._execute_multi_thread( + wrapped_program, devset, num_threads, display_progress + ) if return_outputs: # Handle the return_outputs logic results = [(example, prediction, score) for _, example, prediction, score in reordered_devset] @@ -137,7 +160,9 @@ def wrapped_program(example_idx, example): predicted_devset = sorted(reordered_devset) # data = [{**example, **prediction, 'correct': score} for example, prediction, score in zip(reordered_devset, preds, scores)] - data = [merge_dicts(example, prediction) | {'correct': score} for _, example, prediction, score in predicted_devset] + data = [ + merge_dicts(example, prediction) | {"correct": score} for _, example, prediction, score in predicted_devset + ] df = pd.DataFrame(data) @@ -145,9 +170,9 @@ def wrapped_program(example_idx, example): df = df.applymap(truncate_cell) # Rename the 'correct' column to the name of the metric object - assert(callable(metric)) + assert callable(metric) metric_name = metric.__name__ if isinstance(metric, types.FunctionType) else metric.__class__.__name__ - df.rename(columns={'correct': metric_name}, inplace=True) + df.rename(columns={"correct": metric_name}, inplace=True) if display_table: if isinstance(display_table, int): @@ -158,23 +183,23 @@ def wrapped_program(example_idx, example): truncated_rows = 0 styled_df = configure_dataframe_display(df_to_display, metric_name) - + ipython_display(styled_df) if truncated_rows > 0: # Simplified message about the truncated rows message = f"""
... {truncated_rows} more rows not displayed ...
""" ipython_display(HTML(message)) - + if return_all_scores and return_outputs: return round(100 * ncorrect / ntotal, 2), results, [score for *_, score in reordered_devset] elif return_all_scores: @@ -206,28 +231,34 @@ def truncate_cell(content): """Truncate content of a cell to 25 words.""" words = str(content).split() if len(words) > 25: - return ' '.join(words[:25]) + '...' + return " ".join(words[:25]) + "..." return content + def configure_dataframe_display(df, metric_name): """Set various pandas display options for DataFrame.""" pd.options.display.max_colwidth = None - pd.set_option('display.max_colwidth', 20) # Adjust the number as needed - pd.set_option('display.width', 400) # Adjust + pd.set_option("display.max_colwidth", 20) # Adjust the number as needed + pd.set_option("display.width", 400) # Adjust # df[metric_name] = df[metric_name].apply(lambda x: f'✔️ [{x}]' if x is True else f'❌ [{x}]') - df.loc[:, metric_name] = df[metric_name].apply(lambda x: f'✔️ [{x}]' if x is True else f'{x}') + df.loc[:, metric_name] = df[metric_name].apply(lambda x: f"✔️ [{x}]" if x is True else f"{x}") # Return styled DataFrame - return df.style.set_table_styles([ - {'selector': 'th', 'props': [('text-align', 'left')]}, - {'selector': 'td', 'props': [('text-align', 'left')]}, - ]).set_properties(**{ - 'text-align': 'left', - 'white-space': 'pre-wrap', - 'word-wrap': 'break-word', - 'max-width': '400px', - }) + return df.style.set_table_styles( + [ + {"selector": "th", "props": [("text-align", "left")]}, + {"selector": "td", "props": [("text-align", "left")]}, + ] + ).set_properties( + **{ + "text-align": "left", + "white-space": "pre-wrap", + "word-wrap": "break-word", + "max-width": "400px", + } + ) + # FIXME: TODO: The merge_dicts stuff above is way too quick and dirty. # TODO: the display_table can't handle False but can handle 0! Not sure how it works with True exactly, probably fails too. 
From d75ef1d4197e158cf561c782beef0f618a2b717a Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Tue, 5 Mar 2024 16:21:36 -0600 Subject: [PATCH 093/243] make CI fail on fix --- .github/workflows/run_tests.yml | 5 +---- dspy/evaluate/evaluate.py | 39 +++++++++++++++++++++------------ 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 76cea5cb60..98a6cec9ea 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -20,10 +20,7 @@ jobs: - uses: actions/setup-python@v5 - uses: chartboost/ruff-action@v1 with: - args: --fix-only - - uses: stefanzweifel/git-auto-commit-action@v5 - with: - commit_message: "Automatic Style fixes" + args: --fix-only --exit-non-zero-on-fix test: name: Run Tests diff --git a/dspy/evaluate/evaluate.py b/dspy/evaluate/evaluate.py index d83574477c..2592248ab1 100644 --- a/dspy/evaluate/evaluate.py +++ b/dspy/evaluate/evaluate.py @@ -1,17 +1,17 @@ import threading import types +import dsp -import pandas as pd import tqdm +import pandas as pd -import dsp try: from IPython.display import HTML from IPython.display import display as ipython_display except ImportError: ipython_display = print - HTML = lambda x: x + def HTML(x): return x from concurrent.futures import ThreadPoolExecutor, as_completed from dsp.evaluation.utils import * @@ -51,7 +51,8 @@ def _execute_single_thread(self, wrapped_program, devset, display_progress): ntotal = 0 reordered_devset = [] - pbar = tqdm.tqdm(total=len(devset), dynamic_ncols=True, disable=not display_progress) + pbar = tqdm.tqdm(total=len(devset), dynamic_ncols=True, + disable=not display_progress) for idx, arg in devset: example_idx, example, prediction, score = wrapped_program(idx, arg) reordered_devset.append((example_idx, example, prediction, score)) @@ -68,12 +69,15 @@ def _execute_multi_thread(self, wrapped_program, devset, num_threads, display_pr reordered_devset = [] with ThreadPoolExecutor(max_workers=num_threads) as executor: - futures = {executor.submit(wrapped_program, idx, arg) for idx, arg in devset} - pbar = tqdm.tqdm(total=len(devset), dynamic_ncols=True, disable=not display_progress) + futures = {executor.submit(wrapped_program, idx, arg) + for idx, arg in devset} + pbar = tqdm.tqdm(total=len(devset), dynamic_ncols=True, + disable=not display_progress) for future in as_completed(futures): example_idx, example, prediction, score = future.result() - reordered_devset.append((example_idx, example, prediction, score)) + reordered_devset.append( + (example_idx, example, prediction, score)) ncorrect += score ntotal += 1 self._update_progress(pbar, ncorrect, ntotal) @@ -82,7 +86,8 @@ def _execute_multi_thread(self, wrapped_program, devset, num_threads, display_pr return reordered_devset, ncorrect, ntotal def _update_progress(self, pbar, ncorrect, ntotal): - pbar.set_description(f"Average Metric: {ncorrect} / {ntotal} ({round(100 * ncorrect / ntotal, 1)})") + pbar.set_description( + f"Average Metric: {ncorrect} / {ntotal} ({round(100 * ncorrect / ntotal, 1)})") pbar.update() def __call__( @@ -113,7 +118,8 @@ def wrapped_program(example_idx, example): # NOTE: TODO: Won't work if threads create threads! 
creating_new_thread = threading.get_ident() not in dsp.settings.stack_by_thread if creating_new_thread: - dsp.settings.stack_by_thread[threading.get_ident()] = list(dsp.settings.main_stack) + dsp.settings.stack_by_thread[threading.get_ident()] = list( + dsp.settings.main_stack) # print(threading.get_ident(), dsp.settings.stack_by_thread[threading.get_ident()]) # print(type(example), example) @@ -146,16 +152,19 @@ def wrapped_program(example_idx, example): devset = list(enumerate(devset)) if num_threads == 1: - reordered_devset, ncorrect, ntotal = self._execute_single_thread(wrapped_program, devset, display_progress) + reordered_devset, ncorrect, ntotal = self._execute_single_thread( + wrapped_program, devset, display_progress) else: reordered_devset, ncorrect, ntotal = self._execute_multi_thread( wrapped_program, devset, num_threads, display_progress ) if return_outputs: # Handle the return_outputs logic - results = [(example, prediction, score) for _, example, prediction, score in reordered_devset] + results = [(example, prediction, score) + for _, example, prediction, score in reordered_devset] if display: - print(f"Average Metric: {ncorrect} / {ntotal} ({round(100 * ncorrect / ntotal, 1)}%)") + print( + f"Average Metric: {ncorrect} / {ntotal} ({round(100 * ncorrect / ntotal, 1)}%)") predicted_devset = sorted(reordered_devset) @@ -171,7 +180,8 @@ def wrapped_program(example_idx, example): # Rename the 'correct' column to the name of the metric object assert callable(metric) - metric_name = metric.__name__ if isinstance(metric, types.FunctionType) else metric.__class__.__name__ + metric_name = metric.__name__ if isinstance( + metric, types.FunctionType) else metric.__class__.__name__ df.rename(columns={"correct": metric_name}, inplace=True) if display_table: @@ -242,7 +252,8 @@ def configure_dataframe_display(df, metric_name): pd.set_option("display.width", 400) # Adjust # df[metric_name] = df[metric_name].apply(lambda x: f'✔️ [{x}]' if x is True else f'❌ [{x}]') - df.loc[:, metric_name] = df[metric_name].apply(lambda x: f"✔️ [{x}]" if x is True else f"{x}") + df.loc[:, metric_name] = df[metric_name].apply( + lambda x: f"✔️ [{x}]" if x is True else f"{x}") # Return styled DataFrame return df.style.set_table_styles( From 2c513956da0543f34ef68f1312d6ddac7e328dd4 Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Tue, 5 Mar 2024 16:28:34 -0600 Subject: [PATCH 094/243] Add error handling and comment for formatting issues --- .github/workflows/run_tests.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 98a6cec9ea..03198af736 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -21,6 +21,16 @@ jobs: - uses: chartboost/ruff-action@v1 with: args: --fix-only --exit-non-zero-on-fix + continue-on-error: true # Continue to next step if this step fails + - name: Check outcome + if: ${{ failure() && github.event_name == 'pull_request' }} + uses: actions/github-script@v6 + with: + script: | + const issue_number = context.issue.number; + const message = 'It seems like there are issues with the formatting. Please run `ruff check . 
--fix-only` and commit to address these issues.'; + const comment = { owner: context.repo.owner, repo: context.repo.repo, issue_number: issue_number, body: message }; + github.rest.issues.createComment(comment); test: name: Run Tests From 2235a6a8b22341528ac03a4347c169026fc66479 Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Tue, 5 Mar 2024 16:31:06 -0600 Subject: [PATCH 095/243] Add write permissions for pull requests in run_tests workflow --- .github/workflows/run_tests.yml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 03198af736..2fe01c9fe3 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -15,6 +15,7 @@ jobs: runs-on: ubuntu-latest permissions: contents: write + pull-requests: write steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 @@ -29,8 +30,13 @@ jobs: script: | const issue_number = context.issue.number; const message = 'It seems like there are issues with the formatting. Please run `ruff check . --fix-only` and commit to address these issues.'; - const comment = { owner: context.repo.owner, repo: context.repo.repo, issue_number: issue_number, body: message }; - github.rest.issues.createComment(comment); + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issue_number, + body: message + }); + core.setFailed("Automated Ruff fixes failed. A comment has been posted on the PR."); test: name: Run Tests From d3b887e0adcf8569b35a4e600b77c5a7cdf076c3 Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Tue, 5 Mar 2024 16:33:52 -0600 Subject: [PATCH 096/243] Fail on any error --- .github/workflows/run_tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 2fe01c9fe3..83c11eb10a 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -24,7 +24,7 @@ jobs: args: --fix-only --exit-non-zero-on-fix continue-on-error: true # Continue to next step if this step fails - name: Check outcome - if: ${{ failure() && github.event_name == 'pull_request' }} + if: ${{ failure() }} uses: actions/github-script@v6 with: script: | From c17c14f1118c9406cbbb2775a5d222c39a343e3c Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Tue, 5 Mar 2024 16:41:05 -0600 Subject: [PATCH 097/243] separate comment --- .github/workflows/run_tests.yml | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 83c11eb10a..2e9451d391 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -23,8 +23,14 @@ jobs: with: args: --fix-only --exit-non-zero-on-fix continue-on-error: true # Continue to next step if this step fails - - name: Check outcome - if: ${{ failure() }} + + comment_if_ruff_fail: + name: Comment on Ruff Failure + runs-on: ubuntu-latest + needs: fix + if: ${{ failure() && needs.fix.result == 'failure' }} + steps: + - name: Comment on Ruff Failure uses: actions/github-script@v6 with: script: | @@ -37,7 +43,6 @@ jobs: body: message }); core.setFailed("Automated Ruff fixes failed. 
A comment has been posted on the PR."); - test: name: Run Tests runs-on: ubuntu-latest From f9de939a0f8733757e01965e972c36cb43a62928 Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Tue, 5 Mar 2024 16:41:25 -0600 Subject: [PATCH 098/243] separate comment remove continue --- .github/workflows/run_tests.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 2e9451d391..eca11aa488 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -22,7 +22,6 @@ jobs: - uses: chartboost/ruff-action@v1 with: args: --fix-only --exit-non-zero-on-fix - continue-on-error: true # Continue to next step if this step fails comment_if_ruff_fail: name: Comment on Ruff Failure From 89f14c4793740a7e6359b9ee6d026c09cc4b97c3 Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Tue, 5 Mar 2024 16:42:56 -0600 Subject: [PATCH 099/243] Allow PR comment write --- .github/workflows/run_tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index eca11aa488..2b066b2400 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -26,6 +26,9 @@ jobs: comment_if_ruff_fail: name: Comment on Ruff Failure runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: write needs: fix if: ${{ failure() && needs.fix.result == 'failure' }} steps: From a5d95ddabc80ff309e140d83e4bb485fbd031733 Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Tue, 5 Mar 2024 16:57:48 -0600 Subject: [PATCH 100/243] try echo syntax --- .github/workflows/run_tests.yml | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 2b066b2400..668d5e53de 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -32,19 +32,20 @@ jobs: needs: fix if: ${{ failure() && needs.fix.result == 'failure' }} steps: - - name: Comment on Ruff Failure - uses: actions/github-script@v6 - with: - script: | - const issue_number = context.issue.number; - const message = 'It seems like there are issues with the formatting. Please run `ruff check . --fix-only` and commit to address these issues.'; - await github.rest.issues.createComment({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: issue_number, - body: message - }); - core.setFailed("Automated Ruff fixes failed. A comment has been posted on the PR."); + - run: echo ''It seems like there are issues with the formatting. Please run `ruff check . --fix-only` and commit to address these issues.' + # - name: Comment on Ruff Failure + # uses: actions/github-script@v6 + # with: + # script: | + # const issue_number = context.issue.number; + # const message = 'It seems like there are issues with the formatting. Please run `ruff check . --fix-only` and commit to address these issues.'; + # await github.rest.issues.createComment({ + # owner: context.repo.owner, + # repo: context.repo.repo, + # issue_number: issue_number, + # body: message + # }); + # core.setFailed("Automated Ruff fixes failed. 
A comment has been posted on the PR.");
  test:
- # - name: Comment on Ruff Failure - # uses: actions/github-script@v6 - # with: - # script: | - # const issue_number = context.issue.number; - # const message = 'It seems like there are issues with the formatting. Please run `ruff check . --fix-only` and commit to address these issues.'; - # await github.rest.issues.createComment({ - # owner: context.repo.owner, - # repo: context.repo.repo, - # issue_number: issue_number, - # body: message - # }); - # core.setFailed("Automated Ruff fixes failed. A comment has been posted on the PR."); test: name: Run Tests runs-on: ubuntu-latest From ea645221635009d733d32c937354d4d5c5e5a5c8 Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Tue, 5 Mar 2024 17:11:33 -0600 Subject: [PATCH 103/243] Give name to workflow --- .github/workflows/run_fix.yml | 2 ++ .github/workflows/run_tests.yml | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/run_fix.yml b/.github/workflows/run_fix.yml index c4050276de..163461c6d0 100644 --- a/.github/workflows/run_fix.yml +++ b/.github/workflows/run_fix.yml @@ -1,3 +1,5 @@ +name: Fix and comment on Ruff failure + on: pull_request_target jobs: diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index fb979cf777..75d45b3248 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -1,4 +1,4 @@ -name: Fix, Test, and Build +name: Test and Build on: push: From 83cddf260e533e3a5870efe5824a474b634f5886 Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Tue, 5 Mar 2024 17:21:27 -0600 Subject: [PATCH 104/243] Recombine into one file, but separate by action type --- .github/workflows/run_fix.yml | 39 --------------------------- .github/workflows/run_tests.yml | 48 ++++++++++++++++++++++++++++++--- 2 files changed, 44 insertions(+), 43 deletions(-) delete mode 100644 .github/workflows/run_fix.yml diff --git a/.github/workflows/run_fix.yml b/.github/workflows/run_fix.yml deleted file mode 100644 index 163461c6d0..0000000000 --- a/.github/workflows/run_fix.yml +++ /dev/null @@ -1,39 +0,0 @@ -name: Fix and comment on Ruff failure - -on: pull_request_target - -jobs: - fix: - name: Apply Ruff Fix - runs-on: ubuntu-latest - permissions: - contents: write - pull-requests: write - steps: - - uses: actions/checkout@v4 - - uses: chartboost/ruff-action@v1 - with: - args: --fix-only --exit-non-zero-on-fix - - comment_if_ruff_fail: - name: Comment on Ruff Failure - runs-on: ubuntu-latest - permissions: - contents: read - pull-requests: write - needs: fix - if: ${{ failure() && needs.fix.result == 'failure' }} - steps: - - name: Comment on Ruff Failure - uses: actions/github-script@v6 - with: - script: | - const issue_number = context.issue.number; - const message = 'It seems like there are issues with the formatting. Please run `ruff check . --fix-only` and commit to address these issues.'; - await github.rest.issues.createComment({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: issue_number, - body: message - }); - core.setFailed("Automated Ruff fixes failed. 
A comment has been posted on the PR."); diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 75d45b3248..24e617189b 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -1,21 +1,59 @@ -name: Test and Build +name: Fix, Test, and Build on: - push: - branches: - - main pull_request: + types: [opened, synchronize, reopened] + pull_request_target: + types: [opened, synchronize, reopened] env: POETRY_VERSION: "1.6.1" jobs: + fix: + name: Apply Ruff Fix + runs-on: ubuntu-latest + permissions: + contents: write + pull-requests: write + if: github.event_name == 'pull_request_target' + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + - uses: chartboost/ruff-action@v1 + with: + args: --fix-only --exit-non-zero-on-fix + + comment_if_ruff_fail: + name: Comment on Ruff Failure + runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: write + needs: fix + if: ${{ failure() && needs.fix.result == 'failure' && github.event_name == 'pull_request_target' }} + steps: + # - run: echo 'It seems like there are issues with the formatting. Please run `ruff check . --fix-only` and commit to address these issues.' + - name: Comment on Ruff Failure + uses: actions/github-script@v6 + with: + script: | + const issue_number = context.issue.number; + const message = 'It seems like there are issues with the formatting. Please run `ruff check . --fix-only` and commit to address these issues.'; + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issue_number, + body: message + }); + core.setFailed("Automated Ruff fixes failed. A comment has been posted on the PR."); test: name: Run Tests runs-on: ubuntu-latest strategy: matrix: python-version: ["3.9"] + if: github.event_name == 'pull_request' steps: - uses: actions/checkout@v4 - name: Load cached Poetry installation @@ -47,6 +85,7 @@ jobs: strategy: matrix: python-version: ["3.9"] + if: github.event_name == 'pull_request' steps: - uses: actions/checkout@v4 - name: Load cached Poetry installation @@ -78,6 +117,7 @@ jobs: strategy: matrix: python-version: ["3.9"] + if: github.event_name == 'pull_request' steps: - uses: actions/checkout@v4 - name: Load cached Poetry installation From f193549e63ebb09e14a0ad5fe6efc0e98391e1a0 Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Tue, 5 Mar 2024 17:31:44 -0600 Subject: [PATCH 105/243] change target branch --- .github/workflows/run_tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 24e617189b..3a3b298f2a 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -11,7 +11,7 @@ env: jobs: fix: - name: Apply Ruff Fix + name: Apply Ruff Fix - TESTING DEMO DEMO DEMO runs-on: ubuntu-latest permissions: contents: write From b50b669af813b60fad3c663cb4f108231c594e45 Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Tue, 5 Mar 2024 17:33:52 -0600 Subject: [PATCH 106/243] Purposely create ruff errors --- .github/workflows/run_tests.yml | 2 +- dspy/evaluate/evaluate.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 3a3b298f2a..24e617189b 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -11,7 +11,7 @@ env: jobs: fix: - name: Apply Ruff Fix - TESTING DEMO DEMO DEMO + name: Apply Ruff Fix runs-on: ubuntu-latest permissions: contents: 
write diff --git a/dspy/evaluate/evaluate.py b/dspy/evaluate/evaluate.py index 2592248ab1..66f99d75d3 100644 --- a/dspy/evaluate/evaluate.py +++ b/dspy/evaluate/evaluate.py @@ -1,10 +1,10 @@ -import threading import types import dsp import tqdm import pandas as pd +import threading try: from IPython.display import HTML From 5d2853c12725adca2fb020c2673a71c0e3a2e38c Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Tue, 5 Mar 2024 17:37:03 -0600 Subject: [PATCH 107/243] Add console.log --- .github/workflows/run_tests.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 24e617189b..ee9456d27d 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -16,6 +16,8 @@ jobs: permissions: contents: write pull-requests: write + # Note that 'pull_request_target' has higher permissions than 'pull_request'\ + # Do not change any events that run arbitrary code to use 'pull_request_target' if: github.event_name == 'pull_request_target' steps: - uses: actions/checkout@v4 @@ -31,6 +33,8 @@ jobs: contents: read pull-requests: write needs: fix + # Note that 'pull_request_target' has higher permissions than 'pull_request'\ + # Do not change any events that run arbitrary code to use 'pull_request_target' if: ${{ failure() && needs.fix.result == 'failure' && github.event_name == 'pull_request_target' }} steps: # - run: echo 'It seems like there are issues with the formatting. Please run `ruff check . --fix-only` and commit to address these issues.' @@ -46,7 +50,7 @@ jobs: issue_number: issue_number, body: message }); - core.setFailed("Automated Ruff fixes failed. A comment has been posted on the PR."); + console.log('Commented on the issue.'); test: name: Run Tests runs-on: ubuntu-latest From af749070d704a7eccc8aebe112c4e095885c092f Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Tue, 5 Mar 2024 17:41:45 -0600 Subject: [PATCH 108/243] Split into separate workflow --- .github/workflows/ruff_lint.yml | 51 +++++++++++++++++++++++++++++++++ .github/workflows/run_tests.yml | 43 --------------------------- 2 files changed, 51 insertions(+), 43 deletions(-) create mode 100644 .github/workflows/ruff_lint.yml diff --git a/.github/workflows/ruff_lint.yml b/.github/workflows/ruff_lint.yml new file mode 100644 index 0000000000..0834dd4e6a --- /dev/null +++ b/.github/workflows/ruff_lint.yml @@ -0,0 +1,51 @@ +name: Lint + +on: + pull_request_target: + types: [opened, synchronize, reopened] + +env: + POETRY_VERSION: "1.6.1" + +jobs: + fix: + name: Apply Ruff Fix + runs-on: ubuntu-latest + permissions: + contents: write + pull-requests: write + # Note that 'pull_request_target' has higher permissions than 'pull_request'\ + # Do not change any events that run arbitrary code to use 'pull_request_target' + if: github.event_name == 'pull_request_target' + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + - uses: chartboost/ruff-action@v1 + with: + args: --fix-only --exit-non-zero-on-fix + + comment_if_ruff_fail: + name: Comment on Ruff Failure + runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: write + needs: fix + # Note that 'pull_request_target' has higher permissions than 'pull_request'\ + # Do not change any events that run arbitrary code to use 'pull_request_target' + if: ${{ failure() && needs.fix.result == 'failure' && github.event_name == 'pull_request_target' }} + steps: + # - run: echo 'It seems like there are issues with the formatting. 
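+      # Intended scopes (assumption, not stated in the commit): 'contents' so the
+      # workflow can push autofixes, 'pull-requests' so it can post the failure comment.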
Please run `ruff check . --fix-only` and commit to address these issues.' + - name: Comment on Ruff Failure + uses: actions/github-script@v6 + with: + script: | + const issue_number = context.issue.number; + const message = 'It seems like there are issues with the formatting. Please run `ruff check . --fix-only` and commit to address these issues.'; + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issue_number, + body: message + }); + console.log('Commented on the issue.'); diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index ee9456d27d..15da587b4e 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -3,54 +3,11 @@ name: Fix, Test, and Build on: pull_request: types: [opened, synchronize, reopened] - pull_request_target: - types: [opened, synchronize, reopened] env: POETRY_VERSION: "1.6.1" jobs: - fix: - name: Apply Ruff Fix - runs-on: ubuntu-latest - permissions: - contents: write - pull-requests: write - # Note that 'pull_request_target' has higher permissions than 'pull_request'\ - # Do not change any events that run arbitrary code to use 'pull_request_target' - if: github.event_name == 'pull_request_target' - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-python@v5 - - uses: chartboost/ruff-action@v1 - with: - args: --fix-only --exit-non-zero-on-fix - - comment_if_ruff_fail: - name: Comment on Ruff Failure - runs-on: ubuntu-latest - permissions: - contents: read - pull-requests: write - needs: fix - # Note that 'pull_request_target' has higher permissions than 'pull_request'\ - # Do not change any events that run arbitrary code to use 'pull_request_target' - if: ${{ failure() && needs.fix.result == 'failure' && github.event_name == 'pull_request_target' }} - steps: - # - run: echo 'It seems like there are issues with the formatting. Please run `ruff check . --fix-only` and commit to address these issues.' - - name: Comment on Ruff Failure - uses: actions/github-script@v6 - with: - script: | - const issue_number = context.issue.number; - const message = 'It seems like there are issues with the formatting. Please run `ruff check . --fix-only` and commit to address these issues.'; - await github.rest.issues.createComment({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: issue_number, - body: message - }); - console.log('Commented on the issue.'); test: name: Run Tests runs-on: ubuntu-latest From e659fc2c001dc4461a931f43a7a070bc17157a6a Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Tue, 5 Mar 2024 19:31:22 -0600 Subject: [PATCH 109/243] Revert "Fix return_all_scores in evaluate" This reverts commit 0f39a433c351b0fc3f9d5593374c57324eefb79e. 
---
 dspy/evaluate/evaluate.py | 2 +-
"tiktoken", "torch", "torch (>=2.0.0)", "transformers", "typing-extensions (>=4.6.1)", "zstandard"] +dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "jax (>=0.3.14)", "jaxlib (>=0.3.14)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "ruff (>=0.3.0)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy", "tensorflow (>=2.2.0,!=2.6.0,!=2.6.1)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "torch (>=2.0.0)", "transformers", "typing-extensions (>=4.6.1)", "zstandard"] docs = ["s3fs", "tensorflow (>=2.2.0,!=2.6.0,!=2.6.1)", "tensorflow-macos", "torch", "transformers"] jax = ["jax (>=0.3.14)", "jaxlib (>=0.3.14)"] metrics-tests = ["Werkzeug (>=1.0.1)", "accelerate", "bert-score (>=0.3.6)", "jiwer", "langdetect", "mauve-text", "nltk", "requests-file (>=1.5.1)", "rouge-score", "sacrebleu", "sacremoses", "scikit-learn", "scipy", "sentencepiece", "seqeval", "six (>=1.15.0,<1.16.0)", "spacy (>=3.0.0)", "texttable (>=1.6.3)", "tldextract", "tldextract (>=3.1.0)", "toml (>=0.10.1)", "typer (<0.5.0)"] -quality = ["ruff (>=0.1.5)"] +quality = ["ruff (>=0.3.0)"] s3 = ["s3fs"] tensorflow = ["tensorflow (>=2.2.0,!=2.6.0,!=2.6.1)", "tensorflow-macos"] tensorflow-gpu = ["tensorflow-gpu (>=2.2.0,!=2.6.0,!=2.6.1)"] @@ -1096,18 +1096,17 @@ files = [ [[package]] name = "fsspec" -version = "2023.10.0" +version = "2024.2.0" description = "File-system specification" optional = false python-versions = ">=3.8" files = [ - {file = "fsspec-2023.10.0-py3-none-any.whl", hash = "sha256:346a8f024efeb749d2a5fca7ba8854474b1ff9af7c3faaf636a4548781136529"}, - {file = "fsspec-2023.10.0.tar.gz", hash = "sha256:330c66757591df346ad3091a53bd907e15348c2ba17d63fd54f5c39c4457d2a5"}, + {file = "fsspec-2024.2.0-py3-none-any.whl", hash = "sha256:817f969556fa5916bc682e02ca2045f96ff7f586d45110fcb76022063ad2c7d8"}, + {file = "fsspec-2024.2.0.tar.gz", hash = "sha256:b6ad1a679f760dda52b1168c859d01b7b80648ea6f7f7c7f5a8a91dc3f3ecb84"}, ] [package.dependencies] aiohttp = {version = "<4.0.0a0 || >4.0.0a0,<4.0.0a1 || >4.0.0a1", optional = true, markers = "extra == \"http\""} -requests = {version = "*", optional = true, markers = "extra == \"http\""} [package.extras] abfs = ["adlfs"] @@ -1124,7 +1123,7 @@ github = ["requests"] gs = ["gcsfs"] gui = ["panel"] hdfs = ["pyarrow (>=1)"] -http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "requests"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"] libarchive = ["libarchive-c"] oci = ["ocifs"] s3 = ["s3fs"] @@ -1280,13 +1279,13 @@ test = ["objgraph", "psutil"] [[package]] name = "griffe" -version = "0.41.0" +version = "0.41.3" description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." 
optional = false python-versions = ">=3.8" files = [ - {file = "griffe-0.41.0-py3-none-any.whl", hash = "sha256:8aa7fc6eb00cb80af9c0198178c6b7110cb59fa2c5187bb13ea25eebbe4dd928"}, - {file = "griffe-0.41.0.tar.gz", hash = "sha256:850128c3198c18713eaf0a6cc8572e590a16b1965f72a4e871e66cf84740903f"}, + {file = "griffe-0.41.3-py3-none-any.whl", hash = "sha256:27b4610f1ba6e5d039e9f0a2c97232e13463df75e53cb1833e0679f3377b9de2"}, + {file = "griffe-0.41.3.tar.gz", hash = "sha256:9edcfa9f57f4d9c5fcc6d5ce067c67a685b7101a21a7d11848ce0437368e474c"}, ] [package.dependencies] @@ -1559,13 +1558,13 @@ socks = ["socksio (==1.*)"] [[package]] name = "huggingface-hub" -version = "0.20.3" +version = "0.21.3" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.20.3-py3-none-any.whl", hash = "sha256:d988ae4f00d3e307b0c80c6a05ca6dbb7edba8bba3079f74cda7d9c2e562a7b6"}, - {file = "huggingface_hub-0.20.3.tar.gz", hash = "sha256:94e7f8e074475fbc67d6a71957b678e1b4a74ff1b64a644fd6cbb83da962d05d"}, + {file = "huggingface_hub-0.21.3-py3-none-any.whl", hash = "sha256:b183144336fdf2810a8c109822e0bb6ef1fd61c65da6fb60e8c3f658b7144016"}, + {file = "huggingface_hub-0.21.3.tar.gz", hash = "sha256:26a15b604e4fc7bad37c467b76456543ec849386cbca9cd7e1e135f53e500423"}, ] [package.dependencies] @@ -1582,11 +1581,12 @@ all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", cli = ["InquirerPy (==0.3.4)"] dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] +hf-transfer = ["hf-transfer (>=0.1.4)"] inference = ["aiohttp", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)"] quality = ["mypy (==1.5.1)", "ruff (>=0.1.3)"] tensorflow = ["graphviz", "pydot", "tensorflow"] testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] -torch = ["torch"] +torch = ["safetensors", "torch"] typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] [[package]] @@ -1686,13 +1686,13 @@ files = [ [[package]] name = "ipykernel" -version = "6.29.2" +version = "6.29.3" description = "IPython Kernel for Jupyter" optional = true python-versions = ">=3.8" files = [ - {file = "ipykernel-6.29.2-py3-none-any.whl", hash = "sha256:50384f5c577a260a1d53f1f59a828c7266d321c9b7d00d345693783f66616055"}, - {file = "ipykernel-6.29.2.tar.gz", hash = "sha256:3bade28004e3ff624ed57974948116670604ac5f676d12339693f3142176d3f0"}, + {file = "ipykernel-6.29.3-py3-none-any.whl", hash = "sha256:5aa086a4175b0229d4eca211e181fb473ea78ffd9869af36ba7694c947302a21"}, + {file = "ipykernel-6.29.3.tar.gz", hash = "sha256:e14c250d1f9ea3989490225cc1a542781b095a18a19447fcf2b5eaf7d0ac5bd2"}, ] [package.dependencies] @@ -1715,7 +1715,7 @@ cov = ["coverage[toml]", "curio", "matplotlib", 
"pytest-cov", "trio"] docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "trio"] pyqt5 = ["pyqt5"] pyside6 = ["pyside6"] -test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (==0.23.4)", "pytest-cov", "pytest-timeout"] +test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.23.5)", "pytest-cov", "pytest-timeout"] [[package]] name = "ipython" @@ -2249,17 +2249,18 @@ min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-imp [[package]] name = "mkdocs-autorefs" -version = "0.5.0" +version = "1.0.1" description = "Automatically link across pages in MkDocs." optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_autorefs-0.5.0-py3-none-any.whl", hash = "sha256:7930fcb8ac1249f10e683967aeaddc0af49d90702af111a5e390e8b20b3d97ff"}, - {file = "mkdocs_autorefs-0.5.0.tar.gz", hash = "sha256:9a5054a94c08d28855cfab967ada10ed5be76e2bfad642302a610b252c3274c0"}, + {file = "mkdocs_autorefs-1.0.1-py3-none-any.whl", hash = "sha256:aacdfae1ab197780fb7a2dac92ad8a3d8f7ca8049a9cbe56a4218cd52e8da570"}, + {file = "mkdocs_autorefs-1.0.1.tar.gz", hash = "sha256:f684edf847eced40b570b57846b15f0bf57fb93ac2c510450775dcf16accb971"}, ] [package.dependencies] Markdown = ">=3.3" +markupsafe = ">=2.0.1" mkdocs = ">=1.1" [[package]] @@ -2278,13 +2279,13 @@ mkdocs = ">=1.0.3" [[package]] name = "mkdocs-material" -version = "9.5.11" +version = "9.5.12" description = "Documentation that simply works" optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_material-9.5.11-py3-none-any.whl", hash = "sha256:788ee0f3e036dca2dc20298d65e480297d348a44c9d7b2ee05c5262983e66072"}, - {file = "mkdocs_material-9.5.11.tar.gz", hash = "sha256:7af7f8af0dea16175558f3fb9245d26c83a17199baa5f157755e63d7437bf971"}, + {file = "mkdocs_material-9.5.12-py3-none-any.whl", hash = "sha256:d6f0c269f015e48c76291cdc79efb70f7b33bbbf42d649cfe475522ebee61b1f"}, + {file = "mkdocs_material-9.5.12.tar.gz", hash = "sha256:5f69cef6a8aaa4050b812f72b1094fda3d079b9a51cf27a247244c03ec455e97"}, ] [package.dependencies] @@ -2318,13 +2319,13 @@ files = [ [[package]] name = "mkdocstrings" -version = "0.24.0" +version = "0.24.1" description = "Automatic documentation from sources, for MkDocs." optional = false python-versions = ">=3.8" files = [ - {file = "mkdocstrings-0.24.0-py3-none-any.whl", hash = "sha256:f4908560c10f587326d8f5165d1908817b2e280bbf707607f601c996366a2264"}, - {file = "mkdocstrings-0.24.0.tar.gz", hash = "sha256:222b1165be41257b494a9d29b14135d2b7ca43f38161d5b10caae03b87bd4f7e"}, + {file = "mkdocstrings-0.24.1-py3-none-any.whl", hash = "sha256:b4206f9a2ca8a648e222d5a0ca1d36ba7dee53c88732818de183b536f9042b5d"}, + {file = "mkdocstrings-0.24.1.tar.gz", hash = "sha256:cc83f9a1c8724fc1be3c2fa071dd73d91ce902ef6a79710249ec8d0ee1064401"}, ] [package.dependencies] @@ -3112,13 +3113,13 @@ tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "p [[package]] name = "posthog" -version = "3.4.2" +version = "3.5.0" description = "Integrate PostHog into any python application." 
optional = true python-versions = "*" files = [ - {file = "posthog-3.4.2-py2.py3-none-any.whl", hash = "sha256:c7e79b2e585d16e93749874bcbcdad78d857037398ce0d8d6c474a04d0bd3bbe"}, - {file = "posthog-3.4.2.tar.gz", hash = "sha256:f0eafa663fbc4a942b49b6168a62a890635407044bbc7593051dcb9cc1208873"}, + {file = "posthog-3.5.0-py2.py3-none-any.whl", hash = "sha256:3c672be7ba6f95d555ea207d4486c171d06657eb34b3ce25eb043bfe7b6b5b76"}, + {file = "posthog-3.5.0.tar.gz", hash = "sha256:8f7e3b2c6e8714d0c0c542a2109b83a7549f63b7113a133ab2763a89245ef2ef"}, ] [package.dependencies] @@ -3541,13 +3542,13 @@ windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pymdown-extensions" -version = "10.7" +version = "10.7.1" description = "Extension pack for Python Markdown." optional = false python-versions = ">=3.8" files = [ - {file = "pymdown_extensions-10.7-py3-none-any.whl", hash = "sha256:6ca215bc57bc12bf32b414887a68b810637d039124ed9b2e5bd3325cbb2c050c"}, - {file = "pymdown_extensions-10.7.tar.gz", hash = "sha256:c0d64d5cf62566f59e6b2b690a4095c931107c250a8c8e1351c1de5f6b036deb"}, + {file = "pymdown_extensions-10.7.1-py3-none-any.whl", hash = "sha256:f5cc7000d7ff0d1ce9395d216017fa4df3dde800afb1fb72d1c7d3fd35e710f4"}, + {file = "pymdown_extensions-10.7.1.tar.gz", hash = "sha256:c70e146bdd83c744ffc766b4671999796aba18842b268510a329f7f64700d584"}, ] [package.dependencies] @@ -3618,13 +3619,13 @@ testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xm [[package]] name = "python-dateutil" -version = "2.8.2" +version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, ] [package.dependencies] @@ -4158,6 +4159,32 @@ files = [ [package.dependencies] pyasn1 = ">=0.1.3" +[[package]] +name = "ruff" +version = "0.3.0" +description = "An extremely fast Python linter and code formatter, written in Rust." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "ruff-0.3.0-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:7deb528029bacf845bdbb3dbb2927d8ef9b4356a5e731b10eef171e3f0a85944"}, + {file = "ruff-0.3.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:e1e0d4381ca88fb2b73ea0766008e703f33f460295de658f5467f6f229658c19"}, + {file = "ruff-0.3.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f7dbba46e2827dfcb0f0cc55fba8e96ba7c8700e0a866eb8cef7d1d66c25dcb"}, + {file = "ruff-0.3.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:23dbb808e2f1d68eeadd5f655485e235c102ac6f12ad31505804edced2a5ae77"}, + {file = "ruff-0.3.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3ef655c51f41d5fa879f98e40c90072b567c666a7114fa2d9fe004dffba00932"}, + {file = "ruff-0.3.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:d0d3d7ef3d4f06433d592e5f7d813314a34601e6c5be8481cccb7fa760aa243e"}, + {file = "ruff-0.3.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b08b356d06a792e49a12074b62222f9d4ea2a11dca9da9f68163b28c71bf1dd4"}, + {file = "ruff-0.3.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9343690f95710f8cf251bee1013bf43030072b9f8d012fbed6ad702ef70d360a"}, + {file = "ruff-0.3.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1f3ed501a42f60f4dedb7805fa8d4534e78b4e196f536bac926f805f0743d49"}, + {file = "ruff-0.3.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:cc30a9053ff2f1ffb505a585797c23434d5f6c838bacfe206c0e6cf38c921a1e"}, + {file = "ruff-0.3.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:5da894a29ec018a8293d3d17c797e73b374773943e8369cfc50495573d396933"}, + {file = "ruff-0.3.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:755c22536d7f1889be25f2baf6fedd019d0c51d079e8417d4441159f3bcd30c2"}, + {file = "ruff-0.3.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:dd73fe7f4c28d317855da6a7bc4aa29a1500320818dd8f27df95f70a01b8171f"}, + {file = "ruff-0.3.0-py3-none-win32.whl", hash = "sha256:19eacceb4c9406f6c41af806418a26fdb23120dfe53583df76d1401c92b7c14b"}, + {file = "ruff-0.3.0-py3-none-win_amd64.whl", hash = "sha256:128265876c1d703e5f5e5a4543bd8be47c73a9ba223fd3989d4aa87dd06f312f"}, + {file = "ruff-0.3.0-py3-none-win_arm64.whl", hash = "sha256:e3a4a6d46aef0a84b74fcd201a4401ea9a6cd85614f6a9435f2d33dd8cefbf83"}, + {file = "ruff-0.3.0.tar.gz", hash = "sha256:0886184ba2618d815067cf43e005388967b67ab9c80df52b32ec1152ab49f53a"}, +] + [[package]] name = "setuptools" version = "69.1.1" @@ -4450,60 +4477,60 @@ test = ["pytest"] [[package]] name = "sqlalchemy" -version = "2.0.27" +version = "2.0.28" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.27-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d04e579e911562f1055d26dab1868d3e0bb905db3bccf664ee8ad109f035618a"}, - {file = "SQLAlchemy-2.0.27-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fa67d821c1fd268a5a87922ef4940442513b4e6c377553506b9db3b83beebbd8"}, - {file = "SQLAlchemy-2.0.27-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c7a596d0be71b7baa037f4ac10d5e057d276f65a9a611c46970f012752ebf2d"}, - {file = "SQLAlchemy-2.0.27-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:954d9735ee9c3fa74874c830d089a815b7b48df6f6b6e357a74130e478dbd951"}, - {file = 
"SQLAlchemy-2.0.27-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5cd20f58c29bbf2680039ff9f569fa6d21453fbd2fa84dbdb4092f006424c2e6"}, - {file = "SQLAlchemy-2.0.27-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:03f448ffb731b48323bda68bcc93152f751436ad6037f18a42b7e16af9e91c07"}, - {file = "SQLAlchemy-2.0.27-cp310-cp310-win32.whl", hash = "sha256:d997c5938a08b5e172c30583ba6b8aad657ed9901fc24caf3a7152eeccb2f1b4"}, - {file = "SQLAlchemy-2.0.27-cp310-cp310-win_amd64.whl", hash = "sha256:eb15ef40b833f5b2f19eeae65d65e191f039e71790dd565c2af2a3783f72262f"}, - {file = "SQLAlchemy-2.0.27-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6c5bad7c60a392850d2f0fee8f355953abaec878c483dd7c3836e0089f046bf6"}, - {file = "SQLAlchemy-2.0.27-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a3012ab65ea42de1be81fff5fb28d6db893ef978950afc8130ba707179b4284a"}, - {file = "SQLAlchemy-2.0.27-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbcd77c4d94b23e0753c5ed8deba8c69f331d4fd83f68bfc9db58bc8983f49cd"}, - {file = "SQLAlchemy-2.0.27-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d177b7e82f6dd5e1aebd24d9c3297c70ce09cd1d5d37b43e53f39514379c029c"}, - {file = "SQLAlchemy-2.0.27-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:680b9a36029b30cf063698755d277885d4a0eab70a2c7c6e71aab601323cba45"}, - {file = "SQLAlchemy-2.0.27-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1306102f6d9e625cebaca3d4c9c8f10588735ef877f0360b5cdb4fdfd3fd7131"}, - {file = "SQLAlchemy-2.0.27-cp311-cp311-win32.whl", hash = "sha256:5b78aa9f4f68212248aaf8943d84c0ff0f74efc65a661c2fc68b82d498311fd5"}, - {file = "SQLAlchemy-2.0.27-cp311-cp311-win_amd64.whl", hash = "sha256:15e19a84b84528f52a68143439d0c7a3a69befcd4f50b8ef9b7b69d2628ae7c4"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0de1263aac858f288a80b2071990f02082c51d88335a1db0d589237a3435fe71"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce850db091bf7d2a1f2fdb615220b968aeff3849007b1204bf6e3e50a57b3d32"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8dfc936870507da96aebb43e664ae3a71a7b96278382bcfe84d277b88e379b18"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4fbe6a766301f2e8a4519f4500fe74ef0a8509a59e07a4085458f26228cd7cc"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4535c49d961fe9a77392e3a630a626af5baa967172d42732b7a43496c8b28876"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0fb3bffc0ced37e5aa4ac2416f56d6d858f46d4da70c09bb731a246e70bff4d5"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-win32.whl", hash = "sha256:7f470327d06400a0aa7926b375b8e8c3c31d335e0884f509fe272b3c700a7254"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-win_amd64.whl", hash = "sha256:f9374e270e2553653d710ece397df67db9d19c60d2647bcd35bfc616f1622dcd"}, - {file = "SQLAlchemy-2.0.27-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e97cf143d74a7a5a0f143aa34039b4fecf11343eed66538610debc438685db4a"}, - {file = "SQLAlchemy-2.0.27-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7b5a3e2120982b8b6bd1d5d99e3025339f7fb8b8267551c679afb39e9c7c7f1"}, - {file = "SQLAlchemy-2.0.27-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e36aa62b765cf9f43a003233a8c2d7ffdeb55bc62eaa0a0380475b228663a38f"}, - {file = 
"SQLAlchemy-2.0.27-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5ada0438f5b74c3952d916c199367c29ee4d6858edff18eab783b3978d0db16d"}, - {file = "SQLAlchemy-2.0.27-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b1d9d1bfd96eef3c3faedb73f486c89e44e64e40e5bfec304ee163de01cf996f"}, - {file = "SQLAlchemy-2.0.27-cp37-cp37m-win32.whl", hash = "sha256:ca891af9f3289d24a490a5fde664ea04fe2f4984cd97e26de7442a4251bd4b7c"}, - {file = "SQLAlchemy-2.0.27-cp37-cp37m-win_amd64.whl", hash = "sha256:fd8aafda7cdff03b905d4426b714601c0978725a19efc39f5f207b86d188ba01"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ec1f5a328464daf7a1e4e385e4f5652dd9b1d12405075ccba1df842f7774b4fc"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ad862295ad3f644e3c2c0d8b10a988e1600d3123ecb48702d2c0f26771f1c396"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48217be1de7d29a5600b5c513f3f7664b21d32e596d69582be0a94e36b8309cb"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e56afce6431450442f3ab5973156289bd5ec33dd618941283847c9fd5ff06bf"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:611068511b5531304137bcd7fe8117c985d1b828eb86043bd944cebb7fae3910"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b86abba762ecfeea359112b2bb4490802b340850bbee1948f785141a5e020de8"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-win32.whl", hash = "sha256:30d81cc1192dc693d49d5671cd40cdec596b885b0ce3b72f323888ab1c3863d5"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-win_amd64.whl", hash = "sha256:120af1e49d614d2525ac247f6123841589b029c318b9afbfc9e2b70e22e1827d"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d07ee7793f2aeb9b80ec8ceb96bc8cc08a2aec8a1b152da1955d64e4825fcbac"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cb0845e934647232b6ff5150df37ceffd0b67b754b9fdbb095233deebcddbd4a"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fc19ae2e07a067663dd24fca55f8ed06a288384f0e6e3910420bf4b1270cc51"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b90053be91973a6fb6020a6e44382c97739736a5a9d74e08cc29b196639eb979"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2f5c9dfb0b9ab5e3a8a00249534bdd838d943ec4cfb9abe176a6c33408430230"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:33e8bde8fff203de50399b9039c4e14e42d4d227759155c21f8da4a47fc8053c"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-win32.whl", hash = "sha256:d873c21b356bfaf1589b89090a4011e6532582b3a8ea568a00e0c3aab09399dd"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-win_amd64.whl", hash = "sha256:ff2f1b7c963961d41403b650842dc2039175b906ab2093635d8319bef0b7d620"}, - {file = "SQLAlchemy-2.0.27-py3-none-any.whl", hash = "sha256:1ab4e0448018d01b142c916cc7119ca573803a4745cfe341b8f95657812700ac"}, - {file = "SQLAlchemy-2.0.27.tar.gz", hash = "sha256:86a6ed69a71fe6b88bf9331594fa390a2adda4a49b5c06f98e47bf0d392534f8"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0b148ab0438f72ad21cb004ce3bdaafd28465c4276af66df3b9ecd2037bf252"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bbda76961eb8f27e6ad3c84d1dc56d5bc61ba8f02bd20fcf3450bd421c2fcc9c"}, + {file = 
"SQLAlchemy-2.0.28-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feea693c452d85ea0015ebe3bb9cd15b6f49acc1a31c28b3c50f4db0f8fb1e71"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5da98815f82dce0cb31fd1e873a0cb30934971d15b74e0d78cf21f9e1b05953f"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4a5adf383c73f2d49ad15ff363a8748319ff84c371eed59ffd0127355d6ea1da"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56856b871146bfead25fbcaed098269d90b744eea5cb32a952df00d542cdd368"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-win32.whl", hash = "sha256:943aa74a11f5806ab68278284a4ddd282d3fb348a0e96db9b42cb81bf731acdc"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-win_amd64.whl", hash = "sha256:c6c4da4843e0dabde41b8f2e8147438330924114f541949e6318358a56d1875a"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:46a3d4e7a472bfff2d28db838669fc437964e8af8df8ee1e4548e92710929adc"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0d3dd67b5d69794cfe82862c002512683b3db038b99002171f624712fa71aeaa"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61e2e41656a673b777e2f0cbbe545323dbe0d32312f590b1bc09da1de6c2a02"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0315d9125a38026227f559488fe7f7cee1bd2fbc19f9fd637739dc50bb6380b2"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:af8ce2d31679006e7b747d30a89cd3ac1ec304c3d4c20973f0f4ad58e2d1c4c9"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:81ba314a08c7ab701e621b7ad079c0c933c58cdef88593c59b90b996e8b58fa5"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-win32.whl", hash = "sha256:1ee8bd6d68578e517943f5ebff3afbd93fc65f7ef8f23becab9fa8fb315afb1d"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-win_amd64.whl", hash = "sha256:ad7acbe95bac70e4e687a4dc9ae3f7a2f467aa6597049eeb6d4a662ecd990bb6"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d3499008ddec83127ab286c6f6ec82a34f39c9817f020f75eca96155f9765097"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9b66fcd38659cab5d29e8de5409cdf91e9986817703e1078b2fdaad731ea66f5"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bea30da1e76cb1acc5b72e204a920a3a7678d9d52f688f087dc08e54e2754c67"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:124202b4e0edea7f08a4db8c81cc7859012f90a0d14ba2bf07c099aff6e96462"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e23b88c69497a6322b5796c0781400692eca1ae5532821b39ce81a48c395aae9"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b6303bfd78fb3221847723104d152e5972c22367ff66edf09120fcde5ddc2e2"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-win32.whl", hash = "sha256:a921002be69ac3ab2cf0c3017c4e6a3377f800f1fca7f254c13b5f1a2f10022c"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-win_amd64.whl", hash = "sha256:b4a2cf92995635b64876dc141af0ef089c6eea7e05898d8d8865e71a326c0385"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e91b5e341f8c7f1e5020db8e5602f3ed045a29f8e27f7f565e0bdee3338f2c7"}, + {file = 
"SQLAlchemy-2.0.28-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45c7b78dfc7278329f27be02c44abc0d69fe235495bb8e16ec7ef1b1a17952db"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3eba73ef2c30695cb7eabcdb33bb3d0b878595737479e152468f3ba97a9c22a4"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5df5d1dafb8eee89384fb7a1f79128118bc0ba50ce0db27a40750f6f91aa99d5"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2858bbab1681ee5406650202950dc8f00e83b06a198741b7c656e63818633526"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-win32.whl", hash = "sha256:9461802f2e965de5cff80c5a13bc945abea7edaa1d29360b485c3d2b56cdb075"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-win_amd64.whl", hash = "sha256:a6bec1c010a6d65b3ed88c863d56b9ea5eeefdf62b5e39cafd08c65f5ce5198b"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:843a882cadebecc655a68bd9a5b8aa39b3c52f4a9a5572a3036fb1bb2ccdc197"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:dbb990612c36163c6072723523d2be7c3eb1517bbdd63fe50449f56afafd1133"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7e4baf9161d076b9a7e432fce06217b9bd90cfb8f1d543d6e8c4595627edb9"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0a5354cb4de9b64bccb6ea33162cb83e03dbefa0d892db88a672f5aad638a75"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:fffcc8edc508801ed2e6a4e7b0d150a62196fd28b4e16ab9f65192e8186102b6"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aca7b6d99a4541b2ebab4494f6c8c2f947e0df4ac859ced575238e1d6ca5716b"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-win32.whl", hash = "sha256:8c7f10720fc34d14abad5b647bc8202202f4948498927d9f1b4df0fb1cf391b7"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-win_amd64.whl", hash = "sha256:243feb6882b06a2af68ecf4bec8813d99452a1b62ba2be917ce6283852cf701b"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fc4974d3684f28b61b9a90fcb4c41fb340fd4b6a50c04365704a4da5a9603b05"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87724e7ed2a936fdda2c05dbd99d395c91ea3c96f029a033a4a20e008dd876bf"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68722e6a550f5de2e3cfe9da6afb9a7dd15ef7032afa5651b0f0c6b3adb8815d"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:328529f7c7f90adcd65aed06a161851f83f475c2f664a898af574893f55d9e53"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:df40c16a7e8be7413b885c9bf900d402918cc848be08a59b022478804ea076b8"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:426f2fa71331a64f5132369ede5171c52fd1df1bd9727ce621f38b5b24f48750"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-win32.whl", hash = "sha256:33157920b233bc542ce497a81a2e1452e685a11834c5763933b440fedd1d8e2d"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-win_amd64.whl", hash = "sha256:2f60843068e432311c886c5f03c4664acaef507cf716f6c60d5fde7265be9d7b"}, + {file = "SQLAlchemy-2.0.28-py3-none-any.whl", hash = "sha256:78bb7e8da0183a8301352d569900d9d3594c48ac21dc1c2ec6b3121ed8b6c986"}, + {file = "SQLAlchemy-2.0.28.tar.gz", hash = 
"sha256:dd53b6c4e6d960600fd6532b79ee28e2da489322fcf6648738134587faf767b6"}, ] [package.dependencies] @@ -5568,4 +5595,4 @@ weaviate = ["weaviate-client"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.12" -content-hash = "8b4cc583653becb3be9f5bc4c34cdf5ced146ba32157a5bb4bdc7885291c0403" +content-hash = "233e4dfd38ce1f291ed8e966663cce04ffedfa6b0363f3c92447b23467b983e6" diff --git a/pyproject.toml b/pyproject.toml index e2c719eaf2..7d4c7b5633 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -52,7 +52,7 @@ docs = [ "sphinx-reredirects>=0.1.2", "sphinx-automodapi==0.16.0", ] -test = ["pytest>=6.2.5"] +dev = ["pytest>=6.2.5"] [project.urls] homepage = "https://github.com/stanfordnlp/dspy" @@ -103,8 +103,9 @@ sphinx-reredirects = { version = "^0.1.2", optional = true } sphinx-automodapi = { version = "0.16.0", optional = true } -[tool.poetry.group.test.dependencies] +[tool.poetry.group.dev.dependencies] pytest = "^6.2.5" +ruff = "^0.3.0" [tool.poetry.extras] From d64f4bcb742f262029a9ad6501db2f854305de2e Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Tue, 5 Mar 2024 20:01:05 -0600 Subject: [PATCH 111/243] Add verbose to ruff check --- .github/workflows/ruff_lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ruff_lint.yml b/.github/workflows/ruff_lint.yml index 0834dd4e6a..dd396c736d 100644 --- a/.github/workflows/ruff_lint.yml +++ b/.github/workflows/ruff_lint.yml @@ -22,7 +22,7 @@ jobs: - uses: actions/setup-python@v5 - uses: chartboost/ruff-action@v1 with: - args: --fix-only --exit-non-zero-on-fix + args: check . --fix-only --exit-non-zero-on-fix -v comment_if_ruff_fail: name: Comment on Ruff Failure From 7e1d5019900d8cfdb193d80515e30692c509b9c1 Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Wed, 6 Mar 2024 00:06:06 -0600 Subject: [PATCH 112/243] Make comment action follow github best practices --- .github/workflows/pr_comment.yml | 37 +++++++++++++++++++++++ .github/workflows/ruff_lint.yml | 51 -------------------------------- .github/workflows/run_tests.yml | 39 +++++++++++++++++++++++- 3 files changed, 75 insertions(+), 52 deletions(-) create mode 100644 .github/workflows/pr_comment.yml delete mode 100644 .github/workflows/ruff_lint.yml diff --git a/.github/workflows/pr_comment.yml b/.github/workflows/pr_comment.yml new file mode 100644 index 0000000000..87afb50947 --- /dev/null +++ b/.github/workflows/pr_comment.yml @@ -0,0 +1,37 @@ +name: Comment for PR + +on: + workflow_run: + workflows: ["Check for Ruff Fix, Test, and Build"] + types: + - completed + +jobs: + comment: + runs-on: ubuntu-latest + steps: + - name: "Download Ruff Fix Outcome Artifact" + uses: actions/download-artifact@v2 + with: + name: ruff-fix-outcome + path: artifacts + + - name: "Read Ruff Fix Outcome" + id: ruff_outcome + run: | + outcome=$(cat artifacts/ruff_fix_outcome.txt) + echo "RUFF_FIX_OUTCOME=$outcome" >> $GITHUB_ENV + + - name: "Comment on PR if Ruff Fix Failed" + if: env.RUFF_FIX_OUTCOME == 'true' + uses: actions/github-script@v5 + with: + script: | + const pr_number = ${{ github.event.workflow_run.pull_requests[0].number }}; + const message = 'It seems like there are issues with the formatting. Please run `ruff check . 
--fix-only` and commit to address these issues.'; + github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: pr_number, + body: message + }); diff --git a/.github/workflows/ruff_lint.yml b/.github/workflows/ruff_lint.yml deleted file mode 100644 index dd396c736d..0000000000 --- a/.github/workflows/ruff_lint.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: Lint - -on: - pull_request_target: - types: [opened, synchronize, reopened] - -env: - POETRY_VERSION: "1.6.1" - -jobs: - fix: - name: Apply Ruff Fix - runs-on: ubuntu-latest - permissions: - contents: write - pull-requests: write - # Note that 'pull_request_target' has higher permissions than 'pull_request'\ - # Do not change any events that run arbitrary code to use 'pull_request_target' - if: github.event_name == 'pull_request_target' - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-python@v5 - - uses: chartboost/ruff-action@v1 - with: - args: check . --fix-only --exit-non-zero-on-fix -v - - comment_if_ruff_fail: - name: Comment on Ruff Failure - runs-on: ubuntu-latest - permissions: - contents: read - pull-requests: write - needs: fix - # Note that 'pull_request_target' has higher permissions than 'pull_request'\ - # Do not change any events that run arbitrary code to use 'pull_request_target' - if: ${{ failure() && needs.fix.result == 'failure' && github.event_name == 'pull_request_target' }} - steps: - # - run: echo 'It seems like there are issues with the formatting. Please run `ruff check . --fix-only` and commit to address these issues.' - - name: Comment on Ruff Failure - uses: actions/github-script@v6 - with: - script: | - const issue_number = context.issue.number; - const message = 'It seems like there are issues with the formatting. Please run `ruff check . --fix-only` and commit to address these issues.'; - await github.rest.issues.createComment({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: issue_number, - body: message - }); - console.log('Commented on the issue.'); diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 15da587b4e..f5c8d5f3f2 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -1,4 +1,4 @@ -name: Fix, Test, and Build +name: Check for Ruff Fix, Test, and Build on: pull_request: @@ -8,6 +8,43 @@ env: POETRY_VERSION: "1.6.1" jobs: + fix: + name: Check Ruff Fix + runs-on: ubuntu-latest + permissions: + contents: write + pull-requests: write + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + - name: Ruff Fix Attempt + id: ruff_fix + uses: chartboost/ruff-action@v1 + with: + args: --fix-only --exit-non-zero-on-fix + continue-on-error: true + + - name: Determine Ruff Fix Outcome + run: | + if [ ${{ steps.ruff_fix.outcome }} == 'failure' ]; then + echo "RUFF_FAILED=true" >> $GITHUB_ENV + echo ${{ steps.ruff_fix.outcome }} > ruff_fix_outcome.txt + else + echo "RUFF_FAILED=false" >> $GITHUB_ENV + echo ${{ steps.ruff_fix.outcome }} > ruff_fix_outcome.txt + fi + + - uses: actions/upload-artifact@v2 + with: + name: ruff-fix-outcome + path: ruff_fix_outcome.txt + + - name: Fail Workflow if Ruff Fix Failed + if: env.RUFF_FAILED == 'true' + run: | + echo "Ruff fix failed, failing the workflow." 
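            // Posting from this separate workflow_run-triggered workflow follows the
            // usual hardening pattern: the untrusted PR workflow only uploads an
            // artifact, while this trusted workflow holds the write permission
            // needed to create the comment.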
+ exit 1 + test: name: Run Tests runs-on: ubuntu-latest From 70ef23d23f91fd4c8c39bcda402b95dbd8689d5a Mon Sep 17 00:00:00 2001 From: Isaac Miller <17116851+isaacbmiller@users.noreply.github.com> Date: Wed, 6 Mar 2024 00:14:08 -0600 Subject: [PATCH 113/243] 2 commits: 1 bad, 1 good (#569) * bad commit, should fail CI * Good commit, should pass CI * Empty-Commit * Empty-Commit * Bad commit, should fail lint * Good commit, should pass lint --- dspy/evaluate/evaluate.py | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/dspy/evaluate/evaluate.py b/dspy/evaluate/evaluate.py index b037153ad2..4d1fd62f46 100644 --- a/dspy/evaluate/evaluate.py +++ b/dspy/evaluate/evaluate.py @@ -1,17 +1,21 @@ +import threading import types -import dsp -import tqdm import pandas as pd +import tqdm -import threading +import dsp try: from IPython.display import HTML from IPython.display import display as ipython_display except ImportError: ipython_display = print - def HTML(x): return x + + def HTML(x): + return x + + from concurrent.futures import ThreadPoolExecutor, as_completed from dsp.evaluation.utils import * @@ -127,7 +131,8 @@ def wrapped_program(example_idx, example): try: prediction = program(**example.inputs()) score = metric( - example, prediction + example, + prediction, ) # FIXME: TODO: What's the right order? Maybe force name-based kwargs! # increment assert and suggest failures to program's attributes @@ -156,7 +161,10 @@ def wrapped_program(example_idx, example): wrapped_program, devset, display_progress) else: reordered_devset, ncorrect, ntotal = self._execute_multi_thread( - wrapped_program, devset, num_threads, display_progress + wrapped_program, + devset, + num_threads, + display_progress, ) if return_outputs: # Handle the return_outputs logic results = [(example, prediction, score) @@ -260,14 +268,14 @@ def configure_dataframe_display(df, metric_name): [ {"selector": "th", "props": [("text-align", "left")]}, {"selector": "td", "props": [("text-align", "left")]}, - ] + ], ).set_properties( **{ "text-align": "left", "white-space": "pre-wrap", "word-wrap": "break-word", "max-width": "400px", - } + }, ) From 958cd6a1cfb5eaeabf12fe7c1349b2c17e139322 Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Wed, 6 Mar 2024 00:21:12 -0600 Subject: [PATCH 114/243] Rename workflow --- .github/workflows/run_tests.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index f5c8d5f3f2..e57c6ff0f1 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -1,4 +1,4 @@ -name: Check for Ruff Fix, Test, and Build +name: Lint, Test, and Build on: pull_request: @@ -40,9 +40,10 @@ jobs: path: ruff_fix_outcome.txt - name: Fail Workflow if Ruff Fix Failed - if: env.RUFF_FAILED == 'true' + if: steps.ruff_fix.outcome == 'failure' run: | echo "Ruff fix failed, failing the workflow." + echo "Please run 'ruff check . --fix-only' locally and push the changes." 
exit 1 test: From a2fb536eb0eb37fc5e38035ba3eac3a87a249545 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Tue, 5 Mar 2024 23:47:45 -0800 Subject: [PATCH 115/243] Fix the "no inputs" case --- dsp/templates/template_v2.py | 73 +++++++++++++++-------------------- dspy/predict/predict.py | 5 +-- tests/predict/test_predict.py | 25 ++++++++++++ 3 files changed, 59 insertions(+), 44 deletions(-) diff --git a/dsp/templates/template_v2.py b/dsp/templates/template_v2.py index d6e61642ea..9ca90779f9 100644 --- a/dsp/templates/template_v2.py +++ b/dsp/templates/template_v2.py @@ -70,6 +70,8 @@ def query(self, example: Example, is_demo: bool = False) -> str: """Retrieves the input variables from the example and formats them into a query string.""" result: list[str] = [] + # If not a demo, find the last field that doesn't have a value set in `example` and set it to "" + # This creates the "Output:" prefix at the end of the prompt. if not is_demo: has_value = [ field.input_variable in example @@ -78,40 +80,40 @@ def query(self, example: Example, is_demo: bool = False) -> str: for field in self.fields ] - for i in range(1, len(has_value)): - if has_value[i - 1] and not any(has_value[i:]): - example[self.fields[i].input_variable] = "" - break + # If there are no inputs, set the first field to "" + if not any(has_value): + example[self.fields[0].input_variable] = "" + # Otherwise find the first field without a value. + else: + for i in range(1, len(has_value)): + if has_value[i - 1] and not any(has_value[i:]): + example[self.fields[i].input_variable] = "" + break for field in self.fields: - if ( - field.input_variable in example - and example[field.input_variable] is not None - ): + if field.input_variable in example and example[field.input_variable] is not None: if field.input_variable in self.format_handlers: format_handler = self.format_handlers[field.input_variable] else: + def format_handler(x): assert type(x) == str, f"Need format_handler for {field.input_variable} of type {type(x)}" return " ".join(x.split()) formatted_value = format_handler(example[field.input_variable]) - separator = '\n' if field.separator == ' ' and '\n' in formatted_value else field.separator + separator = "\n" if field.separator == " " and "\n" in formatted_value else field.separator result.append( f"{field.name}{separator}{formatted_value}", ) - if self._has_augmented_guidelines() and (example.get('augmented', False)): + if self._has_augmented_guidelines() and (example.get("augmented", False)): return "\n\n".join([r for r in result if r]) return "\n".join([r for r in result if r]) def guidelines(self, show_guidelines=True) -> str: """Returns the task guidelines as described in the lm prompt""" - if (not show_guidelines) or ( - hasattr(dsp.settings, "show_guidelines") - and not dsp.settings.show_guidelines - ): + if (not show_guidelines) or (hasattr(dsp.settings, "show_guidelines") and not dsp.settings.show_guidelines): return "" result = "Follow the following format.\n\n" @@ -126,11 +128,13 @@ def guidelines(self, show_guidelines=True) -> str: def _has_augmented_guidelines(self): return len(self.fields) > 3 or any( - ("\n" in field.separator) or ('\n' in field.description) for field in self.fields + ("\n" in field.separator) or ("\n" in field.description) for field in self.fields ) def extract( - self, example: Union[Example, dict[str, Any]], raw_pred: str, + self, + example: Union[Example, dict[str, Any]], + raw_pred: str, ) -> Example: """Extracts the answer from the LM raw prediction using the template structure @@ 
-147,10 +151,7 @@ def extract( idx = 0 while idx < len(self.fields): - if ( - self.fields[idx].input_variable not in example - or example[self.fields[idx].input_variable] is None - ): + if self.fields[idx].input_variable not in example or example[self.fields[idx].input_variable] is None: break idx += 1 @@ -164,8 +165,8 @@ def extract( if offset >= 0: if dspy.settings.release >= 20231003: - example[self.fields[idx].output_variable] = raw_pred[:offset].strip().rstrip('---').strip() - raw_pred = raw_pred[offset + len(next_field_name) :].strip().rstrip('---').strip() + example[self.fields[idx].output_variable] = raw_pred[:offset].strip().rstrip("---").strip() + raw_pred = raw_pred[offset + len(next_field_name) :].strip().rstrip("---").strip() else: example[self.fields[idx].output_variable] = raw_pred[:offset].strip() raw_pred = raw_pred[offset + len(next_field_name) :].strip() @@ -173,7 +174,7 @@ def extract( idx += 1 else: if dspy.settings.release >= 20231003: - example[self.fields[idx].output_variable] = raw_pred.strip().rstrip('---').strip() + example[self.fields[idx].output_variable] = raw_pred.strip().rstrip("---").strip() else: example[self.fields[idx].output_variable] = raw_pred.strip() @@ -185,7 +186,7 @@ def extract( assert idx == len(self.fields) - 1, (idx, len(self.fields)) if dspy.settings.release >= 20231003: - example[self.fields[idx].output_variable] = raw_pred.strip().rstrip('---').strip() + example[self.fields[idx].output_variable] = raw_pred.strip().rstrip("---").strip() else: example[self.fields[idx].output_variable] = raw_pred.strip() @@ -196,7 +197,7 @@ def extract( def __call__(self, example, show_guidelines=True) -> str: example = dsp.Example(example) - if hasattr(dsp.settings, 'query_only') and dsp.settings.query_only: + if hasattr(dsp.settings, "query_only") and dsp.settings.query_only: return self.query(example) # The training data should not contain the output variable @@ -207,29 +208,20 @@ def __call__(self, example, show_guidelines=True) -> str: self.query(demo, is_demo=True) for demo in example.demos if ( - (not demo.get('augmented', False)) + (not demo.get("augmented", False)) and ( # validate that the training example has the same primitive input var as the template - self.fields[-1].input_variable in demo - and demo[self.fields[-1].input_variable] is not None + self.fields[-1].input_variable in demo and demo[self.fields[-1].input_variable] is not None ) ) ] - ademos = [ - self.query(demo, is_demo=True) - for demo in example.demos - if demo.get('augmented', False) - ] + ademos = [self.query(demo, is_demo=True) for demo in example.demos if demo.get("augmented", False)] # Move the rdemos to ademos if rdemo has all the fields filled in rdemos_ = [] new_ademos = [] for rdemo in rdemos: - if all( - (field.name in rdemo) - for field in self.fields - if field.input_variable in example - ): + if all((field.name in rdemo) for field in self.fields if field.input_variable in example): import dspy if dspy.settings.release >= 20230928: @@ -242,7 +234,6 @@ def __call__(self, example, show_guidelines=True) -> str: ademos = new_ademos + ademos rdemos = rdemos_ - long_query = self._has_augmented_guidelines() if long_query: @@ -251,10 +242,10 @@ def __call__(self, example, show_guidelines=True) -> str: query = self.query(example) # if it has more lines than fields - if len(query.split('\n')) > len(self.fields): + if len(query.split("\n")) > len(self.fields): long_query = True - if not example.get('augmented', False): + if not example.get("augmented", False): example["augmented"] 
= True

        query = self.query(example)
diff --git a/dspy/predict/predict.py b/dspy/predict/predict.py
index d0a5f6010e..1243e8b882 100644
--- a/dspy/predict/predict.py
+++ b/dspy/predict/predict.py
@@ -73,7 +73,8 @@ def forward(self, **kwargs):
             # print(f"#> Setting temperature to 0.7 since n={num_generations} and prior temperature={temperature}.")

         # All of the other kwargs are presumed to fit a prefix of the signature.
-
+        # That is, they are input variables for the bottommost generation, so
+        # we place them inside the input `x` together with the demos.
         x = dsp.Example(demos=demos, **kwargs)

         if new_signature is not None:
@@ -86,8 +87,6 @@ def forward(self, **kwargs):

         # Switch to legacy format for dsp.generate
         template = signature_to_template(signature)
-        # print("Created template", template)
-        # print("From Signature", signature)

         if self.lm is None:
             x, C = dsp.generate(template, **config)(x, stage=self.stage)
diff --git a/tests/predict/test_predict.py b/tests/predict/test_predict.py
index 2ded1e9f12..c0407b938c 100644
--- a/tests/predict/test_predict.py
+++ b/tests/predict/test_predict.py
@@ -2,6 +2,7 @@
 from dspy import Predict, Signature
 from dspy.utils.dummies import DummyLM
 import copy
+import textwrap


 def test_initialization_with_string_signature():
@@ -119,3 +120,27 @@ def __init__(self):
     # Check that it also works the second time.
     program2 = copy.deepcopy(program)
     assert program2.named_predictors() == [("inner", program2.inner)]
+
+
+def test_output_only():
+    class OutputOnlySignature(dspy.Signature):
+        output = dspy.OutputField()
+
+    predictor = Predict(OutputOnlySignature)
+
+    lm = DummyLM(["short answer"])
+    dspy.settings.configure(lm=lm)
+    assert predictor().output == "short answer"
+
+    assert lm.get_convo(-1) == textwrap.dedent("""\
+        Given the fields , produce the fields `output`.
+
+        ---
+
+        Follow the following format.
+ + Output: ${output} + + --- + + Output: short answer""") From 797f1cd69b73ba163ec1e1ab98f9be4a2e6a3138 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Tue, 5 Mar 2024 23:48:24 -0800 Subject: [PATCH 116/243] Added a new method to Module --- dspy/primitives/module.py | 8 +++++ tests/primitives/test_program.py | 61 +++++++++++++++++++++++++------- 2 files changed, 57 insertions(+), 12 deletions(-) diff --git a/dspy/primitives/module.py b/dspy/primitives/module.py index 59ec8a09f8..989f68403d 100644 --- a/dspy/primitives/module.py +++ b/dspy/primitives/module.py @@ -1,4 +1,5 @@ import copy +from typing import Generator import ujson @@ -41,6 +42,13 @@ def add_parameter(param_name, param_value): return named_parameters + def named_sub_modules(self) -> Generator[tuple[str, "BaseModule"], None, None]: + yield "", self + for name, value in self.__dict__.items(): + if isinstance(value, BaseModule): + for sub_name, sub_value in value.named_sub_modules(): + yield f"{name}.{sub_name}", sub_value + def parameters(self): return [param for _, param in self.named_parameters()] diff --git a/tests/primitives/test_program.py b/tests/primitives/test_program.py index b1d7c89725..aec5ace7d2 100644 --- a/tests/primitives/test_program.py +++ b/tests/primitives/test_program.py @@ -1,4 +1,5 @@ import dspy +from dspy.primitives.module import BaseModule from dspy.primitives.program import ( Module, set_attribute_by_name, @@ -19,9 +20,7 @@ def forward(self, question): def test_module_initialization(): module = Module() - assert ( - module._compiled is False - ), "Module _compiled attribute should be False upon initialization" + assert module._compiled is False, "Module _compiled attribute should be False upon initialization" def test_named_predictors(): @@ -29,25 +28,19 @@ def test_named_predictors(): named_preds = module.named_predictors() assert len(named_preds) == 2, "Should identify correct number of Predict instances" names, preds = zip(*named_preds) - assert ( - "predict1" in names and "predict2" in names - ), "Named predictors should include 'predict1' and 'predict2'" + assert "predict1" in names and "predict2" in names, "Named predictors should include 'predict1' and 'predict2'" def test_predictors(): module = HopModule() preds = module.predictors() assert len(preds) == 2, "Should return correct number of Predict instances" - assert all( - isinstance(p, dspy.Predict) for p in preds - ), "All returned items should be instances of PredictMock" + assert all(isinstance(p, dspy.Predict) for p in preds), "All returned items should be instances of PredictMock" def test_forward(): program = HopModule() - dspy.settings.configure( - lm=DummyLM({"What is 1+1?": "let me check", "let me check": "2"}) - ) + dspy.settings.configure(lm=DummyLM({"What is 1+1?": "let me check", "let me check": "2"})) result = program(question="What is 1+1?").answer assert result == "2" @@ -64,3 +57,47 @@ def __init__(self): names, _preds = zip(*named_preds) assert "hop.predict1" in names assert "hop.predict2" in names + + +class SubModule(BaseModule): + pass + + +class AnotherSubModule(BaseModule): + pass + + +def test_empty_module(): + module = BaseModule() + assert list(module.named_sub_modules()) == [] + + +def test_single_level(): + module = BaseModule() + module.sub = SubModule() + expected = [("sub", module.sub)] + assert list(module.named_sub_modules()) == expected + + +def test_multiple_levels(): + module = BaseModule() + module.sub = SubModule() + module.sub.subsub = SubModule() + expected = [("sub", module.sub), ("sub.subsub", 
module.sub.subsub)] + assert list(module.named_sub_modules()) == expected + + +def test_multiple_sub_modules(): + module = BaseModule() + module.sub1 = SubModule() + module.sub2 = SubModule() + expected = [("sub1", module.sub1), ("sub2", module.sub2)] + assert sorted(list(module.named_sub_modules())) == sorted(expected) + + +def test_non_base_module_attributes(): + module = BaseModule() + module.sub = SubModule() + module.not_a_sub = "Not a BaseModule" + expected = [("sub", module.sub)] + assert list(module.named_sub_modules()) == expected From bf011b8c82c1f279f99242cd216e613362b171f7 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Tue, 5 Mar 2024 23:49:01 -0800 Subject: [PATCH 117/243] Fix a bug with generics --- dspy/signatures/signature.py | 8 +++++--- tests/functional/test_functional.py | 24 +++++++++++++++++++++++- 2 files changed, 28 insertions(+), 4 deletions(-) diff --git a/dspy/signatures/signature.py b/dspy/signatures/signature.py index 8c1f161642..62626c8e1d 100644 --- a/dspy/signatures/signature.py +++ b/dspy/signatures/signature.py @@ -1,9 +1,10 @@ import ast from copy import deepcopy -import typing import dsp from pydantic import BaseModel, Field, create_model from pydantic.fields import FieldInfo +import typing +import types from typing import Any, Type, Union, Dict, Tuple # noqa: UP035 import re @@ -62,7 +63,7 @@ def _validate_fields(cls): field_type = extra.get("__dspy_field_type") if field_type not in ["input", "output"]: raise TypeError( - f"Field '{name}' in '{cls.__name__}' must be declared with InputField or OutputField.", + f"Field '{name}' in '{cls.__name__}' must be declared with InputField or OutputField. {field.json_schema_extra=}", ) @property @@ -232,7 +233,8 @@ def make_signature( # program of thought and teleprompters, so we just silently default to string. 
if type_ is None:
            type_ = str
-        if not isinstance(type_, type) and not isinstance(typing.get_origin(type_), type):
+        # if not isinstance(type_, type) and not isinstance(typing.get_origin(type_), type):
+        if not isinstance(type_, (type, typing._GenericAlias, types.GenericAlias)):
             raise ValueError(f"Field types must be types, not {type(type_)}")
         if not isinstance(field, FieldInfo):
             raise ValueError(f"Field values must be Field instances, not {type(field)}")
diff --git a/tests/functional/test_functional.py b/tests/functional/test_functional.py
index 600e35b63d..ebca992230 100644
--- a/tests/functional/test_functional.py
+++ b/tests/functional/test_functional.py
@@ -2,7 +2,7 @@
 import textwrap
 import pydantic
 from pydantic import Field, BaseModel, field_validator
-from typing import Annotated
+from typing import Annotated, Literal
 from typing import List

 import pytest
@@ -495,6 +495,28 @@ def test_parse_type_string():
     assert output == [0, 1, 2]


+def test_literal():
+    lm = DummyLM([f'{{"value": "{i}"}}' for i in range(100)])
+    dspy.settings.configure(lm=lm)
+
+    @predictor
+    def f() -> Literal["2", "3"]:
+        pass
+
+    assert f() == "2"
+
+
+def test_literal_int():
+    lm = DummyLM([f'{{"value": {i}}}' for i in range(100)])
+    dspy.settings.configure(lm=lm)
+
+    @predictor
+    def f() -> Literal[2, 3]:
+        pass
+
+    assert f() == 2
+
+
 def test_fields_on_base_signature():
     class SimpleOutput(dspy.Signature):
         output: float = dspy.OutputField(gt=0, lt=1)

From d633a7782619a511a920030084f96430cab8d5f6 Mon Sep 17 00:00:00 2001
From: Thomas D Ahle
Date: Tue, 5 Mar 2024 23:49:43 -0800
Subject: [PATCH 118/243] Formatting

---
 dspy/signatures/field.py | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/dspy/signatures/field.py b/dspy/signatures/field.py
index 7822c625c0..4e32714778 100644
--- a/dspy/signatures/field.py
+++ b/dspy/signatures/field.py
@@ -1,5 +1,10 @@
 import pydantic

+# The following arguments can be used in DSPy InputField and OutputField in addition
+# to the standard pydantic.Field arguments. We just hope pydantic doesn't add these,
+# as it would give a name clash.
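+#
+# As a rough illustration (the argument values here are made up), a call like
+# InputField(desc="the question", min_length=3) is split by move_kwargs below into
+# pydantic_kwargs == {"min_length": 3} and
+# json_schema_extra == {"desc": "the question", "__dspy_field_type": "input"},
+# so pydantic only ever sees the keyword arguments it knows about.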
+DSPY_FIELD_ARG_NAMES = ["desc", "prefix", "format", "parser", "__dspy_field_type"] + def move_kwargs(**kwargs): # Pydantic doesn't allow arbitrary arguments to be given to fields, @@ -10,7 +15,7 @@ def move_kwargs(**kwargs): pydantic_kwargs = {} json_schema_extra = {} for k, v in kwargs.items(): - if k in ["desc", "prefix", "format", "parser", "__dspy_field_type"]: + if k in DSPY_FIELD_ARG_NAMES: json_schema_extra[k] = v else: pydantic_kwargs[k] = v @@ -27,11 +32,7 @@ def OutputField(**kwargs): def new_to_old_field(field): - return ( - OldInputField - if field.json_schema_extra["__dspy_field_type"] == "input" - else OldOutputField - )( + return (OldInputField if field.json_schema_extra["__dspy_field_type"] == "input" else OldOutputField)( prefix=field.json_schema_extra["prefix"], desc=field.json_schema_extra["desc"], format=field.json_schema_extra.get("format"), From 72838473f812a2c72b9f117b9b72c2258001c9d2 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Wed, 6 Mar 2024 01:25:41 -0800 Subject: [PATCH 119/243] Created typed signature optimizer --- dspy/__init__.py | 1 + dspy/functional/functional.py | 22 +- dspy/primitives/module.py | 7 +- dspy/signatures/signature.py | 46 +- ...gnature_opt2.py => signature_opt_typed.py} | 148 ++-- examples/functional/signature_opt_typed.ipynb | 706 ++++++++++++++++++ examples/signature_opt2.ipynb | 246 ------ tests/functional/test_functional.py | 19 +- ...re_opt2.py => test_signature_opt_typed.py} | 20 +- tests/primitives/test_program.py | 10 +- tests/signatures/test_signature.py | 32 + 11 files changed, 902 insertions(+), 355 deletions(-) rename dspy/teleprompt/{signature_opt2.py => signature_opt_typed.py} (63%) create mode 100644 examples/functional/signature_opt_typed.ipynb delete mode 100644 examples/signature_opt2.ipynb rename tests/functional/{test_signature_opt2.py => test_signature_opt_typed.py} (91%) diff --git a/dspy/__init__.py b/dspy/__init__.py index 98fe6fba89..43317f3d1b 100644 --- a/dspy/__init__.py +++ b/dspy/__init__.py @@ -5,6 +5,7 @@ from .retrieve import * from .predict import * from .primitives import * +from .functional import * # from .evaluation import * diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py index 3d6c471414..ed14c66cad 100644 --- a/dspy/functional/functional.py +++ b/dspy/functional/functional.py @@ -118,20 +118,32 @@ def _prepare_signature(self) -> dspy.Signature: format=lambda x: x if isinstance(x, str) else str(x), parser=type_, ) + elif False: + # TODO: I don't like forcing the model to write "value" in the output. 
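+                # (Sketch of the disabled idea: strip the {"value": ...} envelope so the
+                # model emits just the bare value, e.g. 123 rather than {"value": 123};
+                # from_json then re-wraps the raw text before pydantic validation.)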
+                if not (inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel)):
+                    type_ = pydantic.create_model("Output", value=(type_, ...), __base__=pydantic.BaseModel)
+                    to_json = lambda x, type_=type_: type_(value=x).model_dump_json()[9:-1]  # {"value":"123"}
+                    from_json = lambda x, type_=type_: type_.model_validate_json('{"value":' + x + "}").value
+                    schema = json.dumps(type_.model_json_schema()["properties"]["value"])
+                else:
+                    to_json = lambda x: x.model_dump_json()
+                    from_json = lambda x, type_=type_: type_.model_validate_json(x)
+                    schema = json.dumps(type_.model_json_schema())
             else:
                 # Anything else we wrap in a pydantic object
-                to_json = lambda x: x.model_dump_json()
-                from_json = lambda x, type_=type_: type_.model_validate_json(x)
                 if not (inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel)):
                     type_ = pydantic.create_model("Output", value=(type_, ...), __base__=pydantic.BaseModel)
                     to_json = lambda x, type_=type_: type_(value=x).model_dump_json()
                     from_json = lambda x, type_=type_: type_.model_validate_json(x).value
+                    schema = json.dumps(type_.model_json_schema())
+                else:
+                    to_json = lambda x: x.model_dump_json()
+                    from_json = lambda x, type_=type_: type_.model_validate_json(x)
+                    schema = json.dumps(type_.model_json_schema())
             signature = signature.with_updated_fields(
                 name,
                 desc=field.json_schema_extra.get("desc", "")
-                + (
-                    ". Respond with a single JSON object. JSON Schema: " + json.dumps(type_.model_json_schema())
-                ),
+                + (". Respond with a single JSON object. JSON Schema: " + schema),
                 format=lambda x, to_json=to_json: (x if isinstance(x, str) else to_json(x)),
                 parser=lambda x, from_json=from_json: from_json(_unwrap_json(x)),
                 type_=type_,
diff --git a/dspy/primitives/module.py b/dspy/primitives/module.py
index 989f68403d..9ce327a26e 100644
--- a/dspy/primitives/module.py
+++ b/dspy/primitives/module.py
@@ -42,12 +42,11 @@ def add_parameter(param_name, param_value):

         return named_parameters

-    def named_sub_modules(self) -> Generator[tuple[str, "BaseModule"], None, None]:
-        yield "", self
+    def named_sub_modules(self, root_name="base") -> Generator[tuple[str, "BaseModule"], None, None]:
+        yield root_name, self
         for name, value in self.__dict__.items():
             if isinstance(value, BaseModule):
-                for sub_name, sub_value in value.named_sub_modules():
-                    yield f"{name}.{sub_name}", sub_value
+                yield from value.named_sub_modules(root_name=f"{root_name}.{name}")

     def parameters(self):
         return [param for _, param in self.named_parameters()]
diff --git a/dspy/signatures/signature.py b/dspy/signatures/signature.py
index 62626c8e1d..40eaf28aac 100644
--- a/dspy/signatures/signature.py
+++ b/dspy/signatures/signature.py
@@ -42,6 +42,17 @@ def __new__(mcs, signature_name, bases, namespace, **kwargs):  # noqa: N804
         # Let Pydantic do its thing
         cls = super().__new__(mcs, signature_name, bases, namespace, **kwargs)

+        # If we don't have instructions, it might be because we are a derived generic type.
+        # In that case, we should inherit the instructions from the base class.
+        if cls.__doc__ is None:
+            for base in bases:
+                if isinstance(base, SignatureMeta):
+                    doc = getattr(base, "__doc__", "")
+                    if doc != "":
+                        cls.__doc__ = doc
+
+        # The more likely case is that the user has simply not given us any instructions.
+        # In that case, we should default to the input/output format.
         if cls.__doc__ is None:
             cls.__doc__ = _default_instructions(cls)

@@ -168,24 +179,27 @@ def __repr__(cls):
         return f"{cls.__name__}({cls.signature}\n    instructions={repr(cls.instructions)}\n    {field_repr}\n)"


+# A signature for a predictor.
+#
+# You typically subclass it, like this:
+#     class MySignature(Signature):
+#         input: str = InputField(desc="...")  # noqa: ERA001
+#         output: int = OutputField(desc="...")  # noqa: ERA001
+#
+# You can call Signature("input1, input2 -> output1, output2") to create a new signature type.
+# You can also include instructions, Signature("input -> output", "This is a test").
+# But it's generally better to use the make_signature function.
+#
+# If you are not sure if your input is a string representation, (like "input1, input2 -> output1, output2"),
+# or a signature, you can use the ensure_signature function.
+#
+# For compatibility with the legacy dsp format, you can use the signature_to_template function.
+#
 class Signature(BaseModel, metaclass=SignatureMeta):
-    """A signature for a predictor.
-
-    You typically subclass it, like this:
-        class MySignature(Signature):
-            input: str = InputField(desc="...")
-            output: int = OutputField(desc="...")
-
-    You can call Signature("input1, input2 -> output1, output2") to create a new signature type.
-    You can also include instructions, Signature("input -> output", "This is a test").
-    But it's generally better to use the make_signature function.
-
-    If you are not sure if your input is a string representation, (like "input1, input2 -> output1, output2"),
-    or a signature, you can use the ensure_signature function.
-
-    For compatibility with the legacy dsp format, you can use the signature_to_template function.
-    """
+    ""  # noqa: D419
+    # Note: Don't put a docstring here, as it will become the default instructions
+    # for any signature that doesn't define its own instructions.

     pass

diff --git a/dspy/teleprompt/signature_opt2.py b/dspy/teleprompt/signature_opt_typed.py
similarity index 63%
rename from dspy/teleprompt/signature_opt2.py
rename to dspy/teleprompt/signature_opt_typed.py
index 1a9045ae23..956ea0f0cd 100644
--- a/dspy/teleprompt/signature_opt2.py
+++ b/dspy/teleprompt/signature_opt_typed.py
@@ -1,13 +1,17 @@
-import random
-from typing import Generic, Literal, TypeVar, Type
+import textwrap
+from typing import Generic, Literal, TypeVar

 import pydantic

 import dspy
-from dspy.functional.functional import TypedChainOfThought
+from dspy.functional.functional import TypedChainOfThought, TypedPredictor
 from dspy.signatures import Signature
 from dspy import BaseModel
+from dspy.signatures.field import InputField, OutputField

-# TODO: Consider using the prompt optimizer to optimize the prompt optimizer :O
+# TODO:
+# - Parallelize the generation of new signatures when we have multiple predictors
+# - Consider generating multiple new signatures at once, which we can test in parallel
+# - Consider using the prompt optimizer to optimize the prompt optimizer :O


 def make_info(signature: type[Signature]) -> BaseModel:
@@ -55,32 +59,37 @@ def to_signature(info):
 # Note: This function wouldn't be necessary if we could make the number of prompts a generic parameter of the class,
 # but alas it seems like this isn't possible in Python right now. The main reason being that types and generics only
 # live inside the type system, and can't be used to generate code at runtime.
-def make_initial_signature(n_prompts: int) -> Type[Signature]:
+def make_initial_signature(n_prompts: int) -> type[Signature]:
     """Creates a GenerateInstructionInitial signature with the given number of initial prompts."""

     class GenerateInstructionInitial(Signature, Generic[T]):
-        """You are a creative instruction optimizer for large language models.
+        # TODO: Can we make textwrap default/automatic in all signatures?
+        __doc__ = textwrap.dedent("""\
+        You are a creative instruction optimizer for large language models.

        I will give you a ``signature`` of fields (inputs and outputs) in English.
        Your task is to propose variations of the signature that will lead a good language model to perform the task well.

        Be very creative and think out of the box.
+        You can use as long instructions as you want.
        Consider using inspiration such as:
        Openers:
        - You are as smart as ChatGPT.
        - You are highly intelligent.
        - You are an expert mathematician.
        - You are a professor of mathematics.
        Task Descriptions:
        - Be concise in your answer.
        - Be as clear as possible.
        - Use lots of creativity.
        Closers:
        - This will be fun!
        - Take a deep breath and think carefully.
        - I really need your help!
-        """
+        """)

-    basic_signature: T = dspy.InputField()
-    proposed_signatures: list[T] = dspy.OutputField(
+    basic_signature: T = InputField()
+    proposed_signatures: list[T] = OutputField(
         desc=f"A list of {n_prompts} very different variations of the basic signature",
         min_items=n_prompts,
         max_items=n_prompts,
     )

     return GenerateInstructionInitial


-class ScoredSignature(BaseModel, Generic[T]):
-    signature: T
-    score: float = dspy.Field(gt=0, lt=100)
-
-
-class GenerateInstructionGivenAttempts(dspy.Signature, Generic[T]):
-    """You are an instruction optimizer for large language models.
+class GenerateSignature(dspy.Signature, Generic[T]):
+    __doc__ = textwrap.dedent("""\
+    You are an instruction optimizer for large language models.

    I will give you some task instructions I've tried, along with their corresponding validation scores.
-    - The instructions are arranged in increasing order based on their scores, where higher scores indicate better quality.
+    - The instructions are arranged in order based on their scores, where higher scores indicate better quality.
    - Your task is to propose a new instruction that will lead a good language model to perform the task even better.
    - Be creative, and think out of the box.
    - Don't repeat instructions, descriptions and prefixes that have already been attempted.
-    """
+    """)

-    attempted_signatures: list[ScoredSignature[T]] = dspy.InputField()
-    proposed_signature: T = dspy.OutputField(desc="Next signature to try")
-    # expected_score: float = dspy.OutputField(desc="The expected score for the new signature")
+    analysis: str = OutputField(desc="Consider what made the previous instructions good or bad.")
+    proposed_signature: T = OutputField(desc="A signature that will likely lead to a high score.")
+    score: float = OutputField(desc="The expected score for the new signature. Don't write anything after this number.")


 def optimize_signature(
@@ -114,10 +119,10 @@ def optimize_signature(
     evaluator,
     n_iterations=10,
     strategy: Literal["best", "last"] = "best",
+    sorted_order: Literal["increasing", "decreasing"] = "increasing",
     # Formerly part of the constructor
     prompt_model=None,
     initial_prompts=2,
-    temperature=1.4,
     verbose=False,
 ) -> dspy.Program:
     """Create a new program that is optimized for the given task.
@@ -135,25 +140,39 @@ def optimize_signature(
         The number of iterations to run, by default 10
     strategy : Literal["best", "last"], optional
         The strategy to use to select the final program, by default "best"
+    sorted_order : Literal["increasing", "decreasing"], optional
+        The order in which to sort the scores, by default "increasing"
     prompt_model : dspy.LanguageModel, optional
         The language model to use to generate prompts, by default None
     initial_prompts : int, optional
         The number of initial prompts to generate, by default 2.
         Note that we also use the "plain" signature as a prompt, so the total number of prompts is initial_prompts + 1.
-    temperature : float, optional
-        The temperature to use when generating new prompts, by default 1.4
     verbose : bool, optional
         Whether to print debug information, by default False
+
+    Notes:
+    -----
+    We don't support temperatures, since they tend to break the typed generation.
     """
+    if n_iterations < 1 + initial_prompts:
+        raise ValueError("n_iterations must be at least 1 + initial_prompts")
+
     prompt_model = prompt_model or dspy.settings.lm
     MyGenerateInstructionInitial = make_initial_signature(initial_prompts)  # noqa: N806

     module = student.deepcopy()
-    # For some reason named_predictors sometimes returns an empty list, so we use named_parameters instead
-    named_predictors = module.named_parameters()
+    # In contrast to the original implementation, we don't want the Predicts, but the TypedPredictors.
+    # This is because TypedPredictor changes the signature before it runs forward. So changing the signature
+    # on the Predicts won't work.
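+    # named_sub_modules() yields ("base", <module>) and then ("base.<attr>", <child>)
+    # pairs, so a TypedPredictor stored at, for example, `self.qa` would appear here
+    # as ("base.qa", <TypedPredictor>).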
+ named_predictors = [ + (name, module) + for name, module in module.named_sub_modules() + if isinstance(module, TypedPredictor) and not getattr(module, "_compiled", False) + ] + if not named_predictors: + raise ValueError("No unfrozen/uncompiled TypedPredictors found in the module.") if verbose: - print("All predictors:") - print(f"{named_predictors=}") + print(f"Found {len(named_predictors)} typed predictors to optimize.") candidates = {} scores = [] @@ -165,45 +184,30 @@ def optimize_signature( # Make some initial candidates with dspy.settings.context(lm=prompt_model): # TODO: Parallelize this - for name, p in named_predictors: + for name, _p in named_predictors: if verbose: - print(f"Generating new signature for {p}...") + print(f"Generating {initial_prompts} initial signatures for {name}...") info = candidates[name][0] # Use initial info, to make sure types are identical generator = TypedChainOfThought(MyGenerateInstructionInitial[type(info)]) candidates[name] += generator( basic_signature=info, - config={"temperature": temperature}, ).proposed_signatures assert len(candidates[name]) == initial_prompts + 1 # Basic signature + initial prompts - candidates[name] = [ - info.model_copy(update={"instructions": info.instructions + f"({i})"}) - for i, info in enumerate(candidates[name]) - ] - - for i, c in enumerate(candidates[name]): - print(f"Generated candidate {i}:") - print(c.to_signature()) - # Main loop of scoring + generating new candidates for i in range(n_iterations): if verbose: print("\n" + "=" * 80) print(f"Running eval iteration {i}...") - # Test candidate i - for p in module.predictors(): - print(f"Installing signature {i}: ") - print(candidates[name][i].to_signature()) + # Install signatures + for name, p in named_predictors: p.signature = candidates[name][i].to_signature() + # Run evaluator given by user score = evaluator(module) - score += random.random() * 10 scores.append(score) - if verbose: - print(f"Scores for iteration {i}: {score}") - # If we are still testing initial prompts, continue if i + 1 < len(next(iter(candidates.values()))): continue @@ -215,25 +219,23 @@ def optimize_signature( # Otherwise generate the next candidate with dspy.settings.context(lm=prompt_model): # TODO: Parallelize this - for name, p in named_predictors: + for name, _p in named_predictors: SignatureInfo = type(candidates[name][0]) # noqa: N806 - generator = TypedChainOfThought(GenerateInstructionGivenAttempts[SignatureInfo]) - attempted_signatures = [ - ScoredSignature[SignatureInfo](signature=info, score=sc) + generator = TypedPredictor(GenerateSignature[SignatureInfo]) + + demos = [ + dspy.Example( + proposed_signature=info, + score=sc, + ) for info, sc in zip(candidates[name], scores) ] - attempted_signatures.sort(key=lambda x: x.score) - if verbose: - print( - f"Generating new signature for {name} based on {len(attempted_signatures)} previous signatures..." 
- ) - new_signature = generator( - attempted_signatures=attempted_signatures, - config={"temperature": temperature}, - ).proposed_signature + demos.sort(key=(lambda x: x.score), reverse=(sorted_order == "decreasing")) + generator.predictor.demos = demos + if verbose: - print("Generated candidate:") - print(new_signature.to_signature()) + print(f"Generating new signature for {name}...") + new_signature = generator().proposed_signature candidates[name].append(new_signature) if strategy == "last": diff --git a/examples/functional/signature_opt_typed.ipynb b/examples/functional/signature_opt_typed.ipynb new file mode 100644 index 0000000000..32db183bde --- /dev/null +++ b/examples/functional/signature_opt_typed.ipynb @@ -0,0 +1,706 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "\n", + "import os\n", + "from dotenv import load_dotenv\n", + "load_dotenv()\n", + "assert 'OPENAI_API_KEY' in os.environ" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/opt/homebrew/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + } + ], + "source": [ + "import dspy\n", + "turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=4000)\n", + "gpt4 = dspy.OpenAI(model='gpt-4', max_tokens=4000)\n", + "dspy.settings.configure(lm=turbo)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Prediction(\n", + " answer='Paris'\n", + ")" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "dspy.TypedPredictor(\"question -> answer\")(question=\"What is the capital of France?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(20, 50)" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from dspy.datasets import HotPotQA\n", + "\n", + "# Load the dataset.\n", + "dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0)\n", + "\n", + "# Tell DSPy that the 'question' field is the input. 
Any other fields are labels and/or metadata.\n", + "trainset = [x.with_inputs('question') for x in dataset.train]\n", + "devset = [x.with_inputs('question') for x in dataset.dev]\n", + "\n", + "len(trainset), len(devset)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "class BasicQA(dspy.Signature):\n", + " \"\"\"Answer questions with short factoid answers.\"\"\"\n", + "\n", + " question = dspy.InputField()\n", + " answer = dspy.OutputField(desc=\"often between 1 and 5 words\")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found 1 typed predictors to optimize.\n", + "Generating 2 initial signatures for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 0...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 2233.25it/s]\n", + "/Users/ahle/repos/dspy/dspy/evaluate/evaluate.py:142: FutureWarning: DataFrame.applymap has been deprecated. Use DataFrame.map instead.\n", + " df = df.applymap(truncate_cell)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 16 / 50 (32.0%)\n", + "\n", + "================================================================================\n", + "Running eval iteration 1...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 14 / 50 (28.0): 100%|██████████| 50/50 [00:02<00:00, 24.02it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 14 / 50 (28.0%)\n", + "\n", + "================================================================================\n", + "Running eval iteration 2...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:02<00:00, 20.98it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 15 / 50 (30.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 3...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 7 / 50 (14.0): 100%|██████████| 50/50 [00:36<00:00, 1.36it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 7 / 50 (14.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 4...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 2 / 5 (40.0): 8%|▊ | 4/50 [00:00<00:04, 9.85it/s]" + ] + } + ], + "source": [ + "from dspy.evaluate import Evaluate\n", + "from dspy.evaluate.metrics import answer_exact_match\n", + "from dspy.functional import TypedPredictor\n", + "from dspy.teleprompt.signature_opt_typed import optimize_signature\n", + "\n", + "evaluator = Evaluate(devset=devset, metric=answer_exact_match, num_threads=10, display_progress=True)\n", + "\n", + "program = optimize_signature(\n", + " student=TypedPredictor(BasicQA),\n", + " evaluator=evaluator,\n", + " initial_prompts=2,\n", + " n_iterations=8,\n", + " verbose=True,\n", + " prompt_model=gpt4,\n", + ")" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\n", + "\n", + "Given the fields `basic_signature`, produce the fields `proposed_signatures`.\n", + "\n", + "---\n", + "\n", + "Follow the following format.\n", + "\n", + "Basic Signature: ${basic_signature}\n", + "Reasoning: Let's think step by step in order to ${produce the proposed_signatures}. We ...\n", + "Proposed Signatures: A list of 2 very different variations of the basic signature. Respond with a single JSON object. JSON Schema: {\"$defs\": {\"SignatureInfo_BasicQA_\": {\"properties\": {\"instructions\": {\"description\": \"The instructions for the task\", \"title\": \"Instructions\", \"type\": \"string\"}, \"question_prefix\": {\"description\": \"The prefix for question\", \"title\": \"Question Prefix\", \"type\": \"string\"}, \"question_desc\": {\"description\": \"The description for question\", \"title\": \"Question Desc\", \"type\": \"string\"}, \"answer_prefix\": {\"description\": \"The prefix for answer\", \"title\": \"Answer Prefix\", \"type\": \"string\"}, \"answer_desc\": {\"description\": \"The description for answer\", \"title\": \"Answer Desc\", \"type\": \"string\"}}, \"required\": [\"instructions\", \"question_prefix\", \"question_desc\", \"answer_prefix\", \"answer_desc\"], \"title\": \"SignatureInfo[BasicQA]\", \"type\": \"object\"}}, \"properties\": {\"value\": {\"items\": {\"$ref\": \"#/$defs/SignatureInfo_BasicQA_\"}, \"title\": \"Value\", \"type\": \"array\"}}, \"required\": [\"value\"], \"title\": \"Output\", \"type\": \"object\"}\n", + "\n", + "---\n", + "\n", + "Basic Signature: {\"instructions\":\"Answer questions with short factoid answers.\",\"question_prefix\":\"Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 5 words\"}\n", + "Reasoning: Let's think step by step in order to\u001b[32m produce the proposed_signatures. We can modify the instructions to specify the type of questions to be answered. We can also change the answer description to specify the length of the answer in terms of sentences instead of words. \n", + "Proposed Signatures: \n", + "{\n", + " \"value\": [\n", + " {\n", + " \"instructions\": \"Answer trivia questions with short factoid answers.\",\n", + " \"question_prefix\": \"Trivia Question:\",\n", + " \"question_desc\": \"${question}\",\n", + " \"answer_prefix\": \"Answer:\",\n", + " \"answer_desc\": \"often a single sentence\"\n", + " },\n", + " {\n", + " \"instructions\": \"Answer general knowledge questions with short factoid answers.\",\n", + " \"question_prefix\": \"General Knowledge Question:\",\n", + " \"question_desc\": \"${question}\",\n", + " \"answer_prefix\": \"Answer:\",\n", + " \"answer_desc\": \"often between 1 and 2 sentences\"\n", + " }\n", + " ]\n", + "}\u001b[0m\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Given the fields , produce the fields `analysis`, `proposed_signature`, `score`.\n", + "\n", + "---\n", + "\n", + "Follow the following format.\n", + "\n", + "Analysis: Consider what made the previous instructions good or bad.\n", + "Proposed Signature: A signature that will likely lead to a high score.. Respond with a single JSON object. 
JSON Schema: {\"properties\": {\"instructions\": {\"description\": \"The instructions for the task\", \"title\": \"Instructions\", \"type\": \"string\"}, \"question_prefix\": {\"description\": \"The prefix for question\", \"title\": \"Question Prefix\", \"type\": \"string\"}, \"question_desc\": {\"description\": \"The description for question\", \"title\": \"Question Desc\", \"type\": \"string\"}, \"answer_prefix\": {\"description\": \"The prefix for answer\", \"title\": \"Answer Prefix\", \"type\": \"string\"}, \"answer_desc\": {\"description\": \"The description for answer\", \"title\": \"Answer Desc\", \"type\": \"string\"}}, \"required\": [\"instructions\", \"question_prefix\", \"question_desc\", \"answer_prefix\", \"answer_desc\"], \"title\": \"SignatureInfo[BasicQA]\", \"type\": \"object\"}\n", + "Score: The expected score for the new signature. Don't write anything after this number. (Respond with a single float value)\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer general knowledge questions with short factoid answers.\",\"question_prefix\":\"General Knowledge Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 2 sentences\"}\n", + "Score: 8.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer trivia questions with short factoid answers.\",\"question_prefix\":\"Trivia Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence\"}\n", + "Score: 22.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer questions with short factoid answers.\",\"question_prefix\":\"Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 5 words\"}\n", + "Score: 32.0\n", + "\n", + "---\n", + "\n", + "Analysis:\u001b[32m The previous instructions were clear and concise, providing a straightforward task for the user. The use of placeholders for the question and answer descriptions allows for flexibility and adaptability. The instructions also specify the expected length of the answer, which can help guide the user in providing an appropriate response.\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer historical questions with short factoid answers.\",\"question_prefix\":\"History Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence or a specific date\"}\n", + "\n", + "Score: 28.0\u001b[0m\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Given the fields , produce the fields `analysis`, `proposed_signature`, `score`.\n", + "\n", + "---\n", + "\n", + "Follow the following format.\n", + "\n", + "Analysis: Consider what made the previous instructions good or bad.\n", + "Proposed Signature: A signature that will likely lead to a high score.. Respond with a single JSON object. 
JSON Schema: {\"properties\": {\"instructions\": {\"description\": \"The instructions for the task\", \"title\": \"Instructions\", \"type\": \"string\"}, \"question_prefix\": {\"description\": \"The prefix for question\", \"title\": \"Question Prefix\", \"type\": \"string\"}, \"question_desc\": {\"description\": \"The description for question\", \"title\": \"Question Desc\", \"type\": \"string\"}, \"answer_prefix\": {\"description\": \"The prefix for answer\", \"title\": \"Answer Prefix\", \"type\": \"string\"}, \"answer_desc\": {\"description\": \"The description for answer\", \"title\": \"Answer Desc\", \"type\": \"string\"}}, \"required\": [\"instructions\", \"question_prefix\", \"question_desc\", \"answer_prefix\", \"answer_desc\"], \"title\": \"SignatureInfo[BasicQA]\", \"type\": \"object\"}\n", + "Score: The expected score for the new signature. Don't write anything after this number. (Respond with a single float value)\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer general knowledge questions with short factoid answers.\",\"question_prefix\":\"General Knowledge Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 2 sentences\"}\n", + "Score: 8.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer trivia questions with short factoid answers.\",\"question_prefix\":\"Trivia Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence\"}\n", + "Score: 22.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer historical questions with short factoid answers.\",\"question_prefix\":\"History Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence or a specific date\"}\n", + "Score: 22.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer questions with short factoid answers.\",\"question_prefix\":\"Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 5 words\"}\n", + "Score: 32.0\n", + "\n", + "---\n", + "\n", + "Analysis:\u001b[32m The previous instructions were clear and concise, providing a specific context for the type of questions to be answered (general knowledge, trivia, history). They also provided guidance on the format of the answer, specifying the length and type of response expected. However, the score could be improved by providing more detailed instructions and specifying the complexity of the questions and answers.\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer complex scientific questions with detailed factoid answers. Use reliable sources and provide references if possible.\",\"question_prefix\":\"Science Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 2 and 3 sentences, including specific scientific terms and concepts\"}\n", + "\n", + "Score: 35.0\u001b[0m\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Given the fields , produce the fields `analysis`, `proposed_signature`, `score`.\n", + "\n", + "---\n", + "\n", + "Follow the following format.\n", + "\n", + "Analysis: Consider what made the previous instructions good or bad.\n", + "Proposed Signature: A signature that will likely lead to a high score.. Respond with a single JSON object. 
JSON Schema: {\"properties\": {\"instructions\": {\"description\": \"The instructions for the task\", \"title\": \"Instructions\", \"type\": \"string\"}, \"question_prefix\": {\"description\": \"The prefix for question\", \"title\": \"Question Prefix\", \"type\": \"string\"}, \"question_desc\": {\"description\": \"The description for question\", \"title\": \"Question Desc\", \"type\": \"string\"}, \"answer_prefix\": {\"description\": \"The prefix for answer\", \"title\": \"Answer Prefix\", \"type\": \"string\"}, \"answer_desc\": {\"description\": \"The description for answer\", \"title\": \"Answer Desc\", \"type\": \"string\"}}, \"required\": [\"instructions\", \"question_prefix\", \"question_desc\", \"answer_prefix\", \"answer_desc\"], \"title\": \"SignatureInfo[BasicQA]\", \"type\": \"object\"}\n", + "Score: The expected score for the new signature. Don't write anything after this number. (Respond with a single float value)\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer complex scientific questions with detailed factoid answers. Use reliable sources and provide references if possible.\",\"question_prefix\":\"Science Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 2 and 3 sentences, including specific scientific terms and concepts\"}\n", + "Score: 0.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer general knowledge questions with short factoid answers.\",\"question_prefix\":\"General Knowledge Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 2 sentences\"}\n", + "Score: 8.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer trivia questions with short factoid answers.\",\"question_prefix\":\"Trivia Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence\"}\n", + "Score: 22.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer historical questions with short factoid answers.\",\"question_prefix\":\"History Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence or a specific date\"}\n", + "Score: 22.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer questions with short factoid answers.\",\"question_prefix\":\"Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 5 words\"}\n", + "Score: 32.0\n", + "\n", + "---\n", + "\n", + "Analysis:\u001b[32m The previous instructions were good as they were clear and concise. They provided a clear context for the type of questions to be answered (e.g., science, general knowledge, trivia, history) and also gave guidance on the expected length and format of the answers. 
However, the first proposed signature scored 0.0 because it asked for complex scientific answers with references, which may not be feasible in a short-answer format.\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer geography questions with short factoid answers.\",\"question_prefix\":\"Geography Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence or a specific place\"}\n", + "\n", + "Score: 22.0\u001b[0m\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Given the fields , produce the fields `analysis`, `proposed_signature`, `score`.\n", + "\n", + "---\n", + "\n", + "Follow the following format.\n", + "\n", + "Analysis: Consider what made the previous instructions good or bad.\n", + "Proposed Signature: A signature that will likely lead to a high score.. Respond with a single JSON object. JSON Schema: {\"properties\": {\"instructions\": {\"description\": \"The instructions for the task\", \"title\": \"Instructions\", \"type\": \"string\"}, \"question_prefix\": {\"description\": \"The prefix for question\", \"title\": \"Question Prefix\", \"type\": \"string\"}, \"question_desc\": {\"description\": \"The description for question\", \"title\": \"Question Desc\", \"type\": \"string\"}, \"answer_prefix\": {\"description\": \"The prefix for answer\", \"title\": \"Answer Prefix\", \"type\": \"string\"}, \"answer_desc\": {\"description\": \"The description for answer\", \"title\": \"Answer Desc\", \"type\": \"string\"}}, \"required\": [\"instructions\", \"question_prefix\", \"question_desc\", \"answer_prefix\", \"answer_desc\"], \"title\": \"SignatureInfo[BasicQA]\", \"type\": \"object\"}\n", + "Score: The expected score for the new signature. Don't write anything after this number. (Respond with a single float value)\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer complex scientific questions with detailed factoid answers. 
Use reliable sources and provide references if possible.\",\"question_prefix\":\"Science Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 2 and 3 sentences, including specific scientific terms and concepts\"}\n", + "Score: 0.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer general knowledge questions with short factoid answers.\",\"question_prefix\":\"General Knowledge Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 2 sentences\"}\n", + "Score: 8.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer geography questions with short factoid answers.\",\"question_prefix\":\"Geography Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence or a specific place\"}\n", + "Score: 16.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer trivia questions with short factoid answers.\",\"question_prefix\":\"Trivia Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence\"}\n", + "Score: 22.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer historical questions with short factoid answers.\",\"question_prefix\":\"History Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence or a specific date\"}\n", + "Score: 22.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer questions with short factoid answers.\",\"question_prefix\":\"Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 5 words\"}\n", + "Score: 32.0\n", + "\n", + "---\n", + "\n", + "Analysis:\u001b[32m The previous instructions were good as they were clear, concise, and specific. They provided a clear context (e.g., science, general knowledge, geography, trivia, history) for the type of questions to be answered, which can help in generating more accurate and relevant responses. The instructions also specified the format of the answers (e.g., short factoid answers, often a single sentence or a specific place/date), which can help in maintaining consistency in the responses. However, the first proposed signature had a score of 0.0, possibly because it asked for complex scientific questions to be answered with detailed factoid answers and references, which may not be feasible or practical in many cases.\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer general knowledge questions with concise factoid answers.\",\"question_prefix\":\"General Knowledge Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence or a specific fact\"}\n", + "\n", + "Score: 25.0\u001b[0m\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Given the fields , produce the fields `analysis`, `proposed_signature`, `score`.\n", + "\n", + "---\n", + "\n", + "Follow the following format.\n", + "\n", + "Analysis: Consider what made the previous instructions good or bad.\n", + "Proposed Signature: A signature that will likely lead to a high score.. Respond with a single JSON object. 
JSON Schema: {\"properties\": {\"instructions\": {\"description\": \"The instructions for the task\", \"title\": \"Instructions\", \"type\": \"string\"}, \"question_prefix\": {\"description\": \"The prefix for question\", \"title\": \"Question Prefix\", \"type\": \"string\"}, \"question_desc\": {\"description\": \"The description for question\", \"title\": \"Question Desc\", \"type\": \"string\"}, \"answer_prefix\": {\"description\": \"The prefix for answer\", \"title\": \"Answer Prefix\", \"type\": \"string\"}, \"answer_desc\": {\"description\": \"The description for answer\", \"title\": \"Answer Desc\", \"type\": \"string\"}}, \"required\": [\"instructions\", \"question_prefix\", \"question_desc\", \"answer_prefix\", \"answer_desc\"], \"title\": \"SignatureInfo[BasicQA]\", \"type\": \"object\"}\n", + "Score: The expected score for the new signature. Don't write anything after this number. (Respond with a single float value)\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer complex scientific questions with detailed factoid answers. Use reliable sources and provide references if possible.\",\"question_prefix\":\"Science Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 2 and 3 sentences, including specific scientific terms and concepts\"}\n", + "Score: 0.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer general knowledge questions with short factoid answers.\",\"question_prefix\":\"General Knowledge Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 2 sentences\"}\n", + "Score: 8.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer general knowledge questions with concise factoid answers.\",\"question_prefix\":\"General Knowledge Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence or a specific fact\"}\n", + "Score: 14.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer geography questions with short factoid answers.\",\"question_prefix\":\"Geography Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence or a specific place\"}\n", + "Score: 16.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer trivia questions with short factoid answers.\",\"question_prefix\":\"Trivia Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence\"}\n", + "Score: 22.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer historical questions with short factoid answers.\",\"question_prefix\":\"History Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence or a specific date\"}\n", + "Score: 22.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer questions with short factoid answers.\",\"question_prefix\":\"Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 5 words\"}\n", + "Score: 32.0\n", + "\n", + "---\n", + "\n", + "Analysis:\u001b[32m The previous instructions were good because they were clear, concise, and specific. 
They provided a clear context (e.g., science, general knowledge, geography, trivia, history) for the type of questions to be answered, and they specified the format of the answer (e.g., short factoid, single sentence, specific fact or date). However, the first proposed signature had a score of 0.0 because it was too complex and required references, which may not be feasible for all questions.\n", + "\n", + "Proposed Signature: {\"instructions\":\"Answer questions about literature with short factoid answers.\",\"question_prefix\":\"Literature Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence or a specific fact\"}\n", + "\n", + "Score: 20.0\u001b[0m\n", + "\n", + "\n", + "\n" + ] + } + ], + "source": [ + "gpt4.inspect_history(n=10)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "turbo.inspect_history(n=10)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\"You are an instruction optimizer for large language models.\\n\\n I will give some task instructions I've tried, along with their corresponding validation scores.\\n - The instructions are arranged in order based on their scores, where higher scores indicate better quality.\\n - Your task is to propose a new instruction that will lead a good language model to perform the task even better.\\n - Be creative, and think out of the box.\\n - Don't repeat instructions, descriptions and prefixes that have already been attempted.\\n \"" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from dspy.teleprompt.signature_opt_typed import GenerateSignature\n", + "GenerateSignature.instructions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Prediction(\n", + " analysis='The previous instructions were clear and provided a specific format to follow for the response.',\n", + " proposed_signature=BasicQA(question='What are the fields to produce?', answer='analysis, proposed_signature, score'),\n", + " score=4.5\n", + ")" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "dspy.TypedPredictor(GenerateSignature[BasicQA])()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\n", + "\n", + "Given the fields , produce the fields `analysis`, `proposed_signature`, `score`.\n", + "\n", + "---\n", + "\n", + "Follow the following format.\n", + "\n", + "Analysis: Consider what made the previous instructions good or bad.\n", + "Proposed Signature: A signature that will likely lead to a high score.. Respond with a single JSON object. JSON Schema: {\"description\": \"Answer questions with short factoid answers.\", \"properties\": {\"question\": {\"__dspy_field_type\": \"input\", \"desc\": \"${question}\", \"prefix\": \"Question:\", \"title\": \"Question\", \"type\": \"string\"}, \"answer\": {\"__dspy_field_type\": \"output\", \"desc\": \"often between 1 and 5 words\", \"prefix\": \"Answer:\", \"title\": \"Answer\", \"type\": \"string\"}}, \"required\": [\"question\", \"answer\"], \"title\": \"BasicQA\", \"type\": \"object\"}\n", + "Score: The expected score for the new signature. Don't write anything after this number. 
(Respond with a single float value)\n", + "\n", + "---\n", + "\n", + "Analysis:\u001b[32m The previous instructions were clear and provided a specific format to follow for the response.\n", + "\n", + "Proposed Signature:\n", + "```json\n", + "{\n", + " \"question\": \"What are the fields to produce?\",\n", + " \"answer\": \"analysis, proposed_signature, score\"\n", + "}\n", + "```\n", + "\n", + "Score:\n", + "4.5\u001b[0m\n", + "\n", + "\n", + "\n" + ] + } + ], + "source": [ + "turbo.inspect_history(n=1)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "GenerateSignature[BasicQA]( -> analysis, proposed_signature, score\n", + " instructions='Given the fields , produce the fields `analysis`, `proposed_signature`, `score`.'\n", + " analysis = Field(annotation=str required=True json_schema_extra={'desc': 'Consider what made the previous instructions good or bad.', '__dspy_field_type': 'output', 'prefix': 'Analysis:'})\n", + " proposed_signature = Field(annotation=BasicQA required=True json_schema_extra={'desc': 'A signature that will likely lead to a high score.', '__dspy_field_type': 'output', 'prefix': 'Proposed Signature:'})\n", + " score = Field(annotation=float required=True json_schema_extra={'desc': \"The expected score for the new signature. Don't write anything after this number.\", '__dspy_field_type': 'output', 'prefix': 'Score:'})\n", + ")" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "GenerateSignature[BasicQA]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "GenerateSignature( -> analysis, proposed_signature, score\n", + " instructions=\"You are an instruction optimizer for large language models.\\n\\n I will give some task instructions I've tried, along with their corresponding validation scores.\\n - The instructions are arranged in order based on their scores, where higher scores indicate better quality.\\n - Your task is to propose a new instruction that will lead a good language model to perform the task even better.\\n - Be creative, and think out of the box.\\n - Don't repeat instructions, descriptions and prefixes that have already been attempted.\\n \"\n", + " analysis = Field(annotation=str required=True json_schema_extra={'desc': 'Consider what made the previous instructions good or bad.', '__dspy_field_type': 'output', 'prefix': 'Analysis:'})\n", + " proposed_signature = Field(annotation=~T required=True json_schema_extra={'desc': 'A signature that will likely lead to a high score.', '__dspy_field_type': 'output', 'prefix': 'Proposed Signature:'})\n", + " score = Field(annotation=float required=True json_schema_extra={'desc': \"The expected score for the new signature. 
Don't write anything after this number.\", '__dspy_field_type': 'output', 'prefix': 'Score:'})\n", + ")" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "GenerateSignature" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "py39", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.8" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/signature_opt2.ipynb b/examples/signature_opt2.ipynb deleted file mode 100644 index 5fdbdf5d21..0000000000 --- a/examples/signature_opt2.ipynb +++ /dev/null @@ -1,246 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/opt/homebrew/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n" - ] - } - ], - "source": [ - "%load_ext autoreload\n", - "%autoreload 2\n", - "import dspy\n", - "import os\n", - "os.environ['OPENAI_API_KEY'] = 'sk-...'" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=4000)\n", - "gpt4 = dspy.OpenAI(model='gpt-4', max_tokens=4000)\n", - "dspy.settings.configure(lm=turbo)" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "(20, 50)" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from dspy.datasets import HotPotQA\n", - "\n", - "# Load the dataset.\n", - "dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0)\n", - "\n", - "# Tell DSPy that the 'question' field is the input. 
Any other fields are labels and/or metadata.\n", - "trainset = [x.with_inputs('question') for x in dataset.train]\n", - "devset = [x.with_inputs('question') for x in dataset.dev]\n", - "\n", - "len(trainset), len(devset)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "class BasicQA(dspy.Signature):\n", - " \"\"\"Answer questions with short factoid answers.\"\"\"\n", - "\n", - " question = dspy.InputField()\n", - " answer = dspy.OutputField(desc=\"often between 1 and 5 words\")" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "None\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/opt/homebrew/lib/python3.11/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"signature\" shadows an attribute in parent \"Signature\"; \n", - " warnings.warn(\n" - ] - }, - { - "ename": "TypeError", - "evalue": "Field 'signature' in 'GenerateInstructionGivenAttempts' must be declared with InputField or OutputField. field.json_schema_extra=None", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[5], line 4\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdspy\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mevaluate\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mmetrics\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m answer_exact_match\n\u001b[1;32m 3\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdspy\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mfunctional\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m TypedPredictor\n\u001b[0;32m----> 4\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdspy\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mteleprompt\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01msignature_opt2\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m optimize_signature\n\u001b[1;32m 6\u001b[0m evaluator \u001b[38;5;241m=\u001b[39m Evaluate(devset\u001b[38;5;241m=\u001b[39mdevset, metric\u001b[38;5;241m=\u001b[39manswer_exact_match, num_threads\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m10\u001b[39m, display_progress\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[1;32m 8\u001b[0m program \u001b[38;5;241m=\u001b[39m optimize_signature(\n\u001b[1;32m 9\u001b[0m student\u001b[38;5;241m=\u001b[39mTypedPredictor(BasicQA),\n\u001b[1;32m 10\u001b[0m evaluator\u001b[38;5;241m=\u001b[39mEvaluate(devset\u001b[38;5;241m=\u001b[39mtrainset, metric\u001b[38;5;241m=\u001b[39manswer_exact_match, num_threads\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m10\u001b[39m, display_progress\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m),\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 14\u001b[0m prompt_model\u001b[38;5;241m=\u001b[39mgpt4,\n\u001b[1;32m 15\u001b[0m )\n", - "File \u001b[0;32m~/repos/dspy/dspy/teleprompt/signature_opt2.py:97\u001b[0m\n\u001b[1;32m 93\u001b[0m signature: T\n\u001b[1;32m 94\u001b[0m score: \u001b[38;5;28mfloat\u001b[39m \u001b[38;5;241m=\u001b[39m dspy\u001b[38;5;241m.\u001b[39mField(gt\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m, 
lt\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m100\u001b[39m)\n\u001b[0;32m---> 97\u001b[0m \u001b[38;5;28;43;01mclass\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;21;43;01mGenerateInstructionGivenAttempts\u001b[39;49;00m\u001b[43m(\u001b[49m\u001b[43mdspy\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mSignature\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mGeneric\u001b[49m\u001b[43m[\u001b[49m\u001b[43mT\u001b[49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\u001b[43m:\u001b[49m\n\u001b[1;32m 98\u001b[0m \u001b[38;5;250;43m \u001b[39;49m\u001b[38;5;124;43;03m\"\"\"You are an instruction optimizer for large language models.\u001b[39;49;00m\n\u001b[1;32m 99\u001b[0m \n\u001b[1;32m 100\u001b[0m \u001b[38;5;124;43;03m I will give some task instructions I've tried, along with their corresponding validation scores.\u001b[39;49;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 104\u001b[0m \u001b[38;5;124;43;03m - Don't repeat instructions, descriptions and prefixes that have already been attempted.\u001b[39;49;00m\n\u001b[1;32m 105\u001b[0m \u001b[38;5;124;43;03m \"\"\"\u001b[39;49;00m\n\u001b[1;32m 107\u001b[0m \u001b[43m \u001b[49m\u001b[43msignature\u001b[49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mT\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mdspy\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mOutputField\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdesc\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mProposed signature to try\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/repos/dspy/dspy/signatures/signature.py:48\u001b[0m, in \u001b[0;36mSignatureMeta.__new__\u001b[0;34m(mcs, signature_name, bases, namespace, **kwargs)\u001b[0m\n\u001b[1;32m 45\u001b[0m \u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__doc__\u001b[39m \u001b[38;5;241m=\u001b[39m _default_instructions(\u001b[38;5;28mcls\u001b[39m)\n\u001b[1;32m 47\u001b[0m \u001b[38;5;66;03m# Ensure all fields are declared with InputField or OutputField\u001b[39;00m\n\u001b[0;32m---> 48\u001b[0m \u001b[38;5;28;43mcls\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_validate_fields\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 50\u001b[0m \u001b[38;5;66;03m# Ensure all fields have a prefix\u001b[39;00m\n\u001b[1;32m 51\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m name, field \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39mmodel_fields\u001b[38;5;241m.\u001b[39mitems():\n", - "File \u001b[0;32m~/repos/dspy/dspy/signatures/signature.py:65\u001b[0m, in \u001b[0;36mSignatureMeta._validate_fields\u001b[0;34m(cls)\u001b[0m\n\u001b[1;32m 63\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m field_type \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m [\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124minput\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124moutput\u001b[39m\u001b[38;5;124m\"\u001b[39m]:\n\u001b[1;32m 64\u001b[0m \u001b[38;5;28mprint\u001b[39m(field\u001b[38;5;241m.\u001b[39mjson_schema_extra)\n\u001b[0;32m---> 65\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(\n\u001b[1;32m 66\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mField 
\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m in \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m must be declared with InputField or OutputField. \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mfield\u001b[38;5;241m.\u001b[39mjson_schema_extra\u001b[38;5;132;01m=}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 67\u001b[0m )\n", - "\u001b[0;31mTypeError\u001b[0m: Field 'signature' in 'GenerateInstructionGivenAttempts' must be declared with InputField or OutputField. field.json_schema_extra=None" - ] - } - ], - "source": [ - "from dspy.evaluate import Evaluate\n", - "from dspy.evaluate.metrics import answer_exact_match\n", - "from dspy.functional import TypedPredictor\n", - "from dspy.teleprompt.signature_opt2 import optimize_signature\n", - "\n", - "evaluator = Evaluate(devset=devset, metric=answer_exact_match, num_threads=10, display_progress=True)\n", - "\n", - "program = optimize_signature(\n", - " student=TypedPredictor(BasicQA),\n", - " evaluator=Evaluate(devset=trainset, metric=answer_exact_match, num_threads=10, display_progress=True),\n", - " initial_prompts=2,\n", - " n_iterations=8,\n", - " verbose=True,\n", - " prompt_model=gpt4,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "\n", - "\n", - "\n", - "Given the fields `attempted_signatures`, produce the fields `proposed_signature`.\n", - "\n", - "---\n", - "\n", - "Follow the following format.\n", - "\n", - "Attempted Signatures: ${attempted_signatures}\n", - "Reasoning: Let's think step by step in order to ${produce the proposed_signature}. We ...\n", - "Proposed Signature: The improved signature for the language model. Respond with a single JSON object. 
JSON Schema: {\"properties\": {\"instructions\": {\"description\": \"The instructions for the task\", \"title\": \"Instructions\", \"type\": \"string\"}, \"question_prefix\": {\"description\": \"The prefix for question\", \"title\": \"Question Prefix\", \"type\": \"string\"}, \"question_desc\": {\"description\": \"The description for question\", \"title\": \"Question Desc\", \"type\": \"string\"}, \"answer_prefix\": {\"description\": \"The prefix for answer\", \"title\": \"Answer Prefix\", \"type\": \"string\"}, \"answer_desc\": {\"description\": \"The description for answer\", \"title\": \"Answer Desc\", \"type\": \"string\"}}, \"required\": [\"instructions\", \"question_prefix\", \"question_desc\", \"answer_prefix\", \"answer_desc\"], \"title\": \"SignatureInfo[BasicQA]\", \"type\": \"object\"}\n", - "\n", - "---\n", - "\n", - "Attempted Signatures: [{\"signature\":{\"instructions\":\"Answer questions with short factoid answers.(0)\",\"question_prefix\":\"Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 5 words\"},\"score\":25.0},{\"signature\":{\"instructions\":\"Answer series of questions where answers must share a common theme.(1)\",\"question_prefix\":\"Q1,\",\"question_desc\":\"${question}\",\"answer_prefix\":\"A1 Wolfgang Amplifier,\",\"answer_desc\":\"charge amplifier designed based by Moog on stripline technology\"},\"score\":25.0},{\"signature\":{\"instructions\":\"Simulate exploratory dialogue between two people one Questioner and other Provider, Respond to each Q- below.(2)\",\"question_prefix\":\"Q-What Computability study ranges?\",\"question_desc\":\"may also involve strings produced by non-deterministic computation\",\"answer_prefix\":\"A-The explain phase-leading out question onset sound crawled fundingProblem in such separated syllabled band phrase assist reduction Haus mutual widse phoneme runtime shruproperholderstoInt valuepirizeThunder\",\"answer_desc\":\"answer DescHash Pure stainless Aberdeen stimulating Victoria names central whiskey article promise twitch Ohio Amber how statements board!\"},\"score\":25.0}]\n", - "Reasoning: Let's think step by step in order to\u001b[32m produce the proposed_signature. We can see that the attempted signatures are quite varied and complex, with different instructions, prefixes, and descriptions for both questions and answers. However, they all share a common structure: they all have instructions, a question prefix, a question description, an answer prefix, and an answer description. Therefore, we can propose a signature that includes these common elements, but with more general descriptions to accommodate the variety of tasks. \n", - "Proposed Signature: The improved signature for the language model. Respond with a single JSON object. 
JSON Schema: {\"properties\": {\"instructions\": {\"description\": \"The instructions for the task\", \"title\": \"Instructions\", \"type\": \"string\"}, \"question_prefix\": {\"description\": \"The prefix for question\", \"title\": \"Question Prefix\", \"type\": \"string\"}, \"question_desc\": {\"description\": \"The description for question\", \"title\": \"Question Desc\", \"type\": \"string\"}, \"answer_prefix\": {\"description\": \"The prefix for answer\", \"title\": \"Answer Prefix\", \"type\": \"string\"}, \"answer_desc\": {\"description\": \"The description for answer\", \"title\": \"Answer Desc\", \"type\": \"string\"}}, \"required\": [\"instructions\", \"question_prefix\", \"question_desc\", \"answer_prefix\", \"answer_desc\"], \"title\": \"SignatureInfo[BasicQA]\", \"type\": \"object\"}\u001b[0m\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Make a very succinct json object that validates with the following schema\n", - "\n", - "---\n", - "\n", - "Follow the following format.\n", - "\n", - "Json Schema: ${json_schema}\n", - "Json Object: ${json_object}\n", - "\n", - "---\n", - "\n", - "Json Schema: {\"properties\": {\"instructions\": {\"description\": \"The instructions for the task\", \"title\": \"Instructions\", \"type\": \"string\"}, \"question_prefix\": {\"description\": \"The prefix for question\", \"title\": \"Question Prefix\", \"type\": \"string\"}, \"question_desc\": {\"description\": \"The description for question\", \"title\": \"Question Desc\", \"type\": \"string\"}, \"answer_prefix\": {\"description\": \"The prefix for answer\", \"title\": \"Answer Prefix\", \"type\": \"string\"}, \"answer_desc\": {\"description\": \"The description for answer\", \"title\": \"Answer Desc\", \"type\": \"string\"}}, \"required\": [\"instructions\", \"question_prefix\", \"question_desc\", \"answer_prefix\", \"answer_desc\"], \"title\": \"SignatureInfo[BasicQA]\", \"type\": \"object\"}\n", - "Json Object:\u001b[32m {\"instructions\": \"Complete the task\", \"question_prefix\": \"Q:\", \"question_desc\": \"What is the capital of France?\", \"answer_prefix\": \"A:\", \"answer_desc\": \"Paris\"}\u001b[0m\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Given the fields `attempted_signatures`, produce the fields `proposed_signature`.\n", - "\n", - "---\n", - "\n", - "Follow the following format.\n", - "\n", - "Attempted Signatures: ${attempted_signatures}\n", - "\n", - "Past Error (proposed_signature): An error to avoid in the future\n", - "\n", - "Reasoning: Let's think step by step in order to ${produce the proposed_signature}. We ...\n", - "\n", - "Proposed Signature:\n", - "The improved signature for the language model. Respond with a single JSON object. 
\n", - "You MUST use this format: {\"instructions\": \"Complete the task\", \"question_prefix\": \"Q:\", \"question_desc\": \"What is the capital of France?\", \"answer_prefix\": \"A:\", \"answer_desc\": \"Paris\"}\n", - "JSON Schema: {\"properties\": {\"instructions\": {\"description\": \"The instructions for the task\", \"title\": \"Instructions\", \"type\": \"string\"}, \"question_prefix\": {\"description\": \"The prefix for question\", \"title\": \"Question Prefix\", \"type\": \"string\"}, \"question_desc\": {\"description\": \"The description for question\", \"title\": \"Question Desc\", \"type\": \"string\"}, \"answer_prefix\": {\"description\": \"The prefix for answer\", \"title\": \"Answer Prefix\", \"type\": \"string\"}, \"answer_desc\": {\"description\": \"The description for answer\", \"title\": \"Answer Desc\", \"type\": \"string\"}}, \"required\": [\"instructions\", \"question_prefix\", \"question_desc\", \"answer_prefix\", \"answer_desc\"], \"title\": \"SignatureInfo[BasicQA]\", \"type\": \"object\"}\n", - "\n", - "---\n", - "\n", - "Attempted Signatures: [{\"signature\":{\"instructions\":\"Answer questions with short factoid answers.(0)\",\"question_prefix\":\"Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 5 words\"},\"score\":25.0},{\"signature\":{\"instructions\":\"Answer series of questions where answers must share a common theme.(1)\",\"question_prefix\":\"Q1,\",\"question_desc\":\"${question}\",\"answer_prefix\":\"A1 Wolfgang Amplifier,\",\"answer_desc\":\"charge amplifier designed based by Moog on stripline technology\"},\"score\":25.0},{\"signature\":{\"instructions\":\"Simulate exploratory dialogue between two people one Questioner and other Provider, Respond to each Q- below.(2)\",\"question_prefix\":\"Q-What Computability study ranges?\",\"question_desc\":\"may also involve strings produced by non-deterministic computation\",\"answer_prefix\":\"A-The explain phase-leading out question onset sound crawled fundingProblem in such separated syllabled band phrase assist reduction Haus mutual widse phoneme runtime shruproperholderstoInt valuepirizeThunder\",\"answer_desc\":\"answer DescHash Pure stainless Aberdeen stimulating Victoria names central whiskey article promise twitch Ohio Amber how statements board!\"},\"score\":25.0}]\n", - "\n", - "Past Error (proposed_signature): ValueError('json output should start and end with { and }')\n", - "\n", - "Reasoning: Let's think step by step in order to\u001b[32m produce the proposed_signature. We need to ensure that the JSON object is properly formatted and contains all the required fields. The attempted signatures provided are not in the correct format. They contain additional fields such as \"score\" and the structure of the JSON object is not as expected. The JSON object should start and end with { and }, and should not contain any additional fields or arrays. 
\n",
-      "\n",
-      "Proposed Signature:\n",
-      "{\"instructions\": \"Answer questions with short factoid answers.\", \"question_prefix\": \"Question:\", \"question_desc\": \"${question}\", \"answer_prefix\": \"Answer:\", \"answer_desc\": \"often between 1 and 5 words\"}\u001b[0m\n",
-      "\n",
-      "\n",
-      "\n"
-     ]
-    }
-   ],
-   "source": [
-    "gpt4.inspect_history(n=3)"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "py39",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.11.8"
-  },
-  "orig_nbformat": 4
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/tests/functional/test_functional.py b/tests/functional/test_functional.py
index ebca992230..2289ce697c 100644
--- a/tests/functional/test_functional.py
+++ b/tests/functional/test_functional.py
@@ -2,7 +2,7 @@
 import textwrap
 import pydantic
 from pydantic import Field, BaseModel, field_validator
-from typing import Annotated, Literal
+from typing import Annotated, Generic, Literal, TypeVar
 from typing import List
 
 import pytest
@@ -618,3 +618,20 @@ class ScoredSignature(dspy.Signature):
         Attempted Signatures: [{"string":"string 1","score":0.5},{"string":"string 2","score":0.4},{"string":"string 3","score":0.3}]
         Reasoning: Let's think step by step in order to Thoughts
         Proposed Signature: Output""")
+
+
+def test_generic_signature():
+    T = TypeVar("T")
+
+    class GenericSignature(dspy.Signature, Generic[T]):
+        """My signature"""
+
+        output: T = dspy.OutputField()
+
+    predictor = TypedPredictor(GenericSignature[int])
+    assert predictor.signature.instructions == "My signature"
+
+    lm = DummyLM(["23"])
+    dspy.settings.configure(lm=lm)
+
+    assert predictor().output == 23
diff --git a/tests/functional/test_signature_opt2.py b/tests/functional/test_signature_opt_typed.py
similarity index 91%
rename from tests/functional/test_signature_opt2.py
rename to tests/functional/test_signature_opt_typed.py
index 469bf3f252..44adb9d0ea 100644
--- a/tests/functional/test_signature_opt2.py
+++ b/tests/functional/test_signature_opt_typed.py
@@ -2,9 +2,8 @@
 import dspy
 from dspy.evaluate import Evaluate
 from dspy.functional import TypedPredictor
-from dspy.teleprompt.signature_opt2 import (
-    GenerateInstructionGivenAttempts,
-    ScoredSignature,
+from dspy.teleprompt.signature_opt_typed import (
+    GenerateSignature,
     make_info,
     optimize_signature,
 )
@@ -107,7 +106,7 @@ class BasicQA(dspy.Signature):
 ]
 
 
-def test_signature_info():
+def old_test_signature_info():
     info = make_info(BasicQA)
     SignatureInfo = type(info)
 
@@ -159,7 +158,18 @@ def test_opt():
         student=TypedPredictor(BasicQA),
         evaluator=Evaluate(devset=hotpotqa, metric=answer_exact_match, num_threads=1),
         initial_prompts=1,
-        n_iterations=1,
+        n_iterations=2,
         verbose=True,
         prompt_model=prompt_model,
+        strategy="last",
     )
+
+    # Since we are requesting the last signature, it doesn't matter that our qa_model
+    # is bad and gets a score of 0. We should still get the last signature.
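
(For reference, a minimal end-to-end sketch of the `strategy` knob that `test_opt` pins down, pieced together from the notebook cells elsewhere in this series; the score-maximizing `"best"` alternative named in the comments is an assumption — only `"last"` appears in the patch itself.)

```python
# Hedged sketch, not part of the patch: exercises optimize_signature exactly as
# the notebook and the test above do. Assumption: "best" is the score-maximizing
# counterpart to the "last" strategy shown here.
import dspy
from dspy.datasets import HotPotQA
from dspy.evaluate import Evaluate
from dspy.evaluate.metrics import answer_exact_match
from dspy.functional import TypedPredictor
from dspy.teleprompt.signature_opt_typed import optimize_signature

turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=4000)
gpt4 = dspy.OpenAI(model='gpt-4', max_tokens=4000)
dspy.settings.configure(lm=turbo)

class BasicQA(dspy.Signature):
    """Answer questions with short factoid answers."""
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")

dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0)
devset = [x.with_inputs('question') for x in dataset.dev]

program = optimize_signature(
    student=TypedPredictor(BasicQA),
    evaluator=Evaluate(devset=devset, metric=answer_exact_match, num_threads=10),
    initial_prompts=4,
    n_iterations=8,
    verbose=True,
    prompt_model=gpt4,   # a stronger LM proposes each new signature
    strategy="last",     # keep the final proposal regardless of its score
)
print(program.signature)
```

With `"last"`, the returned predictor carries whatever signature the final iteration produced, which is what lets the test below assert an exact signature even though the dummy QA model scores 0.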
+ class ExpectedSignature(dspy.Signature): + "I" + + question: str = dspy.InputField(desc="$q", prefix="Q:") + answer: str = dspy.OutputField(desc="$a", prefix="A:") + + assert program.signature.equals(ExpectedSignature) diff --git a/tests/primitives/test_program.py b/tests/primitives/test_program.py index aec5ace7d2..87ce09395f 100644 --- a/tests/primitives/test_program.py +++ b/tests/primitives/test_program.py @@ -69,13 +69,13 @@ class AnotherSubModule(BaseModule): def test_empty_module(): module = BaseModule() - assert list(module.named_sub_modules()) == [] + assert list(module.named_sub_modules()) == [("base", module)] def test_single_level(): module = BaseModule() module.sub = SubModule() - expected = [("sub", module.sub)] + expected = [("base", module), ("base.sub", module.sub)] assert list(module.named_sub_modules()) == expected @@ -83,7 +83,7 @@ def test_multiple_levels(): module = BaseModule() module.sub = SubModule() module.sub.subsub = SubModule() - expected = [("sub", module.sub), ("sub.subsub", module.sub.subsub)] + expected = [("base", module), ("base.sub", module.sub), ("base.sub.subsub", module.sub.subsub)] assert list(module.named_sub_modules()) == expected @@ -91,7 +91,7 @@ def test_multiple_sub_modules(): module = BaseModule() module.sub1 = SubModule() module.sub2 = SubModule() - expected = [("sub1", module.sub1), ("sub2", module.sub2)] + expected = [("base", module), ("base.sub1", module.sub1), ("base.sub2", module.sub2)] assert sorted(list(module.named_sub_modules())) == sorted(expected) @@ -99,5 +99,5 @@ def test_non_base_module_attributes(): module = BaseModule() module.sub = SubModule() module.not_a_sub = "Not a BaseModule" - expected = [("sub", module.sub)] + expected = [("base", module), ("base.sub", module.sub)] assert list(module.named_sub_modules()) == expected diff --git a/tests/signatures/test_signature.py b/tests/signatures/test_signature.py index 554d9b1274..d0eb899d13 100644 --- a/tests/signatures/test_signature.py +++ b/tests/signatures/test_signature.py @@ -1,8 +1,12 @@ +import textwrap import pytest import pydantic from dspy import Signature, infer_prefix, InputField, OutputField from typing import List +import dspy +from dspy.utils.dummies import DummyLM + def test_field_types_and_custom_attributes(): class TestSignature(Signature): @@ -174,3 +178,31 @@ class SubSignature(Signature): assert SubSignature.__name__ == "SubSignature" value = SubSignature(input="test", output="test") assert isinstance(value, SubSignature) + + +def test_multiline_instructions(): + class MySignature(Signature): + """First line + Second line""" + + output = OutputField() + + predictor = dspy.Predict(MySignature) + + lm = DummyLM(["short answer"]) + dspy.settings.configure(lm=lm) + assert predictor().output == "short answer" + + assert lm.get_convo(-1) == textwrap.dedent("""\ + First line + Second line + + --- + + Follow the following format. 
+ + Output: ${output} + + --- + + Output: short answer""") From 1ac3687a1d51d65cbcaffb44bf1b32ea51e1b262 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Wed, 6 Mar 2024 01:27:02 -0800 Subject: [PATCH 120/243] Updated notebook --- examples/functional/signature_opt_typed.ipynb | 567 +++--------------- 1 file changed, 93 insertions(+), 474 deletions(-) diff --git a/examples/functional/signature_opt_typed.ipynb b/examples/functional/signature_opt_typed.ipynb index 32db183bde..7447a965ee 100644 --- a/examples/functional/signature_opt_typed.ipynb +++ b/examples/functional/signature_opt_typed.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -17,18 +17,9 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/opt/homebrew/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n" - ] - } - ], + "outputs": [], "source": [ "import dspy\n", "turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=4000)\n", @@ -38,7 +29,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "metadata": {}, "outputs": [ { @@ -49,7 +40,7 @@ ")" ] }, - "execution_count": 3, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } @@ -60,7 +51,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "metadata": {}, "outputs": [ { @@ -69,7 +60,7 @@ "(20, 50)" ] }, - "execution_count": 4, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -89,7 +80,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ @@ -102,7 +93,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 7, "metadata": {}, "outputs": [ { @@ -110,7 +101,7 @@ "output_type": "stream", "text": [ "Found 1 typed predictors to optimize.\n", - "Generating 2 initial signatures for base...\n", + "Generating 4 initial signatures for base...\n", "\n", "================================================================================\n", "Running eval iteration 0...\n" @@ -120,7 +111,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 2233.25it/s]\n", + "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 4290.32it/s]\n", "/Users/ahle/repos/dspy/dspy/evaluate/evaluate.py:142: FutureWarning: DataFrame.applymap has been deprecated. 
Use DataFrame.map instead.\n", " df = df.applymap(truncate_cell)\n" ] @@ -139,14 +130,14 @@ "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 14 / 50 (28.0): 100%|██████████| 50/50 [00:02<00:00, 24.02it/s]\n" + "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:02<00:00, 22.35it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 14 / 50 (28.0%)\n", + "Average Metric: 16 / 50 (32.0%)\n", "\n", "================================================================================\n", "Running eval iteration 2...\n" @@ -156,7 +147,41 @@ "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:02<00:00, 20.98it/s]\n" + "Average Metric: 19 / 50 (38.0): 100%|██████████| 50/50 [00:04<00:00, 10.28it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 19 / 50 (38.0%)\n", + "\n", + "================================================================================\n", + "Running eval iteration 3...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 11 / 50 (22.0): 100%|██████████| 50/50 [00:05<00:00, 8.63it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 11 / 50 (22.0%)\n", + "\n", + "================================================================================\n", + "Running eval iteration 4...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:02<00:00, 24.53it/s]\n" ] }, { @@ -167,32 +192,64 @@ "Generating new signature for base...\n", "\n", "================================================================================\n", - "Running eval iteration 3...\n" + "Running eval iteration 5...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 7 / 50 (14.0): 100%|██████████| 50/50 [00:36<00:00, 1.36it/s]\n" + "Average Metric: 18 / 50 (36.0): 100%|██████████| 50/50 [00:02<00:00, 21.89it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 7 / 50 (14.0%)\n", + "Average Metric: 18 / 50 (36.0%)\n", "Generating new signature for base...\n", "\n", "================================================================================\n", - "Running eval iteration 4...\n" + "Running eval iteration 6...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 2 / 5 (40.0): 8%|▊ | 4/50 [00:00<00:04, 9.85it/s]" + "Average Metric: 6 / 50 (12.0): 100%|██████████| 50/50 [00:03<00:00, 13.65it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 6 / 50 (12.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 7...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:02<00:00, 19.56it/s]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 17 / 50 (34.0%)\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" ] } ], @@ -207,7 +264,7 @@ "program = optimize_signature(\n", " student=TypedPredictor(BasicQA),\n", " evaluator=evaluator,\n", - " initial_prompts=2,\n", + " initial_prompts=4,\n", " n_iterations=8,\n", " verbose=True,\n", " prompt_model=gpt4,\n", @@ -216,463 +273,25 @@ }, { 
"cell_type": "code", - "execution_count": null, + "execution_count": 16, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "\n", - "\n", - "\n", - "\n", - "Given the fields `basic_signature`, produce the fields `proposed_signatures`.\n", - "\n", - "---\n", - "\n", - "Follow the following format.\n", - "\n", - "Basic Signature: ${basic_signature}\n", - "Reasoning: Let's think step by step in order to ${produce the proposed_signatures}. We ...\n", - "Proposed Signatures: A list of 2 very different variations of the basic signature. Respond with a single JSON object. JSON Schema: {\"$defs\": {\"SignatureInfo_BasicQA_\": {\"properties\": {\"instructions\": {\"description\": \"The instructions for the task\", \"title\": \"Instructions\", \"type\": \"string\"}, \"question_prefix\": {\"description\": \"The prefix for question\", \"title\": \"Question Prefix\", \"type\": \"string\"}, \"question_desc\": {\"description\": \"The description for question\", \"title\": \"Question Desc\", \"type\": \"string\"}, \"answer_prefix\": {\"description\": \"The prefix for answer\", \"title\": \"Answer Prefix\", \"type\": \"string\"}, \"answer_desc\": {\"description\": \"The description for answer\", \"title\": \"Answer Desc\", \"type\": \"string\"}}, \"required\": [\"instructions\", \"question_prefix\", \"question_desc\", \"answer_prefix\", \"answer_desc\"], \"title\": \"SignatureInfo[BasicQA]\", \"type\": \"object\"}}, \"properties\": {\"value\": {\"items\": {\"$ref\": \"#/$defs/SignatureInfo_BasicQA_\"}, \"title\": \"Value\", \"type\": \"array\"}}, \"required\": [\"value\"], \"title\": \"Output\", \"type\": \"object\"}\n", - "\n", - "---\n", - "\n", - "Basic Signature: {\"instructions\":\"Answer questions with short factoid answers.\",\"question_prefix\":\"Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 5 words\"}\n", - "Reasoning: Let's think step by step in order to\u001b[32m produce the proposed_signatures. We can modify the instructions to specify the type of questions to be answered. We can also change the answer description to specify the length of the answer in terms of sentences instead of words. \n", - "Proposed Signatures: \n", - "{\n", - " \"value\": [\n", - " {\n", - " \"instructions\": \"Answer trivia questions with short factoid answers.\",\n", - " \"question_prefix\": \"Trivia Question:\",\n", - " \"question_desc\": \"${question}\",\n", - " \"answer_prefix\": \"Answer:\",\n", - " \"answer_desc\": \"often a single sentence\"\n", - " },\n", - " {\n", - " \"instructions\": \"Answer general knowledge questions with short factoid answers.\",\n", - " \"question_prefix\": \"General Knowledge Question:\",\n", - " \"question_desc\": \"${question}\",\n", - " \"answer_prefix\": \"Answer:\",\n", - " \"answer_desc\": \"often between 1 and 2 sentences\"\n", - " }\n", - " ]\n", - "}\u001b[0m\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Given the fields , produce the fields `analysis`, `proposed_signature`, `score`.\n", - "\n", - "---\n", - "\n", - "Follow the following format.\n", - "\n", - "Analysis: Consider what made the previous instructions good or bad.\n", - "Proposed Signature: A signature that will likely lead to a high score.. Respond with a single JSON object. 
JSON Schema: {\"properties\": {\"instructions\": {\"description\": \"The instructions for the task\", \"title\": \"Instructions\", \"type\": \"string\"}, \"question_prefix\": {\"description\": \"The prefix for question\", \"title\": \"Question Prefix\", \"type\": \"string\"}, \"question_desc\": {\"description\": \"The description for question\", \"title\": \"Question Desc\", \"type\": \"string\"}, \"answer_prefix\": {\"description\": \"The prefix for answer\", \"title\": \"Answer Prefix\", \"type\": \"string\"}, \"answer_desc\": {\"description\": \"The description for answer\", \"title\": \"Answer Desc\", \"type\": \"string\"}}, \"required\": [\"instructions\", \"question_prefix\", \"question_desc\", \"answer_prefix\", \"answer_desc\"], \"title\": \"SignatureInfo[BasicQA]\", \"type\": \"object\"}\n", - "Score: The expected score for the new signature. Don't write anything after this number. (Respond with a single float value)\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer general knowledge questions with short factoid answers.\",\"question_prefix\":\"General Knowledge Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 2 sentences\"}\n", - "Score: 8.0\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer trivia questions with short factoid answers.\",\"question_prefix\":\"Trivia Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence\"}\n", - "Score: 22.0\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer questions with short factoid answers.\",\"question_prefix\":\"Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 5 words\"}\n", - "Score: 32.0\n", - "\n", - "---\n", - "\n", - "Analysis:\u001b[32m The previous instructions were clear and concise, providing a straightforward task for the user. The use of placeholders for the question and answer descriptions allows for flexibility and adaptability. The instructions also specify the expected length of the answer, which can help guide the user in providing an appropriate response.\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer historical questions with short factoid answers.\",\"question_prefix\":\"History Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence or a specific date\"}\n", - "\n", - "Score: 28.0\u001b[0m\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Given the fields , produce the fields `analysis`, `proposed_signature`, `score`.\n", - "\n", - "---\n", - "\n", - "Follow the following format.\n", - "\n", - "Analysis: Consider what made the previous instructions good or bad.\n", - "Proposed Signature: A signature that will likely lead to a high score.. Respond with a single JSON object. 
JSON Schema: {\"properties\": {\"instructions\": {\"description\": \"The instructions for the task\", \"title\": \"Instructions\", \"type\": \"string\"}, \"question_prefix\": {\"description\": \"The prefix for question\", \"title\": \"Question Prefix\", \"type\": \"string\"}, \"question_desc\": {\"description\": \"The description for question\", \"title\": \"Question Desc\", \"type\": \"string\"}, \"answer_prefix\": {\"description\": \"The prefix for answer\", \"title\": \"Answer Prefix\", \"type\": \"string\"}, \"answer_desc\": {\"description\": \"The description for answer\", \"title\": \"Answer Desc\", \"type\": \"string\"}}, \"required\": [\"instructions\", \"question_prefix\", \"question_desc\", \"answer_prefix\", \"answer_desc\"], \"title\": \"SignatureInfo[BasicQA]\", \"type\": \"object\"}\n", - "Score: The expected score for the new signature. Don't write anything after this number. (Respond with a single float value)\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer general knowledge questions with short factoid answers.\",\"question_prefix\":\"General Knowledge Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 2 sentences\"}\n", - "Score: 8.0\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer trivia questions with short factoid answers.\",\"question_prefix\":\"Trivia Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence\"}\n", - "Score: 22.0\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer historical questions with short factoid answers.\",\"question_prefix\":\"History Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence or a specific date\"}\n", - "Score: 22.0\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer questions with short factoid answers.\",\"question_prefix\":\"Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 5 words\"}\n", - "Score: 32.0\n", - "\n", - "---\n", - "\n", - "Analysis:\u001b[32m The previous instructions were clear and concise, providing a specific context for the type of questions to be answered (general knowledge, trivia, history). They also provided guidance on the format of the answer, specifying the length and type of response expected. However, the score could be improved by providing more detailed instructions and specifying the complexity of the questions and answers.\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer complex scientific questions with detailed factoid answers. Use reliable sources and provide references if possible.\",\"question_prefix\":\"Science Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 2 and 3 sentences, including specific scientific terms and concepts\"}\n", - "\n", - "Score: 35.0\u001b[0m\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Given the fields , produce the fields `analysis`, `proposed_signature`, `score`.\n", - "\n", - "---\n", - "\n", - "Follow the following format.\n", - "\n", - "Analysis: Consider what made the previous instructions good or bad.\n", - "Proposed Signature: A signature that will likely lead to a high score.. Respond with a single JSON object. 
JSON Schema: {\"properties\": {\"instructions\": {\"description\": \"The instructions for the task\", \"title\": \"Instructions\", \"type\": \"string\"}, \"question_prefix\": {\"description\": \"The prefix for question\", \"title\": \"Question Prefix\", \"type\": \"string\"}, \"question_desc\": {\"description\": \"The description for question\", \"title\": \"Question Desc\", \"type\": \"string\"}, \"answer_prefix\": {\"description\": \"The prefix for answer\", \"title\": \"Answer Prefix\", \"type\": \"string\"}, \"answer_desc\": {\"description\": \"The description for answer\", \"title\": \"Answer Desc\", \"type\": \"string\"}}, \"required\": [\"instructions\", \"question_prefix\", \"question_desc\", \"answer_prefix\", \"answer_desc\"], \"title\": \"SignatureInfo[BasicQA]\", \"type\": \"object\"}\n", - "Score: The expected score for the new signature. Don't write anything after this number. (Respond with a single float value)\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer complex scientific questions with detailed factoid answers. Use reliable sources and provide references if possible.\",\"question_prefix\":\"Science Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 2 and 3 sentences, including specific scientific terms and concepts\"}\n", - "Score: 0.0\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer general knowledge questions with short factoid answers.\",\"question_prefix\":\"General Knowledge Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 2 sentences\"}\n", - "Score: 8.0\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer trivia questions with short factoid answers.\",\"question_prefix\":\"Trivia Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence\"}\n", - "Score: 22.0\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer historical questions with short factoid answers.\",\"question_prefix\":\"History Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence or a specific date\"}\n", - "Score: 22.0\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer questions with short factoid answers.\",\"question_prefix\":\"Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 5 words\"}\n", - "Score: 32.0\n", - "\n", - "---\n", - "\n", - "Analysis:\u001b[32m The previous instructions were good as they were clear and concise. They provided a clear context for the type of questions to be answered (e.g., science, general knowledge, trivia, history) and also gave guidance on the expected length and format of the answers. 
However, the first proposed signature scored 0.0 because it asked for complex scientific answers with references, which may not be feasible in a short-answer format.\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer geography questions with short factoid answers.\",\"question_prefix\":\"Geography Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence or a specific place\"}\n", - "\n", - "Score: 22.0\u001b[0m\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Given the fields , produce the fields `analysis`, `proposed_signature`, `score`.\n", - "\n", - "---\n", - "\n", - "Follow the following format.\n", - "\n", - "Analysis: Consider what made the previous instructions good or bad.\n", - "Proposed Signature: A signature that will likely lead to a high score.. Respond with a single JSON object. JSON Schema: {\"properties\": {\"instructions\": {\"description\": \"The instructions for the task\", \"title\": \"Instructions\", \"type\": \"string\"}, \"question_prefix\": {\"description\": \"The prefix for question\", \"title\": \"Question Prefix\", \"type\": \"string\"}, \"question_desc\": {\"description\": \"The description for question\", \"title\": \"Question Desc\", \"type\": \"string\"}, \"answer_prefix\": {\"description\": \"The prefix for answer\", \"title\": \"Answer Prefix\", \"type\": \"string\"}, \"answer_desc\": {\"description\": \"The description for answer\", \"title\": \"Answer Desc\", \"type\": \"string\"}}, \"required\": [\"instructions\", \"question_prefix\", \"question_desc\", \"answer_prefix\", \"answer_desc\"], \"title\": \"SignatureInfo[BasicQA]\", \"type\": \"object\"}\n", - "Score: The expected score for the new signature. Don't write anything after this number. (Respond with a single float value)\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer complex scientific questions with detailed factoid answers. 
Use reliable sources and provide references if possible.\",\"question_prefix\":\"Science Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 2 and 3 sentences, including specific scientific terms and concepts\"}\n", - "Score: 0.0\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer general knowledge questions with short factoid answers.\",\"question_prefix\":\"General Knowledge Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 2 sentences\"}\n", - "Score: 8.0\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer geography questions with short factoid answers.\",\"question_prefix\":\"Geography Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence or a specific place\"}\n", - "Score: 16.0\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer trivia questions with short factoid answers.\",\"question_prefix\":\"Trivia Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence\"}\n", - "Score: 22.0\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer historical questions with short factoid answers.\",\"question_prefix\":\"History Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence or a specific date\"}\n", - "Score: 22.0\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer questions with short factoid answers.\",\"question_prefix\":\"Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 5 words\"}\n", - "Score: 32.0\n", - "\n", - "---\n", - "\n", - "Analysis:\u001b[32m The previous instructions were good as they were clear, concise, and specific. They provided a clear context (e.g., science, general knowledge, geography, trivia, history) for the type of questions to be answered, which can help in generating more accurate and relevant responses. The instructions also specified the format of the answers (e.g., short factoid answers, often a single sentence or a specific place/date), which can help in maintaining consistency in the responses. However, the first proposed signature had a score of 0.0, possibly because it asked for complex scientific questions to be answered with detailed factoid answers and references, which may not be feasible or practical in many cases.\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer general knowledge questions with concise factoid answers.\",\"question_prefix\":\"General Knowledge Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence or a specific fact\"}\n", - "\n", - "Score: 25.0\u001b[0m\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Given the fields , produce the fields `analysis`, `proposed_signature`, `score`.\n", - "\n", - "---\n", - "\n", - "Follow the following format.\n", - "\n", - "Analysis: Consider what made the previous instructions good or bad.\n", - "Proposed Signature: A signature that will likely lead to a high score.. Respond with a single JSON object. 
JSON Schema: {\"properties\": {\"instructions\": {\"description\": \"The instructions for the task\", \"title\": \"Instructions\", \"type\": \"string\"}, \"question_prefix\": {\"description\": \"The prefix for question\", \"title\": \"Question Prefix\", \"type\": \"string\"}, \"question_desc\": {\"description\": \"The description for question\", \"title\": \"Question Desc\", \"type\": \"string\"}, \"answer_prefix\": {\"description\": \"The prefix for answer\", \"title\": \"Answer Prefix\", \"type\": \"string\"}, \"answer_desc\": {\"description\": \"The description for answer\", \"title\": \"Answer Desc\", \"type\": \"string\"}}, \"required\": [\"instructions\", \"question_prefix\", \"question_desc\", \"answer_prefix\", \"answer_desc\"], \"title\": \"SignatureInfo[BasicQA]\", \"type\": \"object\"}\n", - "Score: The expected score for the new signature. Don't write anything after this number. (Respond with a single float value)\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer complex scientific questions with detailed factoid answers. Use reliable sources and provide references if possible.\",\"question_prefix\":\"Science Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 2 and 3 sentences, including specific scientific terms and concepts\"}\n", - "Score: 0.0\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer general knowledge questions with short factoid answers.\",\"question_prefix\":\"General Knowledge Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 2 sentences\"}\n", - "Score: 8.0\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer general knowledge questions with concise factoid answers.\",\"question_prefix\":\"General Knowledge Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence or a specific fact\"}\n", - "Score: 14.0\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer geography questions with short factoid answers.\",\"question_prefix\":\"Geography Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence or a specific place\"}\n", - "Score: 16.0\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer trivia questions with short factoid answers.\",\"question_prefix\":\"Trivia Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence\"}\n", - "Score: 22.0\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer historical questions with short factoid answers.\",\"question_prefix\":\"History Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence or a specific date\"}\n", - "Score: 22.0\n", - "\n", - "---\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer questions with short factoid answers.\",\"question_prefix\":\"Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often between 1 and 5 words\"}\n", - "Score: 32.0\n", - "\n", - "---\n", - "\n", - "Analysis:\u001b[32m The previous instructions were good because they were clear, concise, and specific. 
They provided a clear context (e.g., science, general knowledge, geography, trivia, history) for the type of questions to be answered, and they specified the format of the answer (e.g., short factoid, single sentence, specific fact or date). However, the first proposed signature had a score of 0.0 because it was too complex and required references, which may not be feasible for all questions.\n", - "\n", - "Proposed Signature: {\"instructions\":\"Answer questions about literature with short factoid answers.\",\"question_prefix\":\"Literature Question:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Answer:\",\"answer_desc\":\"often a single sentence or a specific fact\"}\n", - "\n", - "Score: 20.0\u001b[0m\n", - "\n", - "\n", - "\n" + "StringSignature(question -> answer\n", + " instructions='You are highly intelligent. Please provide short, factual answers to the following questions.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Inquiry:', 'desc': '${question}'})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'usually between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Reply:'})\n", + ")\n" ] } ], "source": [ - "gpt4.inspect_history(n=10)" + "print(program.signature)" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "turbo.inspect_history(n=10)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "\"You are an instruction optimizer for large language models.\\n\\n I will give some task instructions I've tried, along with their corresponding validation scores.\\n - The instructions are arranged in order based on their scores, where higher scores indicate better quality.\\n - Your task is to propose a new instruction that will lead a good language model to perform the task even better.\\n - Be creative, and think out of the box.\\n - Don't repeat instructions, descriptions and prefixes that have already been attempted.\\n \"" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from dspy.teleprompt.signature_opt_typed import GenerateSignature\n", - "GenerateSignature.instructions" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Prediction(\n", - " analysis='The previous instructions were clear and provided a specific format to follow for the response.',\n", - " proposed_signature=BasicQA(question='What are the fields to produce?', answer='analysis, proposed_signature, score'),\n", - " score=4.5\n", - ")" - ] - }, - "execution_count": 14, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "dspy.TypedPredictor(GenerateSignature[BasicQA])()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "\n", - "\n", - "\n", - "Given the fields , produce the fields `analysis`, `proposed_signature`, `score`.\n", - "\n", - "---\n", - "\n", - "Follow the following format.\n", - "\n", - "Analysis: Consider what made the previous instructions good or bad.\n", - "Proposed Signature: A signature that will likely lead to a high score.. Respond with a single JSON object. 
JSON Schema: {\"description\": \"Answer questions with short factoid answers.\", \"properties\": {\"question\": {\"__dspy_field_type\": \"input\", \"desc\": \"${question}\", \"prefix\": \"Question:\", \"title\": \"Question\", \"type\": \"string\"}, \"answer\": {\"__dspy_field_type\": \"output\", \"desc\": \"often between 1 and 5 words\", \"prefix\": \"Answer:\", \"title\": \"Answer\", \"type\": \"string\"}}, \"required\": [\"question\", \"answer\"], \"title\": \"BasicQA\", \"type\": \"object\"}\n", - "Score: The expected score for the new signature. Don't write anything after this number. (Respond with a single float value)\n", - "\n", - "---\n", - "\n", - "Analysis:\u001b[32m The previous instructions were clear and provided a specific format to follow for the response.\n", - "\n", - "Proposed Signature:\n", - "```json\n", - "{\n", - " \"question\": \"What are the fields to produce?\",\n", - " \"answer\": \"analysis, proposed_signature, score\"\n", - "}\n", - "```\n", - "\n", - "Score:\n", - "4.5\u001b[0m\n", - "\n", - "\n", - "\n" - ] - } - ], - "source": [ - "turbo.inspect_history(n=1)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "GenerateSignature[BasicQA]( -> analysis, proposed_signature, score\n", - " instructions='Given the fields , produce the fields `analysis`, `proposed_signature`, `score`.'\n", - " analysis = Field(annotation=str required=True json_schema_extra={'desc': 'Consider what made the previous instructions good or bad.', '__dspy_field_type': 'output', 'prefix': 'Analysis:'})\n", - " proposed_signature = Field(annotation=BasicQA required=True json_schema_extra={'desc': 'A signature that will likely lead to a high score.', '__dspy_field_type': 'output', 'prefix': 'Proposed Signature:'})\n", - " score = Field(annotation=float required=True json_schema_extra={'desc': \"The expected score for the new signature. Don't write anything after this number.\", '__dspy_field_type': 'output', 'prefix': 'Score:'})\n", - ")" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "GenerateSignature[BasicQA]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "GenerateSignature( -> analysis, proposed_signature, score\n", - " instructions=\"You are an instruction optimizer for large language models.\\n\\n I will give some task instructions I've tried, along with their corresponding validation scores.\\n - The instructions are arranged in order based on their scores, where higher scores indicate better quality.\\n - Your task is to propose a new instruction that will lead a good language model to perform the task even better.\\n - Be creative, and think out of the box.\\n - Don't repeat instructions, descriptions and prefixes that have already been attempted.\\n \"\n", - " analysis = Field(annotation=str required=True json_schema_extra={'desc': 'Consider what made the previous instructions good or bad.', '__dspy_field_type': 'output', 'prefix': 'Analysis:'})\n", - " proposed_signature = Field(annotation=~T required=True json_schema_extra={'desc': 'A signature that will likely lead to a high score.', '__dspy_field_type': 'output', 'prefix': 'Proposed Signature:'})\n", - " score = Field(annotation=float required=True json_schema_extra={'desc': \"The expected score for the new signature. 
Don't write anything after this number.\", '__dspy_field_type': 'output', 'prefix': 'Score:'})\n", - ")" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "GenerateSignature" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, { "cell_type": "code", "execution_count": null, From 5dc385fcce33a37cc2857ab913174095ba1fb97e Mon Sep 17 00:00:00 2001 From: thomasahle Date: Wed, 6 Mar 2024 09:42:18 +0000 Subject: [PATCH 121/243] Automatic Style fixes --- dspy/__init__.py | 2 +- dspy/functional/functional.py | 4 ++-- dspy/primitives/module.py | 2 +- dspy/teleprompt/signature_opt_typed.py | 3 ++- examples/generation.py | 5 +++-- 5 files changed, 9 insertions(+), 7 deletions(-) diff --git a/dspy/__init__.py b/dspy/__init__.py index d0abf41b69..d76b3f8284 100644 --- a/dspy/__init__.py +++ b/dspy/__init__.py @@ -1,11 +1,11 @@ import dsp from dsp.modules.hf_client import ChatModuleClient, HFClientSGLang, HFClientVLLM, HFServerTGI +from .functional import * from .predict import * from .primitives import * from .retrieve import * from .signatures import * -from .functional import * settings = dsp.settings diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py index aaf0f26edf..292a6bef14 100644 --- a/dspy/functional/functional.py +++ b/dspy/functional/functional.py @@ -226,7 +226,7 @@ def forward(self, **kwargs) -> dspy.Prediction: else: # If there are no errors, we return the parsed results return Prediction.from_completions( - {key: [r[key] for r in parsed_results] for key in signature.output_fields} + {key: [r[key] for r in parsed_results] for key in signature.output_fields}, ) raise ValueError( "Too many retries trying to get the correct output format. 
" + "Try simplifying the requirements.", @@ -353,8 +353,8 @@ def gold_passages_retrieved(example, pred, _trace=None) -> bool: def hotpot() -> None: - from dsp.utils import deduplicate import dspy.evaluate + from dsp.utils import deduplicate from dspy.datasets import HotPotQA from dspy.evaluate.evaluate import Evaluate from dspy.teleprompt.bootstrap import BootstrapFewShot diff --git a/dspy/primitives/module.py b/dspy/primitives/module.py index d503345ea0..90908e5c3a 100644 --- a/dspy/primitives/module.py +++ b/dspy/primitives/module.py @@ -1,5 +1,5 @@ import copy -from typing import Generator +from collections.abc import Generator import ujson diff --git a/dspy/teleprompt/signature_opt_typed.py b/dspy/teleprompt/signature_opt_typed.py index 956ea0f0cd..1921f8cd1d 100644 --- a/dspy/teleprompt/signature_opt_typed.py +++ b/dspy/teleprompt/signature_opt_typed.py @@ -2,10 +2,11 @@ from typing import Generic, Literal, TypeVar import pydantic + import dspy +from dspy import BaseModel from dspy.functional.functional import TypedChainOfThought, TypedPredictor from dspy.signatures import Signature -from dspy import BaseModel from dspy.signatures.field import InputField, OutputField # TODO: diff --git a/examples/generation.py b/examples/generation.py index 36d3d1c921..4ccb2d8fee 100644 --- a/examples/generation.py +++ b/examples/generation.py @@ -1,8 +1,9 @@ from pydantic import BaseModel, Field -from dspy.teleprompt import LabeledFewShot -from dspy.functional import TypedPredictor import dspy +from dspy.functional import TypedPredictor +from dspy.teleprompt import LabeledFewShot + turbo = dspy.OpenAI(model='gpt-3.5-turbo') colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts) From baecf8cd3898d7bf17de9294c85ef484fcec100c Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Wed, 6 Mar 2024 01:46:20 -0800 Subject: [PATCH 122/243] Removed submodules --- examples/quiz/DSPy_QuizGen_Cache | 1 - examples/tweets/DSPy_TweetGen_Cache | 1 - 2 files changed, 2 deletions(-) delete mode 160000 examples/quiz/DSPy_QuizGen_Cache delete mode 160000 examples/tweets/DSPy_TweetGen_Cache diff --git a/examples/quiz/DSPy_QuizGen_Cache b/examples/quiz/DSPy_QuizGen_Cache deleted file mode 160000 index 27d6d433e7..0000000000 --- a/examples/quiz/DSPy_QuizGen_Cache +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 27d6d433e73b91d3cf677ecf1d757813fcbd611d diff --git a/examples/tweets/DSPy_TweetGen_Cache b/examples/tweets/DSPy_TweetGen_Cache deleted file mode 160000 index 22186fd4d4..0000000000 --- a/examples/tweets/DSPy_TweetGen_Cache +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 22186fd4d4fa940256ca8c4ab70f165276e5c834 From f26c9a0fbb452301b4cd2cb5defad0cf8fc05a5c Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Wed, 6 Mar 2024 02:06:26 -0800 Subject: [PATCH 123/243] Fixed tests for python 3.9 --- dspy/__init__.py | 2 +- dspy/functional/functional.py | 10 +++++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/dspy/__init__.py b/dspy/__init__.py index d76b3f8284..d0abf41b69 100644 --- a/dspy/__init__.py +++ b/dspy/__init__.py @@ -1,11 +1,11 @@ import dsp from dsp.modules.hf_client import ChatModuleClient, HFClientSGLang, HFClientVLLM, HFServerTGI -from .functional import * from .predict import * from .primitives import * from .retrieve import * from .signatures import * +from .functional import * settings = dsp.settings diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py index 
292a6bef14..44f9f5a5d5 100644
--- a/dspy/functional/functional.py
+++ b/dspy/functional/functional.py
@@ -131,7 +131,11 @@ def _prepare_signature(self) -> dspy.Signature:
                 schema = json.dumps(type_.model_json_schema())
             else:
                 # Anything else we wrap in a pydantic object
-                if not (inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel)):
+                if not (
+                    inspect.isclass(type_)
+                    and typing.get_origin(type_) not in (list, tuple)  # To support Python 3.9
+                    and issubclass(type_, pydantic.BaseModel)
+                ):
                     type_ = pydantic.create_model("Output", value=(type_, ...), __base__=pydantic.BaseModel)
                     to_json = lambda x, type_=type_: type_(value=x).model_dump_json()
                     from_json = lambda x, type_=type_: type_.model_validate_json(x).value
@@ -152,8 +156,6 @@ def _prepare_signature(self) -> dspy.Signature:
                 format_ = lambda x: x if isinstance(x, str) else str(x)
                 if type_ in (List[str], list[str], Tuple[str], tuple[str]):
                     format_ = passages2text
-                elif inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel):
-                    format_ = lambda x: x if isinstance(x, str) else x.model_dump_json()
                 # Special formatting for lists of known types. Maybe the output fields should have this too?
                 elif typing.get_origin(type_) in (List, list, Tuple, tuple):
                     (inner_type,) = typing.get_args(type_)
@@ -163,6 +165,8 @@ def _prepare_signature(self) -> dspy.Signature:
                     )
                 else:
                     format_ = lambda x: x if isinstance(x, str) else json.dumps(x)
+                elif inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel):
+                    format_ = lambda x: x if isinstance(x, str) else x.model_dump_json()
 
         signature = signature.with_updated_fields(name, format=format_)
         return signature

From 495e40f9c62c861c191be9895186dacd05883fc7 Mon Sep 17 00:00:00 2001
From: thomasahle
Date: Wed, 6 Mar 2024 10:06:46 +0000
Subject: [PATCH 124/243] Automatic Style fixes

---
 dspy/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dspy/__init__.py b/dspy/__init__.py
index d0abf41b69..d76b3f8284 100644
--- a/dspy/__init__.py
+++ b/dspy/__init__.py
@@ -1,11 +1,11 @@
 import dsp
 from dsp.modules.hf_client import ChatModuleClient, HFClientSGLang, HFClientVLLM, HFServerTGI
 
+from .functional import *
 from .predict import *
 from .primitives import *
 from .retrieve import *
 from .signatures import *
-from .functional import *
 
 settings = dsp.settings
 

From 5c834c6d946e8e59952542f72138048ab5477794 Mon Sep 17 00:00:00 2001
From: Thomas D Ahle
Date: Wed, 6 Mar 2024 02:09:27 -0800
Subject: [PATCH 125/243] Prevent import reorder

---
 dspy/__init__.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/dspy/__init__.py b/dspy/__init__.py
index d76b3f8284..43250b5350 100644
--- a/dspy/__init__.py
+++ b/dspy/__init__.py
@@ -1,12 +1,15 @@
 import dsp
 from dsp.modules.hf_client import ChatModuleClient, HFClientSGLang, HFClientVLLM, HFServerTGI
 
-from .functional import *
-from .predict import *
 from .primitives import *
+from .predict import *
 from .retrieve import *
 from .signatures import *
 
+# Functional must be imported after primitives, predict and signatures
+from .functional import *
+####
+
 settings = dsp.settings
 
 AzureOpenAI = dsp.AzureOpenAI

From 6c91b2c09174a5aa48b1b75d1a68b0e09d830e3a Mon Sep 17 00:00:00 2001
From: thomasahle
Date: Wed, 6 Mar 2024 10:11:41 +0000
Subject: [PATCH 126/243] Automatic Style fixes

---
 dspy/__init__.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/dspy/__init__.py b/dspy/__init__.py
index 43250b5350..a7bc5152af 100644
--- a/dspy/__init__.py
+++ b/dspy/__init__.py
@@ -1,13 +1,13 @@
 import dsp
from dsp.modules.hf_client import ChatModuleClient, HFClientSGLang, HFClientVLLM, HFServerTGI -from .primitives import * +# Functional must be imported after primitives, predict and signatures +from .functional import * from .predict import * +from .primitives import * from .retrieve import * from .signatures import * -# Functional must be imported after primitives, predict and signatures -from .functional import * #### settings = dsp.settings From e16373be5b28e47ca6ea51a7a6bbb82829511b08 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Wed, 6 Mar 2024 02:13:26 -0800 Subject: [PATCH 127/243] Prevent import reorder --- dspy/__init__.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/dspy/__init__.py b/dspy/__init__.py index a7bc5152af..55dd6b6ffc 100644 --- a/dspy/__init__.py +++ b/dspy/__init__.py @@ -1,14 +1,13 @@ import dsp from dsp.modules.hf_client import ChatModuleClient, HFClientSGLang, HFClientVLLM, HFServerTGI -# Functional must be imported after primitives, predict and signatures -from .functional import * from .predict import * from .primitives import * from .retrieve import * from .signatures import * -#### +# Functional must be imported after primitives, predict and signatures +from .functional import * # isort: skip settings = dsp.settings From 26bb3581e207d5432cf9a5f45674d2bfdebfe271 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Wed, 6 Mar 2024 08:33:26 -0800 Subject: [PATCH 128/243] removed sub module --- examples/nli/scone/ScoNe | 1 - 1 file changed, 1 deletion(-) delete mode 160000 examples/nli/scone/ScoNe diff --git a/examples/nli/scone/ScoNe b/examples/nli/scone/ScoNe deleted file mode 160000 index b02532a2f4..0000000000 --- a/examples/nli/scone/ScoNe +++ /dev/null @@ -1 +0,0 @@ -Subproject commit b02532a2f4185c6118a57a148455e0750592d8c8 From 280038658bc42293cc1e92d3037665c1bcb9621b Mon Sep 17 00:00:00 2001 From: SimonB97 <102378134+SimonB97@users.noreply.github.com> Date: Wed, 6 Mar 2024 18:04:16 +0100 Subject: [PATCH 129/243] Update ProgramOfThought.md: Example Uses the extended signature syntax by utilizing a class wrapping dspy.Signature, because .attach() seems deprecated --- docs/api/modules/ProgramOfThought.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/docs/api/modules/ProgramOfThought.md b/docs/api/modules/ProgramOfThought.md index 45814f66b9..21a1079021 100644 --- a/docs/api/modules/ProgramOfThought.md +++ b/docs/api/modules/ProgramOfThought.md @@ -82,11 +82,13 @@ Main method to execute the code generation and refinement process. ```python #Define a simple signature for basic question answering -generate_answer_signature = dspy.Signature("question -> answer") -generate_answer_signature.attach(question=("Question:", "")).attach(answer=("Answer:", "often between 1 and 5 words")) +class GenerateAnswer(dspy.Signature): + """Answer questions with short factoid answers.""" + question = dspy.InputField() + answer = dspy.OutputField(desc="often between 1 and 5 words") # Pass signature to ProgramOfThought Module -pot = dspy.ProgramOfThought(generate_answer_signature) +pot = dspy.ProgramOfThought(GenerateAnswer) #Call the ProgramOfThought module on a particular input question = 'Sarah has 5 apples. She buys 7 more apples from the store. How many apples does Sarah have now?' 
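
The example above assumes a language model has already been configured. A self-contained version of the same pattern might look like the sketch below; the `dspy.OpenAI` client and model name mirror the ones used elsewhere in this patch series and are illustrative assumptions, not part of this change:

```python
import dspy

# Configure the LM that ProgramOfThought will drive to generate and refine code.
# (Client and model name are illustrative; any LM configured here would work.)
turbo = dspy.OpenAI(model='gpt-3.5-turbo')
dspy.settings.configure(lm=turbo)

# The same class-based signature as in the documentation example above.
class GenerateAnswer(dspy.Signature):
    """Answer questions with short factoid answers."""
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")

pot = dspy.ProgramOfThought(GenerateAnswer)
result = pot(question="Sarah has 5 apples. She buys 7 more apples from the store. How many apples does Sarah have now?")
print(result.answer)  # expected to print 12
```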
@@ -94,4 +96,4 @@ result = pot(question=question) print(f"Question: {question}") print(f"Final Predicted Answer (after ProgramOfThought process): {result.answer}") -``` \ No newline at end of file +``` From a82862a4215ec2b734c73cd2e9242a6ef187b362 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Wed, 6 Mar 2024 11:08:51 -0800 Subject: [PATCH 130/243] Richer output from signature optimizer --- dspy/teleprompt/signature_opt_typed.py | 54 +- examples/functional/signature_opt_typed.ipynb | 2236 ++++++++++++++++- tests/functional/test_signature_opt_typed.py | 6 +- 3 files changed, 2205 insertions(+), 91 deletions(-) diff --git a/dspy/teleprompt/signature_opt_typed.py b/dspy/teleprompt/signature_opt_typed.py index 1921f8cd1d..35e103306c 100644 --- a/dspy/teleprompt/signature_opt_typed.py +++ b/dspy/teleprompt/signature_opt_typed.py @@ -1,4 +1,5 @@ import textwrap +from dataclasses import dataclass from typing import Generic, Literal, TypeVar import pydantic @@ -115,12 +116,20 @@ class GenerateSignature(dspy.Signature, Generic[T]): score: float = OutputField(desc="The expected score for the new signature. Don't write anything after this number.") +@dataclass +class OptimizerResult: + program: dspy.Program + signatures: list[dict[str, Signature]] + scores: list[float] + + def optimize_signature( student, evaluator, n_iterations=10, - strategy: Literal["best", "last"] = "best", - sorted_order: Literal["increasing", "decreasing"] = "increasing", + sorted_order: Literal["increasing", "decreasing", "chronological"] = "increasing", + strategy: Literal["last", "best"] = "best", + max_examples=20, # Formerly part of the constructor prompt_model=None, initial_prompts=2, @@ -139,10 +148,12 @@ def optimize_signature( The evaluator to use to score the program. n_iterations : int, optional The number of iterations to run, by default 10 - strategy : Literal["best", "last"], optional - The strategy to use to select the final program, by default "best" - sorted_order : Literal["increasing", "decreasing"], optional + max_examples : int, optional + The maximum number of examples to use for the evaluator, by default 20 + sorted_order : Literal["increasing", "decreasing", "chronological"], optional The order in which to sort the scores, by default "increasing" + strategy : Literal["last", "best"], optional + The strategy to use to select the final program, by default "best" prompt_model : dspy.LanguageModel, optional The language model to use to generate prompts, by default None initial_prompts : int, optional @@ -224,14 +235,17 @@ def optimize_signature( SignatureInfo = type(candidates[name][0]) # noqa: N806 generator = TypedPredictor(GenerateSignature[SignatureInfo]) - demos = [ - dspy.Example( - proposed_signature=info, - score=sc, - ) - for info, sc in zip(candidates[name], scores) - ] - demos.sort(key=(lambda x: x.score), reverse=(sorted_order == "decreasing")) + demos = [dspy.Example(proposed_signature=info, score=sc) for info, sc in zip(candidates[name], scores)] + if sorted_order == "chronological": + demos = demos[-max_examples:] + elif sorted_order == "increasing": + demos.sort(key=(lambda x: x.score), reverse=False) + demos = demos[-max_examples:] + elif sorted_order == "decreasing": + demos.sort(key=(lambda x: x.score), reverse=True) + demos = demos[:max_examples] + else: + raise ValueError(f"Invalid sorted_order: {sorted_order}") generator.predictor.demos = demos if verbose: @@ -240,12 +254,16 @@ def optimize_signature( candidates[name].append(new_signature) if strategy == "last": - return module - - 
if strategy == "best": + pass + elif strategy == "best": i = scores.index(max(scores)) for name, p in named_predictors: p.signature = candidates[name][i].to_signature() - return module + else: + raise ValueError(f"Invalid strategy: {strategy}") - raise ValueError(f"Invalid strategy: {strategy}") + return OptimizerResult( + program=module, + signatures=[{name: sigs[i].to_signature()} for name, sigs in candidates.items() for i in range(n_iterations)], + scores=scores, + ) diff --git a/examples/functional/signature_opt_typed.ipynb b/examples/functional/signature_opt_typed.ipynb index 7447a965ee..115d481406 100644 --- a/examples/functional/signature_opt_typed.ipynb +++ b/examples/functional/signature_opt_typed.ipynb @@ -2,9 +2,18 @@ "cells": [ { "cell_type": "code", - "execution_count": 2, + "execution_count": 7, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The autoreload extension is already loaded. To reload it, use:\n", + " %reload_ext autoreload\n" + ] + } + ], "source": [ "%load_ext autoreload\n", "%autoreload 2\n", @@ -17,7 +26,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ @@ -29,7 +38,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 9, "metadata": {}, "outputs": [ { @@ -40,7 +49,7 @@ ")" ] }, - "execution_count": 4, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } @@ -51,7 +60,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 10, "metadata": {}, "outputs": [ { @@ -60,7 +69,7 @@ "(20, 50)" ] }, - "execution_count": 5, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } @@ -80,7 +89,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ @@ -93,7 +102,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 24, "metadata": {}, "outputs": [ { @@ -101,7 +110,7 @@ "output_type": "stream", "text": [ "Found 1 typed predictors to optimize.\n", - "Generating 4 initial signatures for base...\n", + "Generating 6 initial signatures for base...\n", "\n", "================================================================================\n", "Running eval iteration 0...\n" @@ -111,8 +120,8 @@ "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 4290.32it/s]\n", - "/Users/ahle/repos/dspy/dspy/evaluate/evaluate.py:142: FutureWarning: DataFrame.applymap has been deprecated. Use DataFrame.map instead.\n", + "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 5693.37it/s]\n", + "/Users/ahle/repos/dspy/dspy/evaluate/evaluate.py:145: FutureWarning: DataFrame.applymap has been deprecated. 
Use DataFrame.map instead.\n", " df = df.applymap(truncate_cell)\n" ] }, @@ -130,14 +139,14 @@ "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:02<00:00, 22.35it/s]\n" + "Average Metric: 1 / 50 (2.0): 100%|██████████| 50/50 [00:00<00:00, 5255.36it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 16 / 50 (32.0%)\n", + "Average Metric: 1 / 50 (2.0%)\n", "\n", "================================================================================\n", "Running eval iteration 2...\n" @@ -147,14 +156,14 @@ "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 19 / 50 (38.0): 100%|██████████| 50/50 [00:04<00:00, 10.28it/s]\n" + "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 4871.89it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 19 / 50 (38.0%)\n", + "Average Metric: 17 / 50 (34.0%)\n", "\n", "================================================================================\n", "Running eval iteration 3...\n" @@ -164,14 +173,14 @@ "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 11 / 50 (22.0): 100%|██████████| 50/50 [00:05<00:00, 8.63it/s]\n" + "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 5412.98it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 11 / 50 (22.0%)\n", + "Average Metric: 16 / 50 (32.0%)\n", "\n", "================================================================================\n", "Running eval iteration 4...\n" @@ -181,15 +190,14 @@ "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:02<00:00, 24.53it/s]\n" + "Average Metric: 6 / 50 (12.0): 100%|██████████| 50/50 [00:00<00:00, 5261.43it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", + "Average Metric: 6 / 50 (12.0%)\n", "\n", "================================================================================\n", "Running eval iteration 5...\n" @@ -199,15 +207,14 @@ "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 18 / 50 (36.0): 100%|██████████| 50/50 [00:02<00:00, 21.89it/s]\n" + "Average Metric: 5 / 50 (10.0): 100%|██████████| 50/50 [00:00<00:00, 5405.45it/s]" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 18 / 50 (36.0%)\n", - "Generating new signature for base...\n", + "Average Metric: 5 / 50 (10.0%)\n", "\n", "================================================================================\n", "Running eval iteration 6...\n" @@ -217,14 +224,15 @@ "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 6 / 50 (12.0): 100%|██████████| 50/50 [00:03<00:00, 13.65it/s]\n" + "\n", + "Average Metric: 12 / 50 (24.0): 100%|██████████| 50/50 [00:00<00:00, 5346.47it/s]" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 6 / 50 (12.0%)\n", + "Average Metric: 12 / 50 (24.0%)\n", "Generating new signature for base...\n", "\n", "================================================================================\n", @@ -235,69 +243,2155 @@ "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:02<00:00, 19.56it/s]" + "\n", + "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 2367.98it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - 
"Average Metric: 17 / 50 (34.0%)\n" + "Average Metric: 17 / 50 (34.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 8...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "\n" + "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 5037.72it/s]\n" ] - } - ], - "source": [ - "from dspy.evaluate import Evaluate\n", - "from dspy.evaluate.metrics import answer_exact_match\n", - "from dspy.functional import TypedPredictor\n", - "from dspy.teleprompt.signature_opt_typed import optimize_signature\n", - "\n", - "evaluator = Evaluate(devset=devset, metric=answer_exact_match, num_threads=10, display_progress=True)\n", - "\n", - "program = optimize_signature(\n", - " student=TypedPredictor(BasicQA),\n", - " evaluator=evaluator,\n", - " initial_prompts=4,\n", - " n_iterations=8,\n", - " verbose=True,\n", - " prompt_model=gpt4,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ + }, { "name": "stdout", "output_type": "stream", "text": [ - "StringSignature(question -> answer\n", - " instructions='You are highly intelligent. Please provide short, factual answers to the following questions.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Inquiry:', 'desc': '${question}'})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'usually between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Reply:'})\n", - ")\n" + "Average Metric: 17 / 50 (34.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 9...\n" ] - } - ], - "source": [ - "print(program.signature)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 12 / 50 (24.0): 100%|██████████| 50/50 [00:00<00:00, 4994.05it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 12 / 50 (24.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 10...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 11 / 50 (22.0): 100%|██████████| 50/50 [00:00<00:00, 5207.99it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 11 / 50 (22.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 11...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5178.03it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 15 / 50 (30.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 12...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 6126.65it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": 
"stream", + "text": [ + "Average Metric: 15 / 50 (30.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 13...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 1013.36it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 17 / 50 (34.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 14...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 4902.06it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 17 / 50 (34.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 15...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 4703.72it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 16 / 50 (32.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 16...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 5067.42it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 16 / 50 (32.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 17...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 5343.06it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 17 / 50 (34.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 18...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 5155.11it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 16 / 50 (32.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 19...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 19 / 50 (38.0): 100%|██████████| 50/50 [00:00<00:00, 673.08it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 19 / 50 (38.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 20...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 5469.88it/s]\n" + ] + }, + { + "name": "stdout", + 
"output_type": "stream", + "text": [ + "Average Metric: 16 / 50 (32.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 21...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 4980.53it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 17 / 50 (34.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 22...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 19 / 50 (38.0): 100%|██████████| 50/50 [00:00<00:00, 6185.74it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 19 / 50 (38.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 23...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5566.87it/s]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 15 / 50 (30.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 24...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 4908.72it/s]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 15 / 50 (30.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 25...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 968.52it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 16 / 50 (32.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 26...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 4921.16it/s]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 15 / 50 (30.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 27...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Average Metric: 18 / 50 (36.0): 100%|██████████| 50/50 [00:00<00:00, 5208.37it/s]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 18 / 50 (36.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 28...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Average Metric: 20 / 50 (40.0): 100%|██████████| 50/50 [00:00<00:00, 
5443.61it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 20 / 50 (40.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 29...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 20 / 50 (40.0): 100%|██████████| 50/50 [00:00<00:00, 5854.70it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 20 / 50 (40.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 30...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 19 / 50 (38.0): 100%|██████████| 50/50 [00:00<00:00, 5709.64it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 19 / 50 (38.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 31...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 18 / 50 (36.0): 100%|██████████| 50/50 [00:00<00:00, 810.21it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 18 / 50 (36.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 32...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 19 / 50 (38.0): 100%|██████████| 50/50 [00:00<00:00, 3970.45it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 19 / 50 (38.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 33...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 4379.74it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 17 / 50 (34.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 34...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 3614.53it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 15 / 50 (30.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 35...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 3822.32it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 17 / 50 (34.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 36...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 20 / 50 (40.0): 100%|██████████| 50/50 
[00:00<00:00, 3541.11it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 20 / 50 (40.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 37...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 18 / 50 (36.0): 100%|██████████| 50/50 [00:00<00:00, 2972.87it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 18 / 50 (36.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 38...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 2611.94it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 17 / 50 (34.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 39...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 18 / 50 (36.0): 100%|██████████| 50/50 [00:00<00:00, 2124.95it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 18 / 50 (36.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 40...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 2477.12it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 17 / 50 (34.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 41...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 18 / 50 (36.0): 100%|██████████| 50/50 [00:00<00:00, 5950.21it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 18 / 50 (36.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 42...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 19 / 50 (38.0): 100%|██████████| 50/50 [00:00<00:00, 5514.47it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 19 / 50 (38.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 43...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 18 / 50 (36.0): 100%|██████████| 50/50 [00:00<00:00, 5892.86it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 18 / 50 (36.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 44...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 16 / 50 (32.0): 
100%|██████████| 50/50 [00:00<00:00, 2642.45it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 16 / 50 (32.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 45...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 19 / 50 (38.0): 100%|██████████| 50/50 [00:00<00:00, 4842.19it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 19 / 50 (38.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 46...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 5380.35it/s]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 17 / 50 (34.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 47...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Average Metric: 19 / 50 (38.0): 100%|██████████| 50/50 [00:00<00:00, 5425.87it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 19 / 50 (38.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 48...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 21 / 50 (42.0): 100%|██████████| 50/50 [00:00<00:00, 5317.32it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 21 / 50 (42.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 49...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 21 / 50 (42.0): 100%|██████████| 50/50 [00:00<00:00, 5832.88it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 21 / 50 (42.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 50...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 1801.99it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 15 / 50 (30.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 51...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 4837.94it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 15 / 50 (30.0%)\n", + "Generating new signature for base...\n", + "\n", + "================================================================================\n", + "Running eval iteration 52...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average 
Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5629.33it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 15 / 50 (30.0%)\n", + "Generating new signature for base...\n" + ] + }, [... stream outputs for eval iterations 53-99 elided: each remaining iteration reports "Average Metric: 15 / 50 (30.0%)" before generating a new signature ...] + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "from dspy.evaluate import 
Evaluate\n", + "from dspy.evaluate.metrics import answer_exact_match\n", + "from dspy.functional import TypedPredictor\n", + "from dspy.teleprompt.signature_opt_typed import optimize_signature\n", + "\n", + "evaluator = Evaluate(devset=devset, metric=answer_exact_match, num_threads=10, display_progress=True)\n", + "\n", + "result = optimize_signature(\n", + " student=TypedPredictor(BasicQA),\n", + " evaluator=evaluator,\n", + " initial_prompts=6,\n", + " n_iterations=100,\n", + " max_examples=30,\n", + " verbose=True,\n", + " prompt_model=gpt4,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "predictor = Predict(BasicQA(question -> answer\n", + " instructions='Answer questions with short factoid answers.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}'})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:'})\n", + "))" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "result.program" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'base': [SignatureInfo[BasicQA](instructions='Answer questions with short factoid answers.', question_prefix='Question:', question_desc='${question}', answer_prefix='Answer:', answer_desc='often between 1 and 5 words'),\n", + " SignatureInfo[BasicQA](instructions='You are a knowledgeable AI. Provide concise answers to the following questions.', question_prefix='Q:', question_desc='${question}', answer_prefix='A:', answer_desc='a brief, factual response'),\n", + " SignatureInfo[BasicQA](instructions='You are an expert in your field. Respond to the inquiries with short, factual answers.', question_prefix='Inquiry:', question_desc='${question}', answer_prefix='Response:', answer_desc='typically a few words, factual'),\n", + " SignatureInfo[BasicQA](instructions='You are a highly intelligent AI. Please provide succinct answers to the questions.', question_prefix='Query:', question_desc='${question}', answer_prefix='Reply:', answer_desc='usually 1-5 words, factual'),\n", + " SignatureInfo[BasicQA](instructions='You are as smart as ChatGPT. Please answer the following questions briefly and accurately.', question_prefix='Question:', question_desc='${question}', answer_prefix='Answer:', answer_desc='a short, factual response'),\n", + " SignatureInfo[BasicQA](instructions='You are a professor of knowledge. Please provide short, fact-based answers to the questions.', question_prefix='Q:', question_desc='${question}', answer_prefix='A:', answer_desc='a brief, factual answer'),\n", + " SignatureInfo[BasicQA](instructions='You are a well-informed AI. Please respond to the questions with concise, factual answers.', question_prefix='Query:', question_desc='${question}', answer_prefix='Response:', answer_desc='typically a few words, factual'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with extensive knowledge, provide precise and factual responses to the questions. 
Keep your answers brief, typically within 1-5 words.', question_prefix='Interrogative:', question_desc='${question}', answer_prefix='Rejoinder:', answer_desc='a concise, factual answer'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a vast database of information, your task is to provide accurate and succinct answers to the following questions. Your responses should be factual and typically consist of 1-5 words.', question_prefix='Prompt:', question_desc='${question}', answer_prefix='Retort:', answer_desc='a short, factual answer'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with comprehensive knowledge, your role is to provide brief and factual answers to the following questions. Your responses should be accurate, typically not exceeding five words.', question_prefix='Posed Question:', question_desc='${question}', answer_prefix='Concise Reply:', answer_desc='a factual answer, usually within 1-5 words'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge base, your task is to provide precise and factual answers to the questions asked. Your responses should be concise, typically not exceeding five words.', question_prefix='Inquiry:', question_desc='${question}', answer_prefix='Repartee:', answer_desc='a brief, factual answer'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide succinct, factual answers to the questions posed. Your responses should be accurate and typically not exceed five words.', question_prefix='Interrogation:', question_desc='${question}', answer_prefix='Rebuttal:', answer_desc='a brief, factual answer, usually within 1-5 words'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with access to a vast array of information, your task is to provide accurate, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words.', question_prefix='Prompt:', question_desc='${question}', answer_prefix='Retort:', answer_desc='a brief, factual answer'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with access to a vast knowledge base, your task is to provide accurate, factual answers to the questions posed. The questions will be about various topics, and your responses should be succinct, typically consisting of 1-5 words.', question_prefix='Inquiry:', question_desc='${question}', answer_prefix='Response:', answer_desc='a brief, factual answer'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a vast reservoir of information, your task is to provide precise, factual answers to the questions asked. Your responses should be succinct, typically consisting of 1-5 words, and cover a wide range of topics.', question_prefix='Enquiry:', question_desc='${question}', answer_prefix='Rejoinder:', answer_desc='a brief, factual answer'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with access to a vast repository of information, your task is to provide accurate, factual answers to the questions posed. The questions could be about any topic, so use your extensive knowledge to provide the most accurate response. 
Your answers should be succinct, typically consisting of 1-5 words.', question_prefix='Interrogation:', question_desc='${question}', answer_prefix='Repartee:', answer_desc='a brief, factual answer, demonstrating accuracy and breadth of knowledge'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a vast compendium of information, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and cover a broad spectrum of topics.', question_prefix='Probing:', question_desc='${question}', answer_prefix='Repartee:', answer_desc='a concise, factual answer'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge base, your task is to provide accurate, factual answers to a wide range of questions. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge and understanding.', question_prefix='Inquiry:', question_desc='${question}', answer_prefix='Retort:', answer_desc='a brief, factual answer demonstrating accuracy and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge base, your task is to provide accurate, factual answers to a wide range of questions, from general knowledge to specific facts. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge and understanding. Accuracy is paramount.', question_prefix='Interrogative:', question_desc='${question}', answer_prefix='Rejoinder:', answer_desc='a brief, factual answer demonstrating accuracy and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics.', question_prefix='Examination:', question_desc='${question}', answer_prefix='Rejoinder:', answer_desc='a brief, factual answer demonstrating precision and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge repository, your task is to provide accurate, factual responses to the questions asked. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy and precision are paramount.', question_prefix='Interrogation:', question_desc='${question}', answer_prefix='Repartee:', answer_desc='a brief, factual answer demonstrating precision and breadth of knowledge'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge repository, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy and precision are paramount.', question_prefix='Interrogative:', question_desc='${question}', answer_prefix='Rejoinder:', answer_desc='a brief, factual answer demonstrating precision and breadth of knowledge'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. 
Accuracy and precision are paramount.', question_prefix='Examination:', question_desc='${question}', answer_prefix='Rejoinder:', answer_desc='a brief, factual answer demonstrating precision and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge base, your task is to provide accurate, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount.', question_prefix='Interrogation:', question_desc='${question}', answer_prefix='Repartee:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge base, your task is to provide accurate, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount.', question_prefix='Interrogative:', question_desc='${question}', answer_prefix='Rejoinder:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide accurate, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Precision and brevity are key.', question_prefix='Interrogative Statement:', question_desc='${question}', answer_prefix='Concise Response:', answer_desc='a brief, factual answer demonstrating precision and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge base, your task is to provide accurate, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount.', question_prefix='Interrogative Query:', question_desc='${question}', answer_prefix='Concise Rejoinder:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide accurate, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Precision, brevity, and accuracy are paramount.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Rebuttal:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge repository, your task is to provide precise, factual responses to the inquiries posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. 
Accuracy, precision, and brevity are paramount.', question_prefix='Interrogative Examination:', question_desc='${question}', answer_prefix='Succinct Rejoinder:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge repository, your task is to provide precise, factual responses to the inquiries posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount.', question_prefix='Interrogative Examination:', question_desc='${question}', answer_prefix='Succinct Rejoinder:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Succinct Retort:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the inquiries posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount.', question_prefix='Interrogative Examination:', question_desc='${question}', answer_prefix='Succinct Rejoinder:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount.', question_prefix='Interrogative Query:', question_desc='${question}', answer_prefix='Succinct Rebuttal:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge repository, your task is to provide precise, factual responses to the inquiries posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount.', question_prefix='Interrogative Scrutiny:', question_desc='${question}', answer_prefix='Succinct Rejoinder:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge repository, your task is to provide precise, factual responses to the inquiries posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. 
Accuracy, precision, and brevity are paramount.', question_prefix='Interrogative Scrutiny:', question_desc='${question}', answer_prefix='Precise Retort:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount.', question_prefix='Interrogative Inquiry:', question_desc='${question}', answer_prefix='Succinct Riposte:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount.', question_prefix='Interrogative Assessment:', question_desc='${question}', answer_prefix='Concise Counterpart:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Remember, your goal is to provide the most accurate and concise answer possible.', question_prefix='Interrogative Assessment:', question_desc='${question}', answer_prefix='Concise Counterpoint:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge base, your task is to provide accurate, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context of the question to provide the most relevant answer.', question_prefix='Interrogative Assessment:', question_desc='${question}', answer_prefix='Concise Counterpoint:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Additionally, consider the context of the question to provide the most relevant and accurate answer.', question_prefix='Contextual Inquiry:', question_desc='${question}', answer_prefix='Contextual Response:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge base, your task is to provide accurate, factual answers to the questions posed. 
Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Analysis:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with an extensive knowledge base, your task is to provide accurate, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your wide-ranging knowledge across various topics. Accuracy, precision, and brevity are key. Consider the context of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Exploration:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. Consider the context of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Scrutiny:', question_desc='${question}', answer_prefix='Precise Rejoinder:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. Consider the context of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Exploration:', question_desc='${question}', answer_prefix='Precise Rejoinder:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. 
Consider the context of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Scrutiny:', question_desc='${question}', answer_prefix='Precise Rejoinder:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the questions asked. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Query:', question_desc='${question}', answer_prefix='Precise Response:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the questions asked. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Dissection:', question_desc='${question}', answer_prefix='Precise Elucidation:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Dissection:', question_desc='${question}', answer_prefix='Precise Elucidation:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Dissection:', question_desc='${question}', answer_prefix='Precise Elucidation:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. 
Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", [... roughly twenty verbatim repeats of this same 'Interrogative Probe:' / 'Concise Clarification:' SignatureInfo entry elided ...] + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. 
Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. 
Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. 
Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. 
Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. 
Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", + " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. 
Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding')]}" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "result.signatures" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "StringSignature(question -> answer\n", + " instructions='You are highly intelligent. Please provide short, factual answers to the following questions.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Inquiry:', 'desc': '${question}'})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'usually between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Reply:'})\n", + ")\n" + ] + } + ], + "source": [ + "print(result.program.signature)" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[]" + ] + }, + "execution_count": 26, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAh8AAAGdCAYAAACyzRGfAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABVjElEQVR4nO3deXhc5Xk3/u+ZVetIlmRrQZJ3MMYLYIMRNg6LA5iUQPDbXyBOYygXlNSkgK82iRuykISKpr8fIfR1TJuXmKbBcUtelkAbXDAgbPAqMIsJxjbGkrElW5al0Taj0cz5/THznDmznJlzZs6MlvP9XJcurJmR5vGx8bl13/dzP5IsyzKIiIiI8sQ22gsgIiIia2HwQURERHnF4IOIiIjyisEHERER5RWDDyIiIsorBh9ERESUVww+iIiIKK8YfBAREVFeOUZ7AfFCoRBOnDiB0tJSSJI02sshIiIiHWRZRl9fH+rq6mCzpc5tjLng48SJE2hoaBjtZRAREVEG2tvbUV9fn/I1Yy74KC0tBRBevMfjGeXVEBERkR5erxcNDQ3KfTyVMRd8iFKLx+Nh8EFERDTO6GmZYMMpERER5RWDDyIiIsorBh9ERESUVww+iIiIKK8YfBAREVFeMfggIiKivGLwQURERHnF4IOIiIjyisEHERER5RWDDyIiIsorBh9ERESUVww+iIiIKK/G3MFyRDS+dfX78dRbn2FgeCTm8QX1ZfjKRamP2SYia2DwQUSm2vTWUWx4/UjC45IEXDF7MqpK3KOwKiIaSxh8EJGpjpwaAABced5kXFDnAQA8ueMofIEQegYDDD6IiMEHEZnrWPcgAOAbTVNx9ZxqAMBz73yOE70+DMaVYojImthwSkSmkWUZbWfCmY/GimLl8UKXHQAwOBwclXUR0djC4IOITNM9MIyB4SAkCaifVKg8XuwOJ1mZ+SAigMEHEZlIlFxqPAUocNqVxwudzHwQURSDDyIyTduZcPDRWFEU87iS+fAz+CAiBh9EZKK2SOZjamVs8CF6PuJnfxCRNTH4ICLTHNPKfLDhlIhUGHwQkWnauiM7XSqLYx4vcrHhlIiiGHwQkWlE5mNqXOajiJkPIlJh8EFEphgaDuJUnx9AYs8HG06JSI3BBxGZov1sOOtRWuBAWaEz5jllq22AwQcRZRl8PPLII5AkCffff7/ymM/nw9q1a1FZWYmSkhKsWrUKnZ2d2a6TiMY4peRSWQRJkmKeK3ZHgg8/ez6IKIvgY+/evfiXf/kXLFiwIObxBx54AC+++CKeeeYZtLS04MSJE7jllluyXigRjW3KNtuK4oTnCiMNp9xqS0RAhsFHf38/Vq9ejV/96leYNGmS8nhvby+efPJJPProo7j66quxaNEibNq0CW+//TZ27dpl2qKJaOwRZ7o0xDWbAtGttkNsOCUiZHiq7dq1a/GlL30JK1aswE9/+lPl8dbWVgQCAaxYsUJ5bM6cOWhsbMTOnTtx2WWXJXwvv98Pv9+vfO71ejNZElFK77Sdxf/3PwfhD4RiHr98VhXWffHcvK8nGJLx4PMfYN45ZVi9ZGre3z8XjmkMGAPUQ8YYfBBRBsHHli1b8M4772Dv3r0Jz3V0dMDlcqG8vDzm8erqanR0dCT9fs3NzXjooYeMLoPIkF/vOIq3Dp9JeHzfsbP4fxbXo35S4g0zlw6c6MXv9rSjtOAkvnZpY0KPxHgULbsky3yE/6lh5oOIAINll/b2dtx33314+umnUVBQYMoC1q9fj97eXuWjvb3dlO9LpCaaIe+9ahae+PrFeOLrF2NOTSkAYMehrryv55Q3nO3r842gZzCQ9/c3WzAk43j3EACgMUnmQzScsueDiACDwUdraytOnTqFiy++GA6HAw6HAy0tLXj88cfhcDhQXV2N4eFh9PT0xHxdZ2cnampqkn5Pt
9sNj8cT80FktmORfoQbF9bh+nm1uH5eLa69IPx3cvvh/Acfp/ujpUZRrhjPOrw+DAdDcNol1JYVJjxfqEw4ZeaDiAwGH9dccw0++OAD7N+/X/lYvHgxVq9erfza6XRi27ZtytccPHgQbW1taGpqMn3xRHr0DA7D6wv/xK0+c2T57CoAwFuHuxAMyXldU1efKviIBEbjmfg91E8qgt2WWEISDafDIyEEgqGE54nIWgz1fJSWlmLevHkxjxUXF6OyslJ5/M4778S6detQUVEBj8eDb33rW2hqakrabEqUD6IXYXKpW2l8BICFDeUocTvQMxjAgRO9WFBfnrc1dakyH+0TIPMhfg/xB8oJ6us+OBxEWSHnGxJZmen/Avz85z/Hn/3Zn2HVqlVYvnw5ampq8Oyzz5r9NkS6aZ034rTbcNmMSgDA9jz3fXT1Dyu/Fusbz7ROsxVcdhsckYwIm06JKKOttmpvvPFGzOcFBQXYsGEDNmzYkO23JjKFyHwka4Rcfm4VXv1TJ7YfOo21V83K25omWs9Hqm22ACBJEgpddvT5Rth0SkQ824UmvrYz2pM3l80K9320Hjub1+PerVZ2AbjdloiiGHzQhHesO9wM2ViZuAtjelUxzikvRCAoY/fR7rytSd1w2uH1wTfOD1yLnuuSGOAJRWK7Lc93IbI8Bh804bUp/QiJN0ZJknBFZNfL9k/y0/fhHwkqu28cNgmyDBw/O36zH72DAfQOhWeVNFQkBnhCkYsn2xJRGIMPmtD8I0Gc9PoAaPcjXDF7MgBgx+HTeVmTaDZ12iXMrg4POhvPTafq3URFLu02MvHcoJ/BB5HVMfigCe342SHIcvin7spiV9LXXD6zEpIEfNLZj45eX87XJEoulcVuTIsERG3juO9DlLWSjVVXK3JxyikRhTH4oAmtTbUFVOv8lEnFLsw/pwwAsCMP005Fs2lVqUtp0BzPmY9022wFNpwSkcDggyY0MXlTq+QiiL6PHYdyX3pRgo8St7L9dzxnPpQAL801LmTmg4giGHzQhNYWOews1S4MAFg2S/R9dCGU41HrouejqsStbP8d18FHmhkfghixzswHETH4oAmtLdKP0JCmJHDx1HIUuezo6h/Gxx19OV3T6UjPx+RSt1KqaOsezHnQkyttOmZ8ANHD5QbYcEpkeQw+KGt//OAklv3ja3i37ayhr/u7Z97DLb98K6cHjWmNVo/ndtixZHoFAGB7itLLkdP9WPFoC55953jGa1KXXerKC+CwSRgeCaGzL3fNrj97+WPctOEt9A4GdH/N0HAQX3p8O370hwOarxkeCeFEbzi7lGwrs5qS+Qiw7EJkdQw+KGv/9cFJHD87hG1/OqX7a/wjQfz+neN4p60HR7tyc6qrLMu6SwKAesutdtPp5t1tOHyqH/+5rz3jdUWDDxccdhvOmRSejZGrptOh4SD+z/ajeK+9B1s/6tD9dR983osDJ7z4j73tkOXkWZnjZweV3URVJcl3EwlFbmY+iCiMwQdlTdxM1SPD0xFbYAEoA6rMdqrPD/9ICHabhLpy7eFXgmg63XO0W3Pi6I7IAXRtWQQKStmlxA0AMaWXXNjzWTeGI9mlHQYO0BN/nkOBYMxZNGrHutPvJhKUIWNsOCWyPAYflDXRQGkk+FDfvI2UAowQmYS68gI47en/qs+aUoJqjxv+kRD2fpY4ar3T68PBznA/yEmvD/6RzH6CVxpOS+OCjxxlPrZ/Ei0jGWmoVf95aq2tTec2W0AdfDDzQWR1DD4oa+ImdVp1THw6YgsskLvMh7LNNk0vghAetR4pvSTJEKgfC49EHzK8puGRkPL7rYpkPkRJKFen26rLSN0Dw/jopFfX16nPn9EqCUXPdNETfETKLgw+iCyPwQdlZXgkhJ5I5kJ9s0pHbIEFgJ4cBR/ipNV0O13UlHNekgQf8Y2omWQqzgyEr5HdJqG80Akg2qiZi7LLKa8PH3f0QZKARVMnAUj+e0tGHUxqrU3Z6ZJmKzOg3mrLsguR1TH4oKyImykAnO73azYmxhNbYIEcZj4MNJsKS2eFg4+PTnqV3gwACIVk7Dh8BkA0Y6HO3ujV1Re+oVcWu2CzhXskomUX8xtvRdZjXl0ZblxQG3lM3yC1mLKLZvAROTFYR4CnDBljwymR5TH4oKyImykQzoL06TwuXZ3G9+as7KJvm61aVYkbc2s9AIC3j0QzBB939KGr349Cpx1/FrmJZ1ImETf0yZF+DyA6GfTsYABen7nXQpSKrphdhSvODZeU9n52Vtegr9MxZZfEwChmN5GOa1wc2e0yxFNtiSyPwQdlJb7JVE/pRX3TAoCeQf29Ika0d+sb+x0vWelFZAsum1GBWVNKYr6/EadVMz6EErdD2aZqZtOpLMvYHsl8LJtdhRlVxagrK8DwSAh7kjTUxovNfCT2t5zu88MXCO8mEtuFU1EOltMZoBLRxMXgg7ISvwWzS0fTqdgCK+Si7NLvH8GZgfBa9JQE1ETT6fZDp5UykghEls2eHG0QzSBQ6EoSfADRvhQz+z4OdvbhdF84W7No6iRIkoRlOs+wkWU5Jvjo6vcnBA0i86N3N5FoOPWPhBAcp9NcicgcDD4oKwmZDx3bbeNv2rloOBVlgopiF0oLnIa+dvG0SXA7bOj0+nH4VD98gSD2HA1nCq6YXZXVSHRRpqoqjR3INTUHp9tu/yQcMC2ZUQG3I5x1iAZWqZtOB4aD8AXCAWKhM/y18YGR3tNsBZH5ADjrg8jqGHxQVtQ9H4De4CMcGLgd4b9+uch8GJk/Ea/AacelkVHrbx7qwr7PzsI/EkK1x43ZU0pQV14Iu02CfySkOXxLi3j95LjMh9gtom7EzZYouYiAAwg31EpSuIflVIpx7qJ8Vui049zqcJkpPjASDbLpxqoLbocNkR5bzvogsjgGH5QVEWyI4ZZ6ej5Er8QFdeHGzlw0nOo97EzLFaryhNhiu2zWZEiSBKfdhnPKMxuJLq5PfNllqslll3C2Jrw7R/xegHAmaF5dGQDgrRRj5NWNsaIkFN/jYmR0PRCeo1IcKb0w+CCyNgYflBVxk5peFf7pV8+gMdErsKC+HADQMxjQvUVXr0y22aqJbMGuT7vx+sHwmTXLz43exBuVMomxTIVWz0djFn0kybQeOwtfIJqtURN9H6Isk3qdLtUQtNjf67EMArxCNp0SERh8UJbEdszzazwxn6cibrDzzwn/BD4Skk3/STibsgsAzKkpRVWJG0OBID7p7AcQnQECRIMFoztekm21BaKZjxM9Qxgeyf6UX6VBNpKtUbsi8vvYfrhLM+gTQWRViVuZEJtYdjF+jbndlogABh+UJXEznVNTGvN5KuKGfV5NKVz23PR9REsC+voR4kmShGWzKpXPz6/1xGQrlAZRA8FHIBjC2UExWj224XRyqRsFThtCcjgAyZYoFalLLsKiaZNQ4LThdJ9fOasmnggiq0rdSQOtmN1EBrJLonmVmQ8ia2PwQRlT30znRAZzpQs+4m9ansiI8R4TD5cLBEP4PHIDzzTzAcQ2ai6Pu4lnst22O/L7
ttskTCqKDT4kSYqWctIENL1DqctUZ/r9OHAifH6LOlsjuB12LJkeDqy0TrlVl4fEuo6fHcJI5HRckfWYVOSEx8BuomI3D5cjIgYflAX1zVQM3upKM2Jd9EiIm1ZZYTgNb2bm42SPD8GQDLfDhilx5Q0jlqkCjmVxwUcmczlENqFCNVpdTewa+axLu4/khf2fY+FD/4Mte9s1X/PWkXCj6fm1noTyjqA01Go0nYrG2MklLtR4CuBy2DASknGyN7xDRhmrbjCzVMSGUyICgw/KgvpmKm7yvkAo5amlSp9A5KZVHskAmBl8dHjDN8jasoKkN3m9qj0FuP3yaVhxfrWSKRBEOad7YBh9OkeiazWbCvPOCWePUk0f/X3r8Zj/JrP9E+2Si3BhQzkA4FCknyXVWm02CQ2TYnf3ZDK6HojO+uCcDyJrY/BBGVPfoIrdDuXGkmq7bfxZIGWRskvvkHkj1tPd5I340ZcvwP9ZsxguR+z/KiVuByqLIyPRdWY/lD6KuH4PQQQLbx3uSjoBVD3sbH97T9JzYGRZVrIZqYIP0adxsjd5g6uYVFsVCSob4zI9RrfZCsx8EBHA4IOyIG5QIrUvbvap+j7it2dGgw/zMh9mBh+pKKUXnX0fyvXSWNfC+nKUuh3oGQzgwInehOf3ftatjKUPhmTsjJRX1I6c7sfJXh9cDhsumVahuZbJJW4UuewIycDxs4nr74obhiYyPWK7rQg+GjLNfLDhlMjSGHxQxuJ/khf/TbXdNlp2yWHwoezUSJ5hMIv4qV9v5kNrm63gsNvQNDNc3kk2/jy+OTRZs6j4uiXTK1DgtCc8L6gbXOPXPzg8omQmEjIf2ZZd2HBKRDAYfGzcuBELFiyAx+OBx+NBU1MT/vjHPyrPX3nllZAkKebjnnvuMX3RNDbE/3SsJ/OhVXYxc7dLspNjc8Hodls9GZnoibqJB7+9GQksblxYp/ma6HwP7ZKLoNU0K0bmFzhtKI5kKtSB1ohqN5HRrcxFznDZJVVfEBFNfIaCj/r6ejzyyCNobW3Fvn37cPXVV+Omm27CgQMHlNfcddddOHnypPLxs5/9zPRF09gQfzMVPyVrTTkNJLlp5SLzcVoc3jbmyi7pMzJie2/rsbMxTZmn+/z408nw9tm/vfZc2G0SPjszGDN7Y3gkhF2fnon5PqloHWZ3uj/csFtV4lYGlKkzHyciu4lcGewmim61ZdmFyMoMBR833ngjbrjhBsyePRvnnnsuHn74YZSUlGDXrl3Ka4qKilBTU6N8eDwe0xdNY0P8zTRd5uNEz1DCTau8aPz2fExVDoPTGXzoCIqmVhahflIhAkEZuz+N7noR57BcUOfB1MpiXNxYDiC2PPNO21kMDgdRVeJShr6lXr9G8JFknSLQ6vOPYP/xHgDhgMTobiI2nBIRkEXPRzAYxJYtWzAwMICmpibl8aeffhpVVVWYN28e1q9fj8HB1P8w+/1+eL3emA8aH+JvpqKXQWu3i/oIdnHTymXDqVZvhVnEzfvzniEEgulHousJiiRJUpVeooGFUk6JPLdsVjizseNwtPQiekCWzqrSFRSI7c7xI+KTrbPAaUeNpyDyPuH3NNrvAXCrLRGFGQ4+PvjgA5SUlMDtduOee+7Bc889h7lz5wIAvva1r+G3v/0tXn/9daxfvx7//u//jq9//espv19zczPKysqUj4aGhsx+J5R38TepyZGGU63MR3y/B2B+8CHLckIvSq5MKXXD7bAhGJLTjkQfCYbQPaivHCRKJqKnI7x9NjK7IxJ0LFO25Z5RtuVuV7bYpi+5ALHbZ9WD4aLBmyvp60UgZGSsuhANPpj5ILIyh9EvOO+887B//3709vbi97//PdasWYOWlhbMnTsXd999t/K6+fPno7a2Ftdccw2OHDmCmTNnJv1+69evx7p165TPvV4vA5BxINnNNFp2Sd7zkWx7ptlll4HhIHyBcBYi17tdxI6RQ6f6cezMYMrmy+6BYcgyYJPCQ9lSuXxmJSQJOHSqHx29Pnh9AXR6/XA7bFg8bRIAYGF9GUoLHOgdCuCDz3sxrbII70fKIXqaTQHgnPJC2KTwIW+n+/yYEslspDp5d89n3cqU00xG1ytlFz+DDyIrM5z5cLlcmDVrFhYtWoTm5mYsXLgQv/jFL5K+dsmSJQCAw4cPa34/t9ut7J4RHzT2dQ8m3kzT9XyI0erqwVQeVeYjlGSwllGi5FPksis3ulzSu91W7MCpKHbBnqYkUl7kwoL6cgDh8eci03Cpavusw27D5TPF+Syn8faRM5Bl4NzqEtSUFehau8thQ115YcL6RTlN6+Rd5fNMMh+i4TTAsguRlWU95yMUCsHvT36z2b9/PwCgtrY227ehMSY6Wt2t3EzFbpfB4WDSU0vbusVOl8SyiyyHmxmzXleemk0FcR5LuuCjq9/YDhzl2PtDp5Xyy/K4cooor7x5qEt5jegF0asxyY6XVJmP2K81fmJwdMgYMx9EVmboR8P169dj5cqVaGxsRF9fHzZv3ow33ngDW7duxZEjR7B582bccMMNqKysxPvvv48HHngAy5cvx4IFC3K1fhol0ZtptIRQ7LKjwGmDLxBCV78fxe7oXy9ZltEWyXyo0/Vuhx2FTjuGAkH0DgaUYCTjdaUZYW62xgpx5on2YXCA6qA2nU2wy2ZX4X+/fhg7DnUp/RHxh9uJxtR3jp1V3j/VSPVkplYW4e0jZ2JmlWgGH6o/N0kC6iPnvRhR7BJzPpj5ILIyQ8HHqVOn8I1vfAMnT55EWVkZFixYgK1bt+KLX/wi2tvb8eqrr+Kxxx7DwMAAGhoasGrVKjz44IO5WjuNomQ3U0mSUFXixvGzQ+jq98f0QJwZGMbAcDBy04r9Cbqs0BkOPkzo+8jXNlshut02dcOp0XVd3DgJRS47zgxEMybx22enVhajsaIIbd2D6PT64bRLWDJDe6R6MkrmRhU8aZ1Bo/7zrPEUpJygqqUwkvnwBUIIhuS0JSgimpgMBR9PPvmk5nMNDQ1oaWnJekE0PmjdTCeXhoMPMStCEGn9ZDetskInOrw+U4KP03EHouWaKEW0nRmALMvKUK540eulLyPjcthw2YxKvPbxKQDhjEay771sdhU2724DACyaOslwn0t8z8rQcFCZPhp/DScVOVHqdqDPP5JRsykQzXwA4UbXEnfu+3KIaOzh//lxPunsgwRgdnX6IU1G7f70jDLhU6gsceMKnXMZjGg7M4h9x2KPZrfbJFx57hSUFekvbfQOBfBJZ1/CIWVaN1OtptP2FAeRifX0mHCybb622Qr1kwohSeFdNmcGhjUzG0Z7PoBwwKEOPpJZrgo+9G6xVYs/30VcP5fDhtK4wECSJDRUFOGjk96Mg48Cpw2SFO7xGRweYfBBZFH8P18lEAxh1S/fhgxg5/qrUVqQXf+B2kcnvPjqv+5K+tymOy7BVedNMe29AGD1k7vQnqQU8GcLavG/v3ax7u/z989+gP/64CT+9S8W4doLapTHtW6mWsHHkdP9AJIPpjJz1kf0ULn8BB9uhx21ngKc6PXh2Jl
BzeAiWsowFnwIWttnm2ZWwSYBIdl4vwcQzdx09Q+j3z+iNOxOVo1WV5teVYyPTnoxrcp4sykQDmCKnHYMDAfDTafmx/hENA4w+FDxBYLKjotdn3bji3OrTfve4idLT4EDFzaGZzX86aQXp/v8CRMms9U7GFACD5Gu9weC2H20G28cPI1AMASnPf1GJ/9IENs+7gQAbD3QGRd8aJRdNAaNiePfL4r83tVMDT6Um2d+Gk6B8A38RK8P7d2DWDQ18fcHAO2RY+uNNGnOmlKK9SvnoNBlV2ZwxCsrdOIfvjIfp/v8mH9OmeG1ewqcmFTkxNnBANq7B9M27P7VF2ag0GXH/1pUb/i9hCK3Ixx8cNAYkWUx+FAZCUbnTOw4dNrU4KM/EtRc2DgJv/nLSwEAf/fMe3im9Tj6fOZ2/otAp6rEjX+/MzxrJRiSseinr6BnMID32nuweFr6xsTWY2eVgV07Dp+O6Wk4rbF7o0oZsR4toXh9Abzb3gMg+U/n5SL4MOFk23xvtQWAqRXF2PVpd8IZKcJIMITPz4aDQaNTQf/qC8mH86ndemmjoe8Zr7GiCGcHe3HszCC6B5LP+BAW1Jfj//3z8qzejyPWiSjrOR8TyYhqyJX6XA0z9PvCN1Z1Hb2kIPzrfhPmW6gd604c5mW3SViqzI7Q93tTv67T68ehU/3K51qZD/H5aVXmY9eR8AjwaZVFyXs+TC275OdEWzURUIjrHu9Ejw8jkQP1qkv1DQDLp0Zlx85AXnYLFSnbbZn5ILIqBh8qQVXw8WnXQEJzaDbEP7TqBjsRiCQbyJWNZGeoALGDq/QQB5W5IiUaEYwEQ7LyE3L8CPNkPR870pw5UmbSiPUB/wiGAsl3auSSaL7UKp+JoCSTU2DzYapq0Fh+go9w5mOImQ8iy2LwoRJ/MukOnTdpPURpRWQ71L/uN7vscib5zhIxpOq9471pb/TdA8P48EQvAOAbTVMBRK9H98AwQnJ40FRFUWzwkexk2/gTWeOJzEdPlmUXceMscNpQ7DI+gyJTWkfTC1rB4FjRqNpuqzXjw0wi+BjglFMiy2LwoRKMO1vkTRNLL/3+8I1VnfkocYdvumaMFVcTN8H4szfqJxVhRlUxgiFZaQDV8tbhLsgyMKemFLdcHG4u3PVpN/wjQeUmX1HkgiOucVXctAaGgxgaDqK9exBHuwZgt0loipxFEs+sskv0NNbkOzVyRWQ+TvX5MZSklKAVDI4V6u22SuYjh5kjMetjMMDgg8iqGHyojMQFH28d7koISDIlshul+ch8dCcPPoBow6c4ol2LKLksm1WFOTWlqCpxYSgQxDvHelKm5kvcDrgd4b9WXf1+peRyYUM5PBpbl8sj2ZNsg4/To9DvAYTX74n8WYpdLWpaweBYIdb1+dkhdHjDJ9bmo+wyaHLQTUTjB4MPlZFQuOxSXuREiduBnsEADkRKD9kSTaXJej7MbDgdHgnhRG+4VyXZT9rLIn0XO1JkdWRZVvpCrjh3Mmw2SZkzsf3QadVPx4mpeTFiHQg3naqDGC1mZz7yHXwA0dHjyUovqYLBsaC6tAAuhw0jIVnZop3T4EOcbMuGUyLLYvChIrbaFjjsuGxGuERg1q4X0fOhPmytOAfBx/Gzg5Dl8E+XyaZ8XjajAnabhM/ODGo2SH7aNYATvT647DZcGtmSqwQth7vS7igRKftTXh/eOhK+fsvPTR989PtHMBLXd2NEJoO8zBI9HTZ2x4ssy0rwkelU0Fyz2SQ0xM0fyeWEWLHbhVttiayLwYeKKLHYbZJys0yVITBCyXyoyy6R4MPMOR/HVDe6ZH0PpQVOXNxYDkA7sNr+STjrsXjaJOUgMFGu+eDzXnzS2QdA+wYlBny9cfA0egYDKHU7sLC+XHPNHtU18WZxLUZjwJjQGHdGitA9EJ4cmuxAvbFEfWicy26DpzB3I4CUhlNmPogsi8GHiii7OOzRMsO+Y92m/IQmgg/1nI9SZc5H9vMthFRnqAjLZoksRvK+j2RbY6s9BTi3ugSyDLx8oAOAdlOi2PHyXx+cBABcNrMyoTFVzWGPniPSM5j5+S75aJbUMrUiefAhPs/0FNh8UWdlqkpcOW3YjW61ZfBBZFUMPlRE2cVhkzC9qhjnlBciEJSx+2h3mq9MbyBF5sMXCGVVblBTmhtTBB9XRLI6bx0+k9BQGwiGlJ0w8dNIRTAiMjWaZZfI4+J1y3WcOeIxoe8jk8PbzBI93TZ58DFWd7oI6n6UXAdvypAxNpwSWRaDDxVxI3bYbJAkSbn5bv8k+9KLMucjSc8HYN7MAz07KxacU4bSAgd6hwL44PPYhtp323owMBxERbELc2s9Mc/Fz+nQmgURf/NfpuO01XITBo2NZsOpMmjs7GBMQKcnGBwLYjMfuQ4+IpkPbrUlsiwGHyoBVc8HEP1JP9221HSGR0Lwj4QzG6Xu6HZTl8OmbEvtM6n00haZppnqJ22H3YalM0VPS+zvTXy+dFZVwjTOJdMrlGmnQPrMBxA+SG2ajl0eZux46dI4byYfassK4bRLCARlZbsqMPa32QoxmY8c98ww80FEDD5UgpGeD6c9fNO9fGYlJAn4pLMfnaobilHqf2SL3bF1/1ITz3dR76xQNxAmI7IY8YPUxOdXJNkaW+RyxJzaqnWTV9+8xKm66WQbfAwOjygNjLm+eSZjt0lomJS440X04DSm+fMYbepm2FxnPoq51ZbI8iwTfJzu8+PHL36Ef3z5Y83XiJ4PkfmYVOzCgsgx5dlsuRWBRaHTntB4KcowZgwaO93nhy8Qgk0CzilPfXS7KCm9c+wsfvSHA3joxQP40R8O4P3jPQC0R6GrH68o1ii7qIIS0dyajlJ2STNiXZZl/HbXMXx0whvzuNj+63bYYkpb+SSyTeq+D/W5LmNZgdOOGk/40Lt8lV0YfBBZl2WCD68vgF+/dRRP7zqm+ZoRVc+HcFlkJPj+9rMZv3eyGR+CeMyMEetim21tWSFcjtR/tFMrizG9qhgjIRlPvf0ZNr31GZ56+zOEZODc6hLUaQQvV54XDibqJxXCqbGDpcZTALfDBpfdhqWzko9UjycaTnvSZD62HujEg89/iPu2vBvz+GlVv0c+R6urKWe8RP4cfIEgOr3hdY31ng8AmF1dAiD3zbGc80FEo/Mj4ihwRgKKVOPSleDDHr15TYqM/h4aznw3irLNtiDxcpuZ+Wgz2F/wz7ddhK0HOhCSo9fEJkm4YX6t5tdcUFeGX31jMao92j8dF7sd+PXtl8Buk5TR6enoLbu0fHIKAHDoVD8+7xlSMjyjuc1WaIzbbitKLqUFDiWzM5Y99OUL8PaRM7jqPH3Zqkwx80FElgk+7JGAIpAi+BA9H3ZVo6VosBzOYitsskPlBDN7Po4ZHOM975wyzIuUlYz44tzqtK9ZmmKcejLlheEgJdXJtuGx79Hy145Dp/HVSxoBjO6AMaExruwimk21Br6NNTMml2DG5JKcv0808xFEKCQnNDYT0cRnmbKLM/IPXKp5GgHVnA9BlC+GRzL/Ka0/so02WfAhHjOj87/tTP
qdLmOVyHx4U2Q+jp0ZxPGzQ8rn6kAk3cj3fIie7xL+czAaDFqFyHwAgC+L/6+IaPyyTPAhGj1DMhDSyH4ocz5UvQzR4COLzIcvccCYIB4zY8S6crOrGNs7K5LRU3YRh91NipQw3jrcpfxZjuaMD0FkPry+EfQOBpRgsHEc/nnkUqFq0qtZ822IaHyxTPChLqWMaAQf0YbT6GvFHA4zyi6lSTMf0UPVstU+jn/SFj0RPUPa49VFpuP2y6ejxO3A2cEADkR2vShll1Hs+Sh02TEl8v7HugfG/Gm2o8VmkzhincjiLBN8OO3q4CN5ICFKMkl7PnKU+VB6PrLMfPT7R5Tx4o3j8GaXLvMxohr7ftWcydFThyMD4EbzRFu16Om2gzGH/FGs6OFy3PFCZEWWCT70ZD5E2cVpctlFbKNN1fORbeZDNDmWFznhKRj7Oyviia22vkAIviRjt9873oM+/wjKi5y4oK4sYfR9tOwyeg2nQDTw+6xrAMe7w/0pDD4ScbstkbVZJvhwqmZ3iGFi8UbixqsD0eDDb0LmI5dzPpQU/zi90ZW6HRCXPVnT6ZuRIGPprCrYbdFzd1qPncXQcDB6qNwoll2AaL/Nns+6MRwMwWGTNGemWBm32xJZm2WCD5tNUm5u6couDtO32uqZ85Hd2S7iTJexPsZbi80mpTzZdsfh2LHv4tTh4WAILZ+cVq7xqJddKsOBxp7IScj1kwpjglkKU8oubDglsiTLBB9AdHJpusyHesiYKbtdUpRdzJrzEZ0pMX5/ytbq+/D6Atjf3gMgOt5dferw8+9+DiAcKHqSBHj5JHa2iEzZeA0Gc02UXYYCLLsQWZG1gg+7mPWRZqutqkTjNKHhVGyjTT3nI7ufANvG8TZboVyMWI8bNLbzyBkEQzJmVBXHHIAmApHXPg5PPa0qcY36MK/4nS3jtQyWa8x8EFmbtYIPMWhMo+wSCCb2fJix1VYMEEs95yPbsos4PXX83uy0yi47Ilts4w+7WzqzCpIU/bMZ7X4PAKgsdsUM0WKzaXKi14lbbYmsyVDwsXHjRixYsAAejwcejwdNTU344x//qDzv8/mwdu1aVFZWoqSkBKtWrUJnZ6fpi86UGB6mvdsl0vORo7JLqTtxF0qpareLLGuPfk9lJBjC52fH/84KrbKLGC62LG5k+6RiF+arxsNPHuV+DyBcDlL/GYznYDCXCrnVlsjSDAUf9fX1eOSRR9Da2op9+/bh6quvxk033YQDBw4AAB544AG8+OKLeOaZZ9DS0oITJ07glltuycnCMyEyHwGNLEayIWMi+ND6Gj30TDgNycBQki2mepzo8WEkJMPlsCnHoo9H0UFj0eCjvXsQn50ZhN0moWlm4gm56oBktJtNBXXphQPGkivmbhciSzPUnXfjjTfGfP7www9j48aN2LVrF+rr6/Hkk09i8+bNuPrqqwEAmzZtwvnnn49du3bhsssuM2/VGRJBhdbJtiNK2UU158Mugg85o0OwQiEZ/cPaPR+FTjtsUjj46PeNKI14RoiSS8OkwnF9SFey813EVNOLGspRmmR+yRWzJ+OXbxwBAFSVju6MD2Gqqsl0PGeicqmQcz6ILC3jrQHBYBDPPPMMBgYG0NTUhNbWVgQCAaxYsUJ5zZw5c9DY2IidO3dqBh9+vx9+v1/53Ov1ZrqktByqQCIZkflwJsl8AOHeggKbPeHrUhkMBCGqKcm22kqShGK3A32+EfT5RzBFx/d89H8O4u3ItE8A6B6ITDYd5zc6EXy89P4JfPh5L4BoYBXf7yFcPLUchU47hgLBMZP5EAf7VZW4MwomrUBkPl7+sAMfn+wb5dUQWU9deSEev+2iUXt/w/8yfvDBB2hqaoLP50NJSQmee+45zJ07F/v374fL5UJ5eXnM66urq9HR0aH5/Zqbm/HQQw8ZXngmortdkpdQRM+HPUnPBxAJPpzGgg9RcrHbJKV5NV5pJPjQM2K9dyiAx187nPS5+fXlhtY21syMHOfe1T+sDA0TVpxfnfRr3A47rr2gGi/sP4EL6sqSvibfFkT6UBbUj431jEUiO5Tsz5qIcm/G4Oj+f2c4+DjvvPOwf/9+9Pb24ve//z3WrFmDlpaWjBewfv16rFu3Tvnc6/WioaEh4++Xit6yS7IhY0BmTafiULkSt0NzG2hJgQPo1TfroyfyF8btsOEXt16oPO522nF5kp6I8eTqOVPwn3/VhO4Bf8zjtWWFmHeO9o38H74yH3+1fCbm1nlyvURdFjaU4w/3Lh3X255z7dq51fiPuy/D2VH+B5DIqkY7K2v43V0uF2bNmgUAWLRoEfbu3Ytf/OIX+OpXv4rh4WH09PTEZD86OztRU1Oj+f3cbjfc7vyky8X8jkDaU22jAYckSXDZbRgOhjIKPlLN+BCMnO8iZmBUFLtw/bxaw+sZyyRJwqXTKwx/XbHbMWYCD2HBOM9C5ZrNJmHJjPEdLBNR5rKe8xEKheD3+7Fo0SI4nU5s27ZNee7gwYNoa2tDU1NTtm9jCnGybVBjzkcwyYRTILvttmKIUrJ+D6Ek0kipt+wCRPsjiIiIxhtDmY/169dj5cqVaGxsRF9fHzZv3ow33ngDW7duRVlZGe68806sW7cOFRUV8Hg8+Na3voWmpqYxsdMFiA4P02o4Fdtp48/icDlsgD+zQWPqsouWUgOZDxF8eBh8EBHROGUo+Dh16hS+8Y1v4OTJkygrK8OCBQuwdetWfPGLXwQA/PznP4fNZsOqVavg9/tx3XXX4Ze//GVOFp4JZchYmvHq6hNwAdXhctmUXVJlPoyUXSLBRzmDDyIiGqcMBR9PPvlkyucLCgqwYcMGbNiwIatF5Uq68eqi5yNp5gPRw8KMSHWonBAdsZ4++PCy7EJEROOctc52SZP5GEkyXh3IrudD9HGk6vkoVjIf6c93Yc8HERGNd5YKPpzpMh/BxN0ugKrsklHPRzj4KE6xrUnp+dCR+RBbbcUociIiovHGUsGHXQk+Um+11Sq7ZNTzkeJEW0E8Z6ThlJkPIiIarywVfDjTll0Sh4wB5pRdzJrzoQQfRWPjHBMiIiKjLBV8KOPVNTIfQa2eD6XsYvwEzgF/+p4PI5kPMWSMmQ8iIhqvLBV8KGUXjd4NzZ4PM8oubu1gwUjPB3e7EBHReGep4EPM7zDc85HFnI9+PXM+Muj54JwPIiIarywVfNiVU23TDBnT2mqr8XWp6Jrz4dY35yMQDGFgOFz6YeaDiIjGK0sFH+m22qYcr45MT7XV0fMRCT78I6kPrxNZD4Dj1YmIaPyyVPAhhoxpne0STHKqLWDObpfiFJkP9XMDKUovIvgoLXAkBEhERETjhbWCD1vqU21HtE61zXC3i38kqAwmS1V2cdptKHCG3yNV3wd3uhAR0URgreDDnvpUW7ELJn7OhzvDzId690qq4CP8fDigSBV8iJ0unG5KRETjmbWCj0g5JZinCacikChy2dOWSUp17HjhdFMiIpoILBZ8pG44je52MedsFz07XYQSHbM+xLkuDD6IiGg8s1bwkabhVGzB1cp8+DMsu6Sa8
SEo221TZj7Cz5UVcrQ6ERGNX9YKPpSGU62yi8Z49SzLLqV6Mh8F6TMfLLsQEdFEYK3gQ2k4TQwiQiEZIiYxa6ttv44TbQVlxLo/oPmaniGWXYiIaPyzWPChfaqteuS65nh1gz0fYmJpsSt98FGso+eDu12IiGgisFbwYdM+1VZditEcr57DzId4TeqeD5ZdiIho/LNo8JEYRKgfi898ZDvnQ1fPR+Q1qSaccsgYERFNBJYKPpypyi6qx+J7PpzZbrXV0/PBOR9ERGQRlgo+7CkzH+HgQ5LMHzImppemoudk2x4GH0RENAFYKvgQvRzJMh/RQ+USJ5EqDad5mPOhlfnwBYLK+7PhlIiIxjNLBR+inJKs4VRsv002Bl3JfGRYdjFjzocoudhtkq6JqURERGOVpYIPu1277KKMVrclXpJMyy59Bsarl6Y5WE40m3oKHJCk1OfEEBERjWWWCj5EYJFqzofdnnhjd2ea+fCFA4ZiHcFHsdse+ZrUmY/yIo5WJyKi8c1SwYc9xZwPZbR60p6PcGCQ8Xh1A3M++odHIMuJ6xPBh4fNpkRENM5ZKviINpwm2e0SFA2n5pVdlIZTA2UXWQYGh4MJz/NEWyIimigsFXykOtVW9HykajgdCckIaRxKFy8UkjEQCSL07HYpcNqU907W96GUXRh8EBHROGet4CPFqbZaJ9oC0eAD0N/3MTAcDSD0ZD4kSUo568PLGR9ERDRBWCv4SLHbJVp20Z7zAQB+naUXkb1w2iWlYTWdVLM+OGCMiIgmCkPBR3NzMy655BKUlpZiypQpuPnmm3Hw4MGY11x55ZWQJCnm45577jF10ZlKNecjOmQs8ZKoD5rT2/eh7vfQuzW2NMWsj16eaEtERBOEoeCjpaUFa9euxa5du/DKK68gEAjg2muvxcDAQMzr7rrrLpw8eVL5+NnPfmbqojOlHCyXpOcjkKLnQ5Ikw4PG+gyc6yJEMx+BhOe424WIiCYKQ6MyX3755ZjPn3rqKUyZMgWtra1Yvny58nhRURFqamrMWaGJRNklkCSACEZKMc4kPR8A4LbbMDwSyiDzoT9YKE7R8yGGjLHhlIiIxrusej56e3sBABUVFTGPP/3006iqqsK8efOwfv16DA4Oan4Pv98Pr9cb85EroqSStOE0qJ35AIxvt40eKmfXvb6SFCfbsuGUiIgmiowPCQmFQrj//vuxdOlSzJs3T3n8a1/7GqZOnYq6ujq8//77+M53voODBw/i2WefTfp9mpub8dBDD2W6DEOiDacyZFmO6cUYSdHzAWQQfBiY8SGIM2AGUmy1LWPPBxERjXMZBx9r167Fhx9+iB07dsQ8fvfddyu/nj9/Pmpra3HNNdfgyJEjmDlzZsL3Wb9+PdatW6d87vV60dDQkOmyUlKf2zISkmNKLErwoVF2ifZ8JA4ASyba86E/WFC22sYFH7IsK7tdygs5Xp2IiMa3jIKPe++9Fy+99BLefPNN1NfXp3ztkiVLAACHDx9OGny43W643e5MlmGY+tyWYEiGU1URET0fWmUXZ2S7rd6ttgMGDpUTtE62HRgOKqUill2IiGi8M9TzIcsy7r33Xjz33HN47bXXMH369LRfs3//fgBAbW1tRgs0k3qGR3zTaSDFnA8gOuvDaM+HnnNdBK05H6Lk4rLbUOC01GgWIiKagAxlPtauXYvNmzfjhRdeQGlpKTo6OgAAZWVlKCwsxJEjR7B582bccMMNqKysxPvvv48HHngAy5cvx4IFC3LyGzDCqRoWFt90qsz5sJvT89GXSc+HRuZDOdelyKl7ZggREdFYZSj42LhxI4DwIDG1TZs24fbbb4fL5cKrr76Kxx57DAMDA2hoaMCqVavw4IMPmrbgbKiTGvHnu0QbTlP3fCQ7FyaZ/kzKLpFtufE9H73c6UJERBOIoeAj2VHvag0NDWhpaclqQbkkSRKcdgmBoJwwYl2cdKvV8+E22HDa7wsHDEaGjBVHtuXGZz64zZaIiCYSyzUQ2DWmnIqyi1Or7JJhz0dGZRd/fNmFA8aIiGjisFzw4dQ432UkxXh1IPOej2JDwUc4uDg7OIyQan0suxAR0URiueBDGTQWTF52SdfzoXerrS8QLs8Uu/RPOJ1WWYwilx19vhF83NGnPM4BY0RENJFYLviwp8l8aA4Zsxs7WG4oEnwUOPUHHy6HDZfNqAQAbD90Wnm8h5kPIiKaQCwXfDjtqXs+zBqvPjQcDj4KDWQ+AGDZrCoAwI7DXcpjLLsQEdFEYrngI3q+S/IhY2b1fPgC4dcVGsh8AMDyc8PBx+6j3UrpRux2KWfZhYiIJgDrBR8aZRcxXj3t2S46go+RYEgpzxgNPmZOLkGNpwDDIyHs/awbQHS3CzMfREQ0EVgw+AgHF/Hj1dMNGXMb6PnwqQIUo2UXSZKwbHY4+7H9ULj0wrILERFNJJYLPkRZJX68+ohSdsm+50P0ewDR4WRGXKEZfPBEWyIiGv8sF3yIIWLxDaci8+E0oedD9GoUOu0ZncWyNNJ0+qeTXpzy+uD1MfNBREQTh+WCD9HTEV92ET0f9jRbbf06yi7RbbaZXd6qEjcuqPMAAF4+0AEx1Z7BBxERTQTWCz7SlF20ej6cGWY+MiX6Pl567yQAoMhlV7IvRERE45nl7mZit0tAa8iYVs+HgbNdRM9HgcFmU7UrZk0GAOyJ7Hhh1oOIiCYK6wUfGuPVg+kmnBppODUh87F42qSYZlUGH0RENFFYL/gQp9rGZT5ED4jWkDERCMT3iiRjRtmlwGnHpdMrlM8ZfBAR0URhveBDY7dLUNntkmarrYGGU6MzPuItnz1Z+TWDDyIimiisF3woDafJh4xpjle3hwMJfT0f4dcYOVQuGdF0CnC0OhERTRzWCz7sonwS33Bq3nh1M3o+AGBOTSmqStwAmPkgIqKJw3LBh9OW/GC56Fbb1GUXf5622gLhUetfODdceqn2FGT1vYiIiMYKx2gvIN/sGg2nwbRlFwM9H8Pm9HwAwHeuPw/zzvFg1aL6rL8XERHRWGC54EOr4TSQ5mC5TMou2fZ8AMAUTwHuWDo96+9DREQ0Vliv7KI55yN1z4d7FHo+iIiIJiLLBR9aZRe9PR96yi6+4ezOdiEiIprILHd3VE611Rivnq7nIxiSE86FiecbMa/ng4iIaKKxXPChTDjVGjKWZqstkL70opztwrILERFRAusGHwlDxlKPVzcUfLDng4iISJP1gg+tIWNpej4cNglSJC7xB4Mp32MoEA5OGHwQERElslzwYU8zXl1rt4skSdFZH2kyHz4T53wQERFNNJYLPqJbbZP3fGjN+QCgO/gwc84HERHRRGO54EOUVQJxO1YCwdQ9H4D+7bbs+SAiItJmveDDnrzsEt3ton1J9E45ZdmFiIhIm6Hgo7m5GZdccglKS0sxZcoU3HzzzTh48GDMa3w+H9auXYvKykqUlJRg1apV6OzsNHXR2VAyHwmn2qae8wFEg48AMx9EREQZMxR8tLS0YO3atdi1axdeeeUVBAIBXHvttRgYGFBe88ADD+DFF1/EM888
g5aWFpw4cQK33HKL6QvPVDTzEb/bJTJeXUfPR6qTbQPBkBLIMPggIiJKZOhguZdffjnm86eeegpTpkxBa2srli9fjt7eXjz55JPYvHkzrr76agDApk2bcP7552PXrl247LLLzFt5hkRwoc5ehEIyRCziyLLsIrIeAFDgslxVi4iIKK2s7o69vb0AgIqKCgBAa2srAoEAVqxYobxmzpw5aGxsxM6dO5N+D7/fD6/XG/ORS8lOtVWPWtfVcJoi+BD9HpIUzZQQERFRVMZ3x1AohPvvvx9Lly7FvHnzAAAdHR1wuVwoLy+PeW11dTU6OjqSfp/m5maUlZUpHw0NDZkuSReHLbHsov61rq22KXo+fKoBY5Kk/b2IiIisKuPgY+3atfjwww+xZcuWrBawfv169Pb2Kh/t7e1Zfb90lLKLareLetS61pAxwFjZhf0eREREyRnq+RDuvfdevPTSS3jzzTdRX1+vPF5TU4Ph4WH09PTEZD86OztRU1OT9Hu53W643e5MlpERZ7KyS1Cd+dCOx9wGgg8OGCMiIkrOUOZDlmXce++9eO655/Daa69h+vTpMc8vWrQITqcT27ZtUx47ePAg2tra0NTUZM6Ks2RXDpZL3vORouqia8jYEGd8EBERpWQo87F27Vps3rwZL7zwAkpLS5U+jrKyMhQWFqKsrAx33nkn1q1bh4qKCng8HnzrW99CU1PTmNjpAkTLKiOqACI6YExK2aehZ7y6j2UXIiKilAwFHxs3bgQAXHnllTGPb9q0CbfffjsA4Oc//zlsNhtWrVoFv9+P6667Dr/85S9NWawZlLKLKtuhZ7Q6EM18pJrzwZ4PIiKi1AwFH7Isp31NQUEBNmzYgA0bNmS8qFyKll0SMx+p+j0AnQ2nkbJLAcsuRERESVluEIXTpj3nI9VOFwBw2cMBRcqeDyXzYblLS0REpIvl7pDJG07Tj1YHdA4ZY9mFiIgoJcsFH84kDaciC5K25yPytXrKLtztQkRElJzlgo9k49VN7fngnA8iIqKUrBd8pCq7pOv50DPng8EHERFRStYLPuyJu130l13Y80FERJQt6wUfkdJKICgrW4eVIWNpyy7pd7uoD5YjIiKiRBYMPqLZDVF5CYR0Zj4454OIiChr1gs+VH0dYrJp0GjPByecEhERZcx6wYeqtCLKLaLnI+2cD7v+hlMGH0RERMlZL/hQZTdE0DGic6ut28iQMZflLi0REZEulrtDqrMbgUi5ZSQXPR/MfBARESVlueBDkiQlyBBlF8M9Hyy7EBERZcxywQcQzX6IhtOA0Z4PXWUXBh9ERETJWDL4cMaNWA8qZRd949X9es52YeaDiIgoKUsGH/En24r/OnVvtQ0mfV6WZY5XJyIiSsOSwYczbsS6OOFW93h1jZ6P4WBIGVzG4IOIiCg5SwYfSuYjruwiyjFa0m219Q1HH2fZhYiIKDlLBh9inkd82UXvVtuQHM2WqPki5Ri7TUpbwiEiIrIqSwYfStklGFt2SbfbRZ0ZSVZ6UTebShKDDyIiomQsGXxoNZzqnfMBJC+9sNmUiIgoPUsGH1pbbdONV3fYJIiERqrgg6PViYiItFnyLikyHGK8uhgylq7nQ5KklDtefJzxQURElJYlgw8xTCwYNDZeHUh9vgtHqxMREaVnyeDDaYub8xHSN14dUG23TdZwyp4PIiKitCwZfDjscQ2nQX3j1YHU57sou114rgsREZEmawYfttiGU2W8uo7MR6qyi49lFyIiorSsGXzYY0+1FT0fdpN6Plh2ISIi0mbN4COS4QjGlV309HwoJ9smHTIWfozBBxERkTaLBh/h33YgfshYtj0fLLsQERGlZc3gI268elDnhFNAZ88Hh4wRERFpMnyXfPPNN3HjjTeirq4OkiTh+eefj3n+9ttvhyRJMR/XX3+9Wes1RXzZRfR+pBsyBgAuRzirwYZTIiKizBgOPgYGBrBw4UJs2LBB8zXXX389Tp48qXz87ne/y2qRZnNESieBuPHqTiNlF875ICIiyojD6BesXLkSK1euTPkat9uNmpqajBeVawmn2ob0jVcHVEPGOOeDiIgoIzlpTnjjjTcwZcoUnHfeefjmN7+JM2fOaL7W7/fD6/XGfORa4qm2HK9ORESUL6YHH9dffz1+85vfYNu2bfjHf/xHtLS0YOXKlQgGg0lf39zcjLKyMuWjoaHB7CUlUIaMifHqQf27XUTWJOnBcgw+iIiI0jJcdknn1ltvVX49f/58LFiwADNnzsQbb7yBa665JuH169evx7p165TPvV5vzgMQR1zmI2ig7KLM+Ug1ZIxlFyIiIk053xM6Y8YMVFVV4fDhw0mfd7vd8Hg8MR+5JhpORcYjYOBgOZdde7eL0vPBzAcREZGmnAcfx48fx5kzZ1BbW5vrt9ItvuE0aFLPhy8QfozBBxERkTbDZZf+/v6YLMbRo0exf/9+VFRUoKKiAg899BBWrVqFmpoaHDlyBN/+9rcxa9YsXHfddaYuPBsJDacGej5E8BHgVlsiIqKMGA4+9u3bh6uuukr5XPRrrFmzBhs3bsT777+Pf/u3f0NPTw/q6upw7bXX4ic/+Qncbrd5q86SM67sYvpWWwYfREREmgwHH1deeSVkWdZ8fuvWrVktKB9Eb0cgFDte3amn7KIxZEyWZVXDKcerExERabHkXdIef6ptyMh49eSZD/XuF2Y+iIiItFky+Egou2TQ8xG/1VbM+ADY80FERJSKJYOPaMNp7Hh1XbtdNMououTitEtKcENERESJLHmXjG61jR0ypmvOh1J2iZ3YKppNmfUgIiJKzZLBhyiviOFiYttsNj0fPNeFiIhIH2sGH3bRcBq/20V/z0d82UU514Wj1YmIiFKyZvAhMh+ZzPmwa2Q+hjndlIiISA9rBh9x49XFf431fCQvu7Dng4iIKDVrBh+qOR+hkIxI4kM5cC4V9nwQERFlx5rBhz1adgmqprUaajiN7/lQdrtY8pISERHpZsk7pTrzIbbbqh9PRTSlxg8ZG2LDKRERkS6WDj4CoZAyaAwwOGSMPR9EREQZsWbwoRqvLrbZAvrGq7tVZRf1AXs80ZaIiEgfawYfqrJLQFV20VF1UXo+ZBkxgYtvhMEHERGRHtYMPiLllUAwpBowJkGS9DecArFNp6LhlD0fREREqVky+FBOtQ3JSs+Hnp0uQLTnA4jt+2DPBxERkT6WDD6UU22DIWW3i55+DyDcLyLilNjggxNOiYiI9LBk8OG0qTMfkeBDx04XQZRe1Ntth1h2ISIi0sWSwYddGa8e3e2iZ8aHUOxyAADODAwrj/k44ZSIiEgXSwYfTlF2CYUQCBrr+QCAixrLAQA7j5xRHmPPBxERkT6WDD7EnI+QDCX40NvzAQBXzJ4MANh+6LTyGMsuRERE+lgy+FBnOXyRRlEjPR/LZlcBAPZ9dlYJOkTZpcBhyUtKRESkmyXvlE5VoCGGgxkpu8yoKkZdWQGGgyHs+awbAM92ISIi0suSwYe6xOKPBA1OA2UXSZKipZdPwqWXITacEhER6WLR4COx7GIk8wF
ESy87DncBiPZ8sOGUiIgoNUsGHzabpAwKE70aRno+AGDprCpIEvBxRx86vT5l5gfLLkRERKlZMvgAoqUXJfgwmPmoKHZhXl0ZAODVP3Uqj7PsQkRElJp1g49IpsM3YnyrrSBKL698FA0+WHYhIiJKzbrBRyTTITIfRns+AOCKSPDx9uHwsDGXw5bR9yEiIrIS6wYf9tjzWYz2fADAoqmTUOi0YzjIQ+WIiIj0sm7wEZf5MNrzAQBuhx1LZlQonzP4ICIiSs9w8PHmm2/ixhtvRF1dHSRJwvPPPx/zvCzL+MEPfoDa2loUFhZixYoVOHTokFnrNY3THttwas+g5wMAls2qUn7NnS5ERETpGb7jDgwMYOHChdiwYUPS53/2s5/h8ccfxxNPPIHdu3ejuLgY1113HXw+X9aLNZPozfBH5nw4Myi7ANFzXgA2mxIREenhMPoFK1euxMqVK5M+J8syHnvsMTz44IO46aabAAC/+c1vUF1djeeffx633nprdqs1UXS3S+YNpwBwbnUJppS6carPjwKnZatYREREupl6tzx69Cg6OjqwYsUK5bGysjIsWbIEO3fuTPo1fr8fXq835iMfxDh1MZk0k54PIDxqXWy5Zc8HERFReqYGHx0dHQCA6urqmMerq6uV5+I1NzejrKxM+WhoaDBzSZqUsouy2yXzS3HdBTUAgPpJhdkvjIiIaIIb9TrB+vXr0dvbq3y0t7fn5X1Fj0c2u12Ea+dW4/9+83L86MsXmLI2IiKiicxwz0cqNTXhDEBnZydqa2uVxzs7O3HhhRcm/Rq32w23223mMnSxK1ttMztYTk2SJCyaOsmUdREREU10pmY+pk+fjpqaGmzbtk15zOv1Yvfu3WhqajLzrbImyiyi4dSZRdmFiIiI9DOc+ejv78fhw4eVz48ePYr9+/ejoqICjY2NuP/++/HTn/4Us2fPxvTp0/H9738fdXV1uPnmm81cd9ZE2cVvQuaDiIiI9DMcfOzbtw9XXXWV8vm6desAAGvWrMFTTz2Fb3/72xgYGMDdd9+Nnp4eLFu2DC+//DIKCgrMW7UJ7FmeaktERESZMRx8XHnllZBlWfN5SZLw4x//GD/+8Y+zWliuOePHq2c4ZIyIiIiMsWyjQ3TImCi7WPZSEBER5ZVl77iOSLARDMmRz5n5ICIiygfrBh9xZRaWXYiIiPLDssFH/O4WZj6IiIjyw7LBhzOux4M9H0RERPlh2TtufJnFybILERFRXlg3+Igrs3DIGBERUX5YN/iIG6fOng8iIqL8sHDwEd9watlLQURElFeWvePGZzq41ZaIiCg/LBx8xO92YfBBRESUDxYOPlh2ISIiGg2WveOy4ZSIiGh0WDb4iJ/rYWfPBxERUV5YNviI7/GIn3hKREREuWHZO2582YUNp0RERPlh2eDDGZ/5YNmFiIgoLywbfMRnOpj5ICIiyg/LBh/OhN0ulr0UREREeWXZO258poMTTomIiPLDssFHfI8H53wQERHlh2WDD45XJyIiGh2WDT7ih4rF94AQERFRblj2jhs/VIyZDyIiovywbPAR32DKng8iIqL8sG7wkbDbxbKXgoiIKK8se8fleHUiIqLRYd3gIz7zweCDiIgoL6wbfMT3fHDIGBERUV5YN/iwcbw6ERHRaLDsHTe+zMKqCxERUX6YHnz86Ec/giRJMR9z5swx+22ypi6zOO3hdRIREVHuOXLxTS+44AK8+uqr0Tdx5ORtsqKeaMqdLkRERPmTk6jA4XCgpqYmF9/aNOqAg/0eRERE+ZOTu+6hQ4dQV1eHGTNmYPXq1Whra9N8rd/vh9frjfnIB/V4de50ISIiyh/Tg48lS5bgqaeewssvv4yNGzfi6NGjuOKKK9DX15f09c3NzSgrK1M+GhoazF5SUuqAgzM+iIiI8keSZVnO5Rv09PRg6tSpePTRR3HnnXcmPO/3++H3+5XPvV4vGhoa0NvbC4/Hk7N1+QJBzPn+ywCAao8bu/9+Rc7ei4iIaKLzer0oKyvTdf/OeSdoeXk5zj33XBw+fDjp8263G263O9fLSKBuOGXPBxERUf7k/K7b39+PI0eOoLa2NtdvZYi60sKeDyIiovwxPfj427/9W7S0tOCzzz7D22+/ja985Suw2+247bbbzH6rrEiSBGck6OBWWyIiovwxvexy/Phx3HbbbThz5gwmT56MZcuWYdeuXZg8ebLZb5U1h82GQDAYs/OFiIiIcsv04GPLli1mf8ucEbtcmPkgIiLKH0v/yC96PdjzQURElD8WDz7Cv33O+SAiIsofawcfkaCDW22JiIjyx9J3XQd3uxAREeWdtYOPSMaDPR9ERET5Y/HgQ4r5LxEREeWetYOPSMOpnT0fREREeWPpu67IeDhZdiEiIsobawcfbDglIiLKO0sHH2KsOns+iIiI8sfSwYfIeIjeDyIiIso9S991lfHqzHwQERHljbWDDx4sR0RElHfWDj4i5RYnyy5ERER5Y+m7rpO7XYiIiPLO0sGHnbtdiIiI8s7SwYdT2e3C4IOIiChfLB18RIeMWfoyEBER5ZWl77rlRS4AQFmhc5RXQkREZB2O0V7AaLrrihmon1SImy86Z7SXQkREZBmWDj4ml7rxjaZpo70MIiIiS7F02YWIiIjyj8EHERER5RWDDyIiIsorBh9ERESUVww+iIiIKK8YfBAREVFeMfggIiKivGLwQURERHnF4IOIiIjyisEHERER5VXOgo8NGzZg2rRpKCgowJIlS7Bnz55cvRURERGNIzkJPv7jP/4D69atww9/+EO88847WLhwIa677jqcOnUqF29HRERE40hOgo9HH30Ud911F+644w7MnTsXTzzxBIqKivDrX/86F29HRERE44jpp9oODw+jtbUV69evVx6z2WxYsWIFdu7cmfB6v98Pv9+vfN7b2wsA8Hq9Zi+NiIiIckTct2VZTvta04OPrq4uBINBVFdXxzxeXV2Njz/+OOH1zc3NeOihhxIeb2hoMHtpRERElGN9fX0oKytL+RrTgw+j1q9fj3Xr1imfh0IhdHd3o7KyEpIkmfpeXq8XDQ0NaG9vh8fjMfV7Uyxe6/zhtc4fXuv84bXOH7OutSzL6OvrQ11dXdrXmh58VFVVwW63o7OzM+bxzs5O1NTUJLze7XbD7XbHPFZeXm72smJ4PB7+Zc4TXuv84bXOH17r/OG1zh8zrnW6jIdgesOpy+XCokWLsG3bNuWxUCiEbdu2oampyey3IyIionEmJ2WXdevWYc2aNVi8eDEuvfRSPPbYYxgYGMAdd9yRi7cjIiKicSQnwcdXv/pVnD59Gj/4wQ/Q0dGBCy+8EC+//HJCE2q+ud1u/PCHP0wo85D5eK3zh9c6f3it84fXOn9G41pLsp49MUREREQm4dkuRERElFcMPoiIiCivGHwQERFRXjH4ICIioryyTPCxYcMGTJs2DQUFBViyZAn27Nkz2ksa95qbm3HJJZegtLQUU6ZMwc0334yDBw/GvMbn82Ht2rWorKxESUkJVq1alTCAjox75JFHIEkS7r//fuUxXm
[base64-encoded PNG of the score plot elided]",
+        "text/plain": [
+         "<Figure size 640x480 with 1 Axes>
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "plt.plot(result.scores)" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\n", + "\n", + "You are an instruction optimizer for large language models.\n", + "\n", + "I will give some task instructions I've tried, along with their corresponding validation scores.\n", + "- The instructions are arranged in order based on their scores, where higher scores indicate better quality.\n", + "- Your task is to propose a new instruction that will lead a good language model to perform the task even better.\n", + "- Be creative, and think out of the box.\n", + "- Don't repeat instructions, descriptions and prefixes that have already been attempted.\n", + "\n", + "---\n", + "\n", + "Follow the following format.\n", + "\n", + "Analysis: Consider what made the previous instructions good or bad.\n", + "Proposed Signature: A signature that will likely lead to a high score.. Respond with a single JSON object. JSON Schema: {\"properties\": {\"instructions\": {\"description\": \"The instructions for the task\", \"title\": \"Instructions\", \"type\": \"string\"}, \"question_prefix\": {\"description\": \"The prefix for question\", \"title\": \"Question Prefix\", \"type\": \"string\"}, \"question_desc\": {\"description\": \"The description for question\", \"title\": \"Question Desc\", \"type\": \"string\"}, \"answer_prefix\": {\"description\": \"The prefix for answer\", \"title\": \"Answer Prefix\", \"type\": \"string\"}, \"answer_desc\": {\"description\": \"The description for answer\", \"title\": \"Answer Desc\", \"type\": \"string\"}}, \"required\": [\"instructions\", \"question_prefix\", \"question_desc\", \"answer_prefix\", \"answer_desc\"], \"title\": \"SignatureInfo[BasicQA]\", \"type\": \"object\"}\n", + "Score: The expected score for the new signature. Don't write anything after this number. (Respond with a single float value)\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"You are an expert in your field. Respond to the inquiries with short, factual answers.\",\"question_prefix\":\"Inquiry:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Response:\",\"answer_desc\":\"typically a few words, factual\"}\n", + "Score: 34.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with extensive knowledge, provide precise and factual responses to the questions. Keep your answers brief, typically within 1-5 words.\",\"question_prefix\":\"Interrogative:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Rejoinder:\",\"answer_desc\":\"a concise, factual answer\"}\n", + "Score: 34.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a vast database of information, your task is to provide accurate and succinct answers to the following questions. Your responses should be factual and typically consist of 1-5 words.\",\"question_prefix\":\"Prompt:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Retort:\",\"answer_desc\":\"a short, factual answer\"}\n", + "Score: 34.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with access to a vast knowledge base, your task is to provide accurate, factual answers to the questions posed. 
The questions will be about various topics, and your responses should be succinct, typically consisting of 1-5 words.\",\"question_prefix\":\"Inquiry:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Response:\",\"answer_desc\":\"a brief, factual answer\"}\n", + "Score: 34.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a vast reservoir of information, your task is to provide precise, factual answers to the questions asked. Your responses should be succinct, typically consisting of 1-5 words, and cover a wide range of topics.\",\"question_prefix\":\"Enquiry:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Rejoinder:\",\"answer_desc\":\"a brief, factual answer\"}\n", + "Score: 34.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a vast knowledge base, your task is to provide accurate, factual answers to a wide range of questions. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge and understanding.\",\"question_prefix\":\"Inquiry:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Retort:\",\"answer_desc\":\"a brief, factual answer demonstrating accuracy and understanding\"}\n", + "Score: 34.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a vast knowledge repository, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy and precision are paramount.\",\"question_prefix\":\"Interrogative:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Rejoinder:\",\"answer_desc\":\"a brief, factual answer demonstrating precision and breadth of knowledge\"}\n", + "Score: 34.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a comprehensive knowledge repository, your task is to provide precise, factual responses to the inquiries posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount.\",\"question_prefix\":\"Interrogative Scrutiny:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Succinct Rejoinder:\",\"answer_desc\":\"a brief, factual answer demonstrating precision, brevity, and understanding\"}\n", + "Score: 34.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount.\",\"question_prefix\":\"Interrogative Inquiry:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Succinct Riposte:\",\"answer_desc\":\"a brief, factual answer demonstrating precision, brevity, and understanding\"}\n", + "Score: 34.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a vast knowledge base, your task is to provide accurate, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. 
Consider the context of the question to provide the most relevant answer.\",\"question_prefix\":\"Interrogative Assessment:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Concise Counterpoint:\",\"answer_desc\":\"a brief, factual answer demonstrating precision, brevity, and understanding\"}\n", + "Score: 34.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a vast knowledge base, your task is to provide accurate, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context of the question to provide the most relevant and accurate answer.\",\"question_prefix\":\"Interrogative Analysis:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Concise Clarification:\",\"answer_desc\":\"a brief, factual answer demonstrating precision, brevity, and understanding\"}\n", + "Score: 34.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the questions asked. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.\",\"question_prefix\":\"Interrogative Query:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Precise Response:\",\"answer_desc\":\"a brief, factual answer demonstrating precision, brevity, and contextual understanding\"}\n", + "Score: 34.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a comprehensive knowledge base, your task is to provide accurate, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Precision, brevity, and accuracy are paramount.\",\"question_prefix\":\"Interrogative Probe:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Concise Rebuttal:\",\"answer_desc\":\"a brief, factual answer demonstrating precision, brevity, and understanding\"}\n", + "Score: 36.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the inquiries posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount.\",\"question_prefix\":\"Interrogative Examination:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Succinct Rejoinder:\",\"answer_desc\":\"a brief, factual answer demonstrating precision, brevity, and understanding\"}\n", + "Score: 36.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. 
Remember, your goal is to provide the most accurate and concise answer possible.\",\"question_prefix\":\"Interrogative Assessment:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Concise Counterpoint:\",\"answer_desc\":\"a brief, factual answer demonstrating precision, brevity, and understanding\"}\n", + "Score: 36.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Additionally, consider the context of the question to provide the most relevant and accurate answer.\",\"question_prefix\":\"Contextual Inquiry:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Contextual Response:\",\"answer_desc\":\"a brief, factual answer demonstrating precision, brevity, and contextual understanding\"}\n", + "Score: 36.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with an extensive knowledge base, your task is to provide accurate, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your wide-ranging knowledge across various topics. Accuracy, precision, and brevity are key. Consider the context of the question to provide the most relevant and accurate answer.\",\"question_prefix\":\"Interrogative Exploration:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Concise Clarification:\",\"answer_desc\":\"a brief, factual answer demonstrating precision, brevity, and understanding\"}\n", + "Score: 36.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. Consider the context of the question to provide the most relevant and accurate answer.\",\"question_prefix\":\"Interrogative Exploration:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Precise Rejoinder:\",\"answer_desc\":\"a brief, factual answer demonstrating precision, brevity, and contextual understanding\"}\n", + "Score: 36.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a comprehensive knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics.\",\"question_prefix\":\"Examination:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Rejoinder:\",\"answer_desc\":\"a brief, factual answer demonstrating precision and understanding\"}\n", + "Score: 38.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. 
Accuracy and precision are paramount.\",\"question_prefix\":\"Examination:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Rejoinder:\",\"answer_desc\":\"a brief, factual answer demonstrating precision and understanding\"}\n", + "Score: 38.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount.\",\"question_prefix\":\"Interrogative Probe:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Succinct Retort:\",\"answer_desc\":\"a brief, factual answer demonstrating precision, brevity, and understanding\"}\n", + "Score: 38.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a comprehensive knowledge base, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount.\",\"question_prefix\":\"Interrogative Query:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Succinct Rebuttal:\",\"answer_desc\":\"a brief, factual answer demonstrating precision, brevity, and understanding\"}\n", + "Score: 38.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. Consider the context of the question to provide the most relevant and accurate answer.\",\"question_prefix\":\"Interrogative Scrutiny:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Precise Rejoinder:\",\"answer_desc\":\"a brief, factual answer demonstrating precision, brevity, and contextual understanding\"}\n", + "Score: 38.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. Consider the context of the question to provide the most relevant and accurate answer.\",\"question_prefix\":\"Interrogative Scrutiny:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Precise Rejoinder:\",\"answer_desc\":\"a brief, factual answer demonstrating precision, brevity, and contextual understanding\"}\n", + "Score: 38.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the questions asked. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. 
Consider the context and nuances of the question to provide the most relevant and accurate answer.\",\"question_prefix\":\"Interrogative Dissection:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Precise Elucidation:\",\"answer_desc\":\"a brief, factual answer demonstrating precision, brevity, and contextual understanding\"}\n", + "Score: 38.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a comprehensive knowledge repository, your task is to provide precise, factual responses to the inquiries posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount.\",\"question_prefix\":\"Interrogative Examination:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Succinct Rejoinder:\",\"answer_desc\":\"a brief, factual answer demonstrating precision, brevity, and understanding\"}\n", + "Score: 40.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a comprehensive knowledge repository, your task is to provide precise, factual responses to the inquiries posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount.\",\"question_prefix\":\"Interrogative Examination:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Succinct Rejoinder:\",\"answer_desc\":\"a brief, factual answer demonstrating precision, brevity, and understanding\"}\n", + "Score: 40.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount.\",\"question_prefix\":\"Interrogative Assessment:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Concise Counterpart:\",\"answer_desc\":\"a brief, factual answer demonstrating precision, brevity, and understanding\"}\n", + "Score: 40.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a comprehensive knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.\",\"question_prefix\":\"Interrogative Dissection:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Precise Elucidation:\",\"answer_desc\":\"a brief, factual answer demonstrating precision, brevity, and contextual understanding\"}\n", + "Score: 42.0\n", + "\n", + "---\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a comprehensive knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. 
Consider the context and nuances of the question to provide the most relevant and accurate answer.\",\"question_prefix\":\"Interrogative Dissection:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Precise Elucidation:\",\"answer_desc\":\"a brief, factual answer demonstrating precision, brevity, and contextual understanding\"}\n", + "Score: 42.0\n", + "\n", + "---\n", + "\n", + "Analysis:\u001b[32m The instructions that scored higher were those that emphasized the AI's extensive knowledge base and its ability to provide precise, factual responses. They also stressed the importance of brevity, typically limiting responses to 1-5 words. The instructions that scored lower did not emphasize these points as strongly. The use of unique prefixes for questions and answers, such as \"Interrogative Dissection\" and \"Precise Elucidation\", seemed to contribute to higher scores as well.\n", + "\n", + "Proposed Signature: {\"instructions\":\"As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.\",\"question_prefix\":\"Interrogative Probe:\",\"question_desc\":\"${question}\",\"answer_prefix\":\"Concise Clarification:\",\"answer_desc\":\"a brief, factual answer demonstrating precision, brevity, and contextual understanding\"}\n", + "\n", + "Score: 44.0\u001b[0m\n", + "\n", + "\n", + "\n" + ] + } + ], + "source": [ + "gpt4.inspect_history(n=1)" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "42.0" + ] + }, + "execution_count": 29, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "result.scores[48]" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'base': StringSignature(question -> answer\n", + " instructions='As an AI with a comprehensive knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. 
Consider the context and nuances of the question to provide the most relevant and accurate answer.'\n", + " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Interrogative Dissection:', 'desc': '${question}'})\n", + " answer = Field(annotation=str required=True json_schema_extra={'desc': 'a brief, factual answer demonstrating precision, brevity, and contextual understanding', '__dspy_field_type': 'output', 'prefix': 'Precise Elucidation:'})\n", + " )}" + ] + }, + "execution_count": 28, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "result.signatures[48]" + ] } ], "metadata": { diff --git a/tests/functional/test_signature_opt_typed.py b/tests/functional/test_signature_opt_typed.py index 44adb9d0ea..4f0124fcc1 100644 --- a/tests/functional/test_signature_opt_typed.py +++ b/tests/functional/test_signature_opt_typed.py @@ -154,7 +154,7 @@ def test_opt(): ) dspy.settings.configure(lm=qa_model) - program = optimize_signature( + result = optimize_signature( student=TypedPredictor(BasicQA), evaluator=Evaluate(devset=hotpotqa, metric=answer_exact_match, num_threads=1), initial_prompts=1, @@ -172,4 +172,6 @@ class ExpectedSignature(dspy.Signature): question: str = dspy.InputField(desc="$q", prefix="Q:") answer: str = dspy.OutputField(desc="$a", prefix="A:") - assert program.signature.equals(ExpectedSignature) + assert result.program.signature.equals(ExpectedSignature) + + assert result.scores == [0, 0] From 1e638a14da97d3a46b02a215f064a8c98ddcf8a3 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Wed, 6 Mar 2024 12:54:06 -0800 Subject: [PATCH 131/243] Force the optimizer to not repeat signatures --- dspy/signatures/signature.py | 9 +- dspy/teleprompt/signature_opt_typed.py | 41 +- examples/functional/signature_opt_typed.ipynb | 1841 ++--------------- examples/quiz/DSPy_QuizGen_Cache | 1 + tests/functional/test_functional.py | 17 + tests/functional/test_signature_opt_typed.py | 112 +- 6 files changed, 343 insertions(+), 1678 deletions(-) create mode 160000 examples/quiz/DSPy_QuizGen_Cache diff --git a/dspy/signatures/signature.py b/dspy/signatures/signature.py index 314d6c712b..74fa3ee731 100644 --- a/dspy/signatures/signature.py +++ b/dspy/signatures/signature.py @@ -35,7 +35,9 @@ def __call__(cls, *args, **kwargs): # noqa: ANN002 def __new__(mcs, signature_name, bases, namespace, **kwargs): # noqa: N804 # Set `str` as the default type for all fields raw_annotations = namespace.get("__annotations__", {}) - for name, _field in namespace.items(): + for name, field in namespace.items(): + if not isinstance(field, FieldInfo): + continue # Don't add types to non-field attributes if not name.startswith("__") and name not in raw_annotations: raw_annotations[name] = str namespace["__annotations__"] = raw_annotations @@ -272,9 +274,8 @@ def make_signature( def _parse_signature(signature: str) -> Tuple[Type, Field]: - pattern = r"^\s*[\w\s,:]+\s*->\s*[\w\s,:]+\s*$" - if not re.match(pattern, signature): - raise ValueError(f"Invalid signature format: '{signature}'") + if signature.count("->") != 1: + raise ValueError(f"Invalid signature format: '{signature}', must contain exactly one '->'.") fields = {} inputs_str, outputs_str = map(str.strip, signature.split("->")) diff --git a/dspy/teleprompt/signature_opt_typed.py b/dspy/teleprompt/signature_opt_typed.py index 35e103306c..1455971533 100644 --- a/dspy/teleprompt/signature_opt_typed.py +++ b/dspy/teleprompt/signature_opt_typed.py @@ -100,20 +100,32 @@ class 
GenerateInstructionInitial(Signature, Generic[T]): return GenerateInstructionInitial -class GenerateSignature(dspy.Signature, Generic[T]): - __doc__ = textwrap.dedent("""\ - You are an instruction optimizer for large language models. +def generate_with_avoidance(signatures_to_avoid: list[BaseModel]) -> type[Signature]: + class GenerateSignature(dspy.Signature, Generic[T]): + __doc__ = textwrap.dedent("""\ + You are an instruction optimizer for large language models. + + I will give some task instructions I've tried, along with their corresponding validation scores. + - The instructions are arranged in order based on their scores, where higher scores indicate better quality. + - Your task is to propose a new instruction that will lead a good language model to perform the task even better. + - Be creative, and think out of the box. + - Don't repeat instructions, descriptions and prefixes that have already been attempted. + """) - I will give some task instructions I've tried, along with their corresponding validation scores. - - The instructions are arranged in order based on their scores, where higher scores indicate better quality. - - Your task is to propose a new instruction that will lead a good language model to perform the task even better. - - Be creative, and think out of the box. - - Don't repeat instructions, descriptions and prefixes that have already been attempted. - """) + analysis: str = OutputField(desc="Consider what made the previous instructions good or bad.") + proposed_signature: T = OutputField(desc="A signature that will likely lead to a high score.") + score: float = OutputField( + desc="The expected score for the new signature. Don't write anything after this number." + ) + + @pydantic.field_validator("proposed_signature") + @classmethod + def check_signature_not_attempted(cls, s: T) -> T: + if s in signatures_to_avoid: + raise ValueError("Never propose a signature already in the list above.") + return s - analysis: str = OutputField(desc="Consider what made the previous instructions good or bad.") - proposed_signature: T = OutputField(desc="A signature that will likely lead to a high score.") - score: float = OutputField(desc="The expected score for the new signature. Don't write anything after this number.") + return GenerateSignature @dataclass @@ -233,7 +245,6 @@ def optimize_signature( # TODO: Parallelize this for name, _p in named_predictors: SignatureInfo = type(candidates[name][0]) # noqa: N806 - generator = TypedPredictor(GenerateSignature[SignatureInfo]) demos = [dspy.Example(proposed_signature=info, score=sc) for info, sc in zip(candidates[name], scores)] if sorted_order == "chronological": @@ -246,6 +257,10 @@ def optimize_signature( demos = demos[:max_examples] else: raise ValueError(f"Invalid sorted_order: {sorted_order}") + + # We can only tell the LM to avoid the signatures we are actually giving it as demos. 
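+        # The validator in generate_with_avoidance tests `s in signatures_to_avoid`, which
+        # relies on pydantic's field-wise __eq__ between SignatureInfo instances, so an
+        # exact repeat of any demo signature fails validation and TypedPredictor's retry
+        # logic asks the LM for a fresh proposal instead of accepting the repeat.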
+ avoid = [ex.proposed_signature for ex in demos] + generator = TypedPredictor(generate_with_avoidance(avoid)[SignatureInfo]) generator.predictor.demos = demos if verbose: diff --git a/examples/functional/signature_opt_typed.ipynb b/examples/functional/signature_opt_typed.ipynb index 115d481406..feab8d6356 100644 --- a/examples/functional/signature_opt_typed.ipynb +++ b/examples/functional/signature_opt_typed.ipynb @@ -2,18 +2,9 @@ "cells": [ { "cell_type": "code", - "execution_count": 7, + "execution_count": 1, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The autoreload extension is already loaded. To reload it, use:\n", - " %reload_ext autoreload\n" - ] - } - ], + "outputs": [], "source": [ "%load_ext autoreload\n", "%autoreload 2\n", @@ -25,1529 +16,310 @@ ] }, { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "import dspy\n", - "turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=4000)\n", - "gpt4 = dspy.OpenAI(model='gpt-4', max_tokens=4000)\n", - "dspy.settings.configure(lm=turbo)" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Prediction(\n", - " answer='Paris'\n", - ")" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "dspy.TypedPredictor(\"question -> answer\")(question=\"What is the capital of France?\")" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "(20, 50)" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from dspy.datasets import HotPotQA\n", - "\n", - "# Load the dataset.\n", - "dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0)\n", - "\n", - "# Tell DSPy that the 'question' field is the input. Any other fields are labels and/or metadata.\n", - "trainset = [x.with_inputs('question') for x in dataset.train]\n", - "devset = [x.with_inputs('question') for x in dataset.dev]\n", - "\n", - "len(trainset), len(devset)" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "class BasicQA(dspy.Signature):\n", - " \"\"\"Answer questions with short factoid answers.\"\"\"\n", - "\n", - " question = dspy.InputField()\n", - " answer = dspy.OutputField(desc=\"often between 1 and 5 words\")" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Found 1 typed predictors to optimize.\n", - "Generating 6 initial signatures for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 0...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 5693.37it/s]\n", - "/Users/ahle/repos/dspy/dspy/evaluate/evaluate.py:145: FutureWarning: DataFrame.applymap has been deprecated. 
Use DataFrame.map instead.\n", - " df = df.applymap(truncate_cell)\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 16 / 50 (32.0%)\n", - "\n", - "================================================================================\n", - "Running eval iteration 1...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 1 / 50 (2.0): 100%|██████████| 50/50 [00:00<00:00, 5255.36it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 1 / 50 (2.0%)\n", - "\n", - "================================================================================\n", - "Running eval iteration 2...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 4871.89it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0%)\n", - "\n", - "================================================================================\n", - "Running eval iteration 3...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 5412.98it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 16 / 50 (32.0%)\n", - "\n", - "================================================================================\n", - "Running eval iteration 4...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 6 / 50 (12.0): 100%|██████████| 50/50 [00:00<00:00, 5261.43it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 6 / 50 (12.0%)\n", - "\n", - "================================================================================\n", - "Running eval iteration 5...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 5 / 50 (10.0): 100%|██████████| 50/50 [00:00<00:00, 5405.45it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 5 / 50 (10.0%)\n", - "\n", - "================================================================================\n", - "Running eval iteration 6...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Average Metric: 12 / 50 (24.0): 100%|██████████| 50/50 [00:00<00:00, 5346.47it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 12 / 50 (24.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 7...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 2367.98it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 8...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 5037.72it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0%)\n", - "Generating new signature for base...\n", - "\n", - 
"================================================================================\n", - "Running eval iteration 9...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 12 / 50 (24.0): 100%|██████████| 50/50 [00:00<00:00, 4994.05it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 12 / 50 (24.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 10...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 11 / 50 (22.0): 100%|██████████| 50/50 [00:00<00:00, 5207.99it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 11 / 50 (22.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 11...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5178.03it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 12...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 6126.65it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 13...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 1013.36it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 14...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 4902.06it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 15...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 4703.72it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 16 / 50 (32.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 16...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 5067.42it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 16 / 50 (32.0%)\n", - "Generating new signature for 
base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 17...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 5343.06it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 18...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 5155.11it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 16 / 50 (32.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 19...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 19 / 50 (38.0): 100%|██████████| 50/50 [00:00<00:00, 673.08it/s] \n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 19 / 50 (38.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 20...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 5469.88it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 16 / 50 (32.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 21...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 4980.53it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 22...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 19 / 50 (38.0): 100%|██████████| 50/50 [00:00<00:00, 6185.74it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 19 / 50 (38.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 23...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5566.87it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 24...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 4908.72it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating 
new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 25...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 968.52it/s] \n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 16 / 50 (32.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 26...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 4921.16it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 27...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Average Metric: 18 / 50 (36.0): 100%|██████████| 50/50 [00:00<00:00, 5208.37it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 18 / 50 (36.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 28...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Average Metric: 20 / 50 (40.0): 100%|██████████| 50/50 [00:00<00:00, 5443.61it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 20 / 50 (40.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 29...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 20 / 50 (40.0): 100%|██████████| 50/50 [00:00<00:00, 5854.70it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 20 / 50 (40.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 30...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 19 / 50 (38.0): 100%|██████████| 50/50 [00:00<00:00, 5709.64it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 19 / 50 (38.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 31...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 18 / 50 (36.0): 100%|██████████| 50/50 [00:00<00:00, 810.21it/s] \n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 18 / 50 (36.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 32...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 19 / 50 (38.0): 100%|██████████| 50/50 [00:00<00:00, 3970.45it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 
19 / 50 (38.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 33...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 4379.74it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 34...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 3614.53it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 35...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 3822.32it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 36...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 20 / 50 (40.0): 100%|██████████| 50/50 [00:00<00:00, 3541.11it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 20 / 50 (40.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 37...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 18 / 50 (36.0): 100%|██████████| 50/50 [00:00<00:00, 2972.87it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 18 / 50 (36.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 38...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 2611.94it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 39...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 18 / 50 (36.0): 100%|██████████| 50/50 [00:00<00:00, 2124.95it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 18 / 50 (36.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 40...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 2477.12it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - 
"Average Metric: 17 / 50 (34.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 41...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 18 / 50 (36.0): 100%|██████████| 50/50 [00:00<00:00, 5950.21it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 18 / 50 (36.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 42...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 19 / 50 (38.0): 100%|██████████| 50/50 [00:00<00:00, 5514.47it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 19 / 50 (38.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 43...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 18 / 50 (36.0): 100%|██████████| 50/50 [00:00<00:00, 5892.86it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 18 / 50 (36.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 44...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 2642.45it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 16 / 50 (32.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 45...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 19 / 50 (38.0): 100%|██████████| 50/50 [00:00<00:00, 4842.19it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 19 / 50 (38.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 46...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 5380.35it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 47...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Average Metric: 19 / 50 (38.0): 100%|██████████| 50/50 [00:00<00:00, 5425.87it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 19 / 50 (38.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 48...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 21 / 50 (42.0): 100%|██████████| 50/50 [00:00<00:00, 5317.32it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": 
"stream", - "text": [ - "Average Metric: 21 / 50 (42.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 49...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 21 / 50 (42.0): 100%|██████████| 50/50 [00:00<00:00, 5832.88it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 21 / 50 (42.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 50...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 1801.99it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 51...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 4837.94it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 52...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5629.33it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 53...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5490.21it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 54...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5715.56it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 55...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5685.19it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 56...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 668.95it/s]\n" - ] - }, - { - "name": 
"stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 57...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5015.07it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 58...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5527.40it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 59...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5037.23it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 60...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5525.36it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 61...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5428.40it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 62...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 956.70it/s] " - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 63...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5655.44it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 64...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5225.77it/s]\n" - ] 
- }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 65...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 4379.93it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 66...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5452.39it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 67...\n" - ] - }, + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5898.83it/s]\n" + "/opt/homebrew/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" ] - }, + } + ], + "source": [ + "import dspy\n", + "turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=4000)\n", + "gpt4 = dspy.OpenAI(model='gpt-4', max_tokens=4000)\n", + "dspy.settings.configure(lm=turbo)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Prediction(\n", + " answer='Paris'\n", + ")" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "dspy.TypedPredictor(\"question -> answer\")(question=\"What is the capital of France?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(20, 50)" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from dspy.datasets import HotPotQA\n", + "\n", + "# Load the dataset.\n", + "dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0)\n", + "\n", + "# Tell DSPy that the 'question' field is the input. 
Any other fields are labels and/or metadata.\n", + "trainset = [x.with_inputs('question') for x in dataset.train]\n", + "devset = [x.with_inputs('question') for x in dataset.dev]\n", + "\n", + "len(trainset), len(devset)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "class BasicQA(dspy.Signature):\n", + " \"\"\"Answer questions with short factoid answers.\"\"\"\n", + "\n", + " question = dspy.InputField()\n", + " answer = dspy.OutputField(desc=\"often between 1 and 5 words\")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", + "Found 1 typed predictors to optimize.\n", + "Generating 6 initial signatures for base...\n", "\n", "================================================================================\n", - "Running eval iteration 68...\n" + "Running eval iteration 0...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 711.01it/s] \n" + "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 3086.59it/s]\n", + "/Users/ahle/repos/dspy/dspy/evaluate/evaluate.py:145: FutureWarning: DataFrame.applymap has been deprecated. Use DataFrame.map instead.\n", + " df = df.applymap(truncate_cell)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", + "Average Metric: 16 / 50 (32.0%)\n", "\n", "================================================================================\n", - "Running eval iteration 69...\n" + "Running eval iteration 1...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 3476.83it/s]\n" + "Average Metric: 1 / 50 (2.0): 100%|██████████| 50/50 [00:00<00:00, 1268.65it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", + "Average Metric: 1 / 50 (2.0%)\n", "\n", "================================================================================\n", - "Running eval iteration 70...\n" + "Running eval iteration 2...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 4120.30it/s]\n" + "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 1031.35it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", + "Average Metric: 17 / 50 (34.0%)\n", "\n", "================================================================================\n", - "Running eval iteration 71...\n" + "Running eval iteration 3...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 4182.01it/s]\n" + "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 1364.88it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", + "Average Metric: 16 / 50 (32.0%)\n", "\n", "================================================================================\n", - "Running eval iteration 72...\n" + "Running eval iteration 4...\n" ] }, { "name": 
"stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 4117.15it/s]\n" + "Average Metric: 6 / 50 (12.0): 100%|██████████| 50/50 [00:00<00:00, 892.68it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", + "Average Metric: 6 / 50 (12.0%)\n", "\n", "================================================================================\n", - "Running eval iteration 73...\n" + "Running eval iteration 5...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 3325.38it/s]\n" + "Average Metric: 5 / 50 (10.0): 100%|██████████| 50/50 [00:00<00:00, 1055.56it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", - "Generating new signature for base...\n", + "Average Metric: 5 / 50 (10.0%)\n", "\n", "================================================================================\n", - "Running eval iteration 74...\n" + "Running eval iteration 6...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 3122.67it/s]\n" + "Average Metric: 12 / 50 (24.0): 100%|██████████| 50/50 [00:00<00:00, 942.15it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", + "Average Metric: 12 / 50 (24.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 7 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 75...\n" + "Running eval iteration 7...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 2266.85it/s]\n" + "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 1054.12it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", + "Average Metric: 17 / 50 (34.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 8 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 76...\n" + "Running eval iteration 8...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 1976.75it/s]\n" + "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 957.29it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", + "Average Metric: 17 / 50 (34.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 9 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 77...\n" + "Running eval iteration 9...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 2248.40it/s]\n" + "Average Metric: 12 / 50 (24.0): 100%|██████████| 50/50 [00:00<00:00, 1015.95it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", + "Average Metric: 12 / 50 (24.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 10 to 
avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 78...\n" + "Running eval iteration 10...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5650.26it/s]\n" + "Average Metric: 11 / 50 (22.0): 100%|██████████| 50/50 [00:00<00:00, 839.64it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", + "Average Metric: 11 / 50 (22.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 11 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 79...\n" + "Running eval iteration 11...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 1173.79it/s]\n" + "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 833.32it/s]\n" ] }, { @@ -1556,16 +328,17 @@ "text": [ "Average Metric: 15 / 50 (30.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 12 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 80...\n" + "Running eval iteration 12...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5328.67it/s]\n" + "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 1105.97it/s]\n" ] }, { @@ -1574,196 +347,207 @@ "text": [ "Average Metric: 15 / 50 (30.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 13 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 81...\n" + "Running eval iteration 13...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 4941.68it/s]\n" + "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 1112.59it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", + "Average Metric: 17 / 50 (34.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 14 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 82...\n" + "Running eval iteration 14...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 4712.60it/s]\n" + "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 1096.58it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", + "Average Metric: 17 / 50 (34.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 15 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 83...\n" + "Running eval iteration 15...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5124.50it/s]\n" + "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 1092.70it/s]\n" ] }, { "name": "stdout", 
"output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", + "Average Metric: 16 / 50 (32.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 16 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 84...\n" + "Running eval iteration 16...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5616.82it/s]\n" + "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 1097.79it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", + "Average Metric: 16 / 50 (32.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 17 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 85...\n" + "Running eval iteration 17...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5007.41it/s]\n" + "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 547.69it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", + "Average Metric: 17 / 50 (34.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 18 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 86...\n" + "Running eval iteration 18...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5371.25it/s]\n" + "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 964.67it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", + "Average Metric: 16 / 50 (32.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 19 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 87...\n" + "Running eval iteration 19...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 1938.59it/s]\n" + "Average Metric: 19 / 50 (38.0): 100%|██████████| 50/50 [00:00<00:00, 1014.22it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", + "Average Metric: 19 / 50 (38.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 20 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 88...\n" + "Running eval iteration 20...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5357.39it/s]\n" + "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 906.14it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", + "Average Metric: 16 / 50 (32.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 21 to avoid.\n", "\n", "================================================================================\n", 
- "Running eval iteration 89...\n" + "Running eval iteration 21...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5891.37it/s]\n" + "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:00<00:00, 1017.81it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", + "Average Metric: 17 / 50 (34.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 22 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 90...\n" + "Running eval iteration 22...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 4924.74it/s]\n" + "Average Metric: 19 / 50 (38.0): 100%|██████████| 50/50 [00:00<00:00, 1032.48it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", + "Average Metric: 19 / 50 (38.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 23 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 91...\n" + "Running eval iteration 23...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5301.19it/s]\n" + "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 726.33it/s]\n" ] }, { @@ -1772,16 +556,17 @@ "text": [ "Average Metric: 15 / 50 (30.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 24 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 92...\n" + "Running eval iteration 24...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 2998.12it/s]\n" + "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 957.55it/s]\n" ] }, { @@ -1790,34 +575,36 @@ "text": [ "Average Metric: 15 / 50 (30.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 25 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 93...\n" + "Running eval iteration 25...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 975.27it/s]\n" + "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 1009.53it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", + "Average Metric: 16 / 50 (32.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 26 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 94...\n" + "Running eval iteration 26...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5701.88it/s]\n" + "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 1064.53it/s]\n" ] }, { @@ -1826,102 +613,114 @@ "text": [ "Average Metric: 15 / 50 (30.0%)\n", "Generating new signature for base...\n", + "Tested 
the signature, and it's not in the list of 27 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 95...\n" + "Running eval iteration 27...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5424.74it/s]\n" + "Average Metric: 18 / 50 (36.0): 100%|██████████| 50/50 [00:00<00:00, 1052.90it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", + "Average Metric: 18 / 50 (36.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 28 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 96...\n" + "Running eval iteration 28...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 4879.48it/s]\n" + "Average Metric: 20 / 50 (40.0): 100%|██████████| 50/50 [00:00<00:00, 731.18it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", + "Average Metric: 20 / 50 (40.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 29 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 97...\n" + "Running eval iteration 29...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5342.52it/s]\n" + "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:02<00:00, 18.61it/s]\n", + "/Users/ahle/repos/dspy/dspy/evaluate/evaluate.py:145: FutureWarning: DataFrame.applymap has been deprecated. 
Use DataFrame.map instead.\n", + " df = df.applymap(truncate_cell)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", + "Average Metric: 16 / 50 (32.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 30 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 98...\n" + "Running eval iteration 30...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 5386.71it/s]\n" + "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:02<00:00, 18.23it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n", + "Average Metric: 17 / 50 (34.0%)\n", "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 31 to avoid.\n", "\n", "================================================================================\n", - "Running eval iteration 99...\n" + "Running eval iteration 31...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0): 100%|██████████| 50/50 [00:00<00:00, 957.51it/s] " + "Average Metric: 19 / 50 (38.0): 100%|██████████| 50/50 [00:02<00:00, 20.82it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 15 / 50 (30.0%)\n" + "Average Metric: 19 / 50 (38.0%)\n", + "Generating new signature for base...\n", + "Tested the signature, and it's not in the list of 32 to avoid.\n", + "\n", + "================================================================================\n", + "Running eval iteration 32...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "\n" + "Average Metric: 17 / 49 (34.7): 98%|█████████▊| 49/50 [00:14<00:00, 20.66it/s]" ] } ], @@ -1944,9 +743,16 @@ ")" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Check the final program after optimization" + ] + }, { "cell_type": "code", - "execution_count": 25, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -1959,7 +765,7 @@ "))" ] }, - "execution_count": 25, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } @@ -1969,171 +775,24 @@ ] }, { - "cell_type": "code", - "execution_count": 21, + "cell_type": "markdown", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'base': [SignatureInfo[BasicQA](instructions='Answer questions with short factoid answers.', question_prefix='Question:', question_desc='${question}', answer_prefix='Answer:', answer_desc='often between 1 and 5 words'),\n", - " SignatureInfo[BasicQA](instructions='You are a knowledgeable AI. Provide concise answers to the following questions.', question_prefix='Q:', question_desc='${question}', answer_prefix='A:', answer_desc='a brief, factual response'),\n", - " SignatureInfo[BasicQA](instructions='You are an expert in your field. Respond to the inquiries with short, factual answers.', question_prefix='Inquiry:', question_desc='${question}', answer_prefix='Response:', answer_desc='typically a few words, factual'),\n", - " SignatureInfo[BasicQA](instructions='You are a highly intelligent AI. Please provide succinct answers to the questions.', question_prefix='Query:', question_desc='${question}', answer_prefix='Reply:', answer_desc='usually 1-5 words, factual'),\n", - " SignatureInfo[BasicQA](instructions='You are as smart as ChatGPT. 
Please answer the following questions briefly and accurately.', question_prefix='Question:', question_desc='${question}', answer_prefix='Answer:', answer_desc='a short, factual response'),\n", - " SignatureInfo[BasicQA](instructions='You are a professor of knowledge. Please provide short, fact-based answers to the questions.', question_prefix='Q:', question_desc='${question}', answer_prefix='A:', answer_desc='a brief, factual answer'),\n", - " SignatureInfo[BasicQA](instructions='You are a well-informed AI. Please respond to the questions with concise, factual answers.', question_prefix='Query:', question_desc='${question}', answer_prefix='Response:', answer_desc='typically a few words, factual'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with extensive knowledge, provide precise and factual responses to the questions. Keep your answers brief, typically within 1-5 words.', question_prefix='Interrogative:', question_desc='${question}', answer_prefix='Rejoinder:', answer_desc='a concise, factual answer'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a vast database of information, your task is to provide accurate and succinct answers to the following questions. Your responses should be factual and typically consist of 1-5 words.', question_prefix='Prompt:', question_desc='${question}', answer_prefix='Retort:', answer_desc='a short, factual answer'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with comprehensive knowledge, your role is to provide brief and factual answers to the following questions. Your responses should be accurate, typically not exceeding five words.', question_prefix='Posed Question:', question_desc='${question}', answer_prefix='Concise Reply:', answer_desc='a factual answer, usually within 1-5 words'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge base, your task is to provide precise and factual answers to the questions asked. Your responses should be concise, typically not exceeding five words.', question_prefix='Inquiry:', question_desc='${question}', answer_prefix='Repartee:', answer_desc='a brief, factual answer'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide succinct, factual answers to the questions posed. Your responses should be accurate and typically not exceed five words.', question_prefix='Interrogation:', question_desc='${question}', answer_prefix='Rebuttal:', answer_desc='a brief, factual answer, usually within 1-5 words'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with access to a vast array of information, your task is to provide accurate, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words.', question_prefix='Prompt:', question_desc='${question}', answer_prefix='Retort:', answer_desc='a brief, factual answer'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with access to a vast knowledge base, your task is to provide accurate, factual answers to the questions posed. The questions will be about various topics, and your responses should be succinct, typically consisting of 1-5 words.', question_prefix='Inquiry:', question_desc='${question}', answer_prefix='Response:', answer_desc='a brief, factual answer'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a vast reservoir of information, your task is to provide precise, factual answers to the questions asked. 
Your responses should be succinct, typically consisting of 1-5 words, and cover a wide range of topics.', question_prefix='Enquiry:', question_desc='${question}', answer_prefix='Rejoinder:', answer_desc='a brief, factual answer'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with access to a vast repository of information, your task is to provide accurate, factual answers to the questions posed. The questions could be about any topic, so use your extensive knowledge to provide the most accurate response. Your answers should be succinct, typically consisting of 1-5 words.', question_prefix='Interrogation:', question_desc='${question}', answer_prefix='Repartee:', answer_desc='a brief, factual answer, demonstrating accuracy and breadth of knowledge'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a vast compendium of information, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and cover a broad spectrum of topics.', question_prefix='Probing:', question_desc='${question}', answer_prefix='Repartee:', answer_desc='a concise, factual answer'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge base, your task is to provide accurate, factual answers to a wide range of questions. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge and understanding.', question_prefix='Inquiry:', question_desc='${question}', answer_prefix='Retort:', answer_desc='a brief, factual answer demonstrating accuracy and understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge base, your task is to provide accurate, factual answers to a wide range of questions, from general knowledge to specific facts. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge and understanding. Accuracy is paramount.', question_prefix='Interrogative:', question_desc='${question}', answer_prefix='Rejoinder:', answer_desc='a brief, factual answer demonstrating accuracy and understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics.', question_prefix='Examination:', question_desc='${question}', answer_prefix='Rejoinder:', answer_desc='a brief, factual answer demonstrating precision and understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge repository, your task is to provide accurate, factual responses to the questions asked. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy and precision are paramount.', question_prefix='Interrogation:', question_desc='${question}', answer_prefix='Repartee:', answer_desc='a brief, factual answer demonstrating precision and breadth of knowledge'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge repository, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. 
Accuracy and precision are paramount.', question_prefix='Interrogative:', question_desc='${question}', answer_prefix='Rejoinder:', answer_desc='a brief, factual answer demonstrating precision and breadth of knowledge'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy and precision are paramount.', question_prefix='Examination:', question_desc='${question}', answer_prefix='Rejoinder:', answer_desc='a brief, factual answer demonstrating precision and understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge base, your task is to provide accurate, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount.', question_prefix='Interrogation:', question_desc='${question}', answer_prefix='Repartee:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge base, your task is to provide accurate, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount.', question_prefix='Interrogative:', question_desc='${question}', answer_prefix='Rejoinder:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide accurate, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Precision and brevity are key.', question_prefix='Interrogative Statement:', question_desc='${question}', answer_prefix='Concise Response:', answer_desc='a brief, factual answer demonstrating precision and understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge base, your task is to provide accurate, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount.', question_prefix='Interrogative Query:', question_desc='${question}', answer_prefix='Concise Rejoinder:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide accurate, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. 
Precision, brevity, and accuracy are paramount.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Rebuttal:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n",\n- "          SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge repository, your task is to provide precise, factual responses to the inquiries posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount.', question_prefix='Interrogative Examination:', question_desc='${question}', answer_prefix='Succinct Rejoinder:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n",\n- "          SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Succinct Retort:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n",\n- "          SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the inquiries posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount.', question_prefix='Interrogative Examination:', question_desc='${question}', answer_prefix='Succinct Rejoinder:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n",\n- "          SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount.', question_prefix='Interrogative Query:', question_desc='${question}', answer_prefix='Succinct Rebuttal:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n",\n- "          SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge repository, your task is to provide precise, factual responses to the inquiries posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. 
Accuracy, precision, and brevity are paramount.', question_prefix='Interrogative Scrutiny:', question_desc='${question}', answer_prefix='Succinct Rejoinder:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge repository, your task is to provide precise, factual responses to the inquiries posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount.', question_prefix='Interrogative Scrutiny:', question_desc='${question}', answer_prefix='Precise Retort:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount.', question_prefix='Interrogative Inquiry:', question_desc='${question}', answer_prefix='Succinct Riposte:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount.', question_prefix='Interrogative Assessment:', question_desc='${question}', answer_prefix='Concise Counterpart:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Remember, your goal is to provide the most accurate and concise answer possible.', question_prefix='Interrogative Assessment:', question_desc='${question}', answer_prefix='Concise Counterpoint:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge base, your task is to provide accurate, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context of the question to provide the most relevant answer.', question_prefix='Interrogative Assessment:', question_desc='${question}', answer_prefix='Concise Counterpoint:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. 
Accuracy, precision, and brevity are paramount. Additionally, consider the context of the question to provide the most relevant and accurate answer.', question_prefix='Contextual Inquiry:', question_desc='${question}', answer_prefix='Contextual Response:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge base, your task is to provide accurate, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Analysis:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with an extensive knowledge base, your task is to provide accurate, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your wide-ranging knowledge across various topics. Accuracy, precision, and brevity are key. Consider the context of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Exploration:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. Consider the context of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Scrutiny:', question_desc='${question}', answer_prefix='Precise Rejoinder:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. Consider the context of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Exploration:', question_desc='${question}', answer_prefix='Precise Rejoinder:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. 
Consider the context of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. Consider the context of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Scrutiny:', question_desc='${question}', answer_prefix='Precise Rejoinder:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the questions asked. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Query:', question_desc='${question}', answer_prefix='Precise Response:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a vast knowledge repository, your task is to provide precise, factual responses to the questions asked. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Dissection:', question_desc='${question}', answer_prefix='Precise Elucidation:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Dissection:', question_desc='${question}', answer_prefix='Precise Elucidation:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. 
Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Dissection:', question_desc='${question}', answer_prefix='Precise Elucidation:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. 
Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. 
Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. 
Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. 
Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. 
Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. 
Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding'),\n", - " SignatureInfo[BasicQA](instructions='As an AI with a comprehensive knowledge base, your task is to provide precise, factual answers to the questions posed. Your responses should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a broad spectrum of topics. Accuracy, precision, and brevity are paramount. 
Consider the context and nuances of the question to provide the most relevant and accurate answer.', question_prefix='Interrogative Probe:', question_desc='${question}', answer_prefix='Concise Clarification:', answer_desc='a brief, factual answer demonstrating precision, brevity, and contextual understanding')]}" - ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - } - ], "source": [ - "result.signatures" + "Plot the scores over time" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "StringSignature(question -> answer\n", - " instructions='You are highly intelligent. Please provide short, factual answers to the following questions.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Inquiry:', 'desc': '${question}'})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'usually between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Reply:'})\n", - ")\n" - ] - } - ], - "source": [ - "print(result.program.signature)" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[]" + "[]" ] }, - "execution_count": 26, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" }, @@ -2155,7 +814,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -2348,50 +1007,6 @@ "source": [ "gpt4.inspect_history(n=1)" ] - }, - { - "cell_type": "code", - "execution_count": 29, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "42.0" - ] - }, - "execution_count": 29, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "result.scores[48]" - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'base': StringSignature(question -> answer\n", - " instructions='As an AI with a comprehensive knowledge repository, your task is to provide precise, factual responses to the questions posed. Your answers should be succinct, typically consisting of 1-5 words, and demonstrate your extensive knowledge across a wide array of topics. Accuracy, precision, and brevity are paramount. 
Consider the context and nuances of the question to provide the most relevant and accurate answer.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Interrogative Dissection:', 'desc': '${question}'})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'a brief, factual answer demonstrating precision, brevity, and contextual understanding', '__dspy_field_type': 'output', 'prefix': 'Precise Elucidation:'})\n", - " )}" - ] - }, - "execution_count": 28, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "result.signatures[48]" - ] } ], "metadata": { diff --git a/examples/quiz/DSPy_QuizGen_Cache b/examples/quiz/DSPy_QuizGen_Cache new file mode 160000 index 0000000000..27d6d433e7 --- /dev/null +++ b/examples/quiz/DSPy_QuizGen_Cache @@ -0,0 +1 @@ +Subproject commit 27d6d433e73b91d3cf677ecf1d757813fcbd611d diff --git a/tests/functional/test_functional.py b/tests/functional/test_functional.py index 2289ce697c..2d680d3b99 100644 --- a/tests/functional/test_functional.py +++ b/tests/functional/test_functional.py @@ -635,3 +635,20 @@ class GenericSignature(dspy.Signature, Generic[T]): dspy.settings.configure(lm=lm) assert predictor().output == 23 + + +def test_field_validator_in_signature(): + class ValidatedSignature(dspy.Signature): + a: str = dspy.OutputField() + + @pydantic.field_validator("a") + @classmethod + def space_in_a(cls, a: str) -> str: + if not " " in a: + raise ValueError("a must contain a space") + return a + + with pytest.raises(pydantic.ValidationError): + _ = ValidatedSignature(a="no-space") + + _ = ValidatedSignature(a="with space") diff --git a/tests/functional/test_signature_opt_typed.py b/tests/functional/test_signature_opt_typed.py index 4f0124fcc1..85c8c8d56c 100644 --- a/tests/functional/test_signature_opt_typed.py +++ b/tests/functional/test_signature_opt_typed.py @@ -1,12 +1,10 @@ -import json +from typing import Generic, TypeVar + +import pydantic import dspy from dspy.evaluate import Evaluate from dspy.functional import TypedPredictor -from dspy.teleprompt.signature_opt_typed import ( - GenerateSignature, - make_info, - optimize_signature, -) +from dspy.teleprompt.signature_opt_typed import optimize_signature, make_info from dspy.utils import DummyLM from dspy.evaluate import Evaluate @@ -14,11 +12,6 @@ from dspy.functional import TypedPredictor -class BasicQA(dspy.Signature): - question: str = dspy.InputField() - answer: str = dspy.OutputField() - - hotpotqa = [ ex.with_inputs("question") for ex in [ @@ -106,44 +99,11 @@ class BasicQA(dspy.Signature): ] -def old_test_signature_info(): - info = make_info(BasicQA) - SignatureInfo = type(info) - - devset = [ - dspy.Example( - instructions="Answer the following questions", - question_desc="Some question to answer", - question_prefix="Q: ", - answer_desc="A short answer to the question", - answer_prefix="A: ", - ), - ] - - lm = DummyLM( - [ - json.dumps(dict(devset[0])), # Proposed signature - ] - ) - dspy.settings.configure(lm=lm) - - generator = TypedPredictor(GenerateInstructionGivenAttempts[SignatureInfo]) - - res = generator(attempted_signatures=[ScoredSignature[SignatureInfo](signature=info, score=50)]) - assert res.proposed_signature == SignatureInfo(**devset[0]) - - # Test the "to_signature" method - - class OutputSignature(dspy.Signature): - """Answer the following questions""" - - question: str = dspy.InputField(desc="Some question to answer", prefix="Q: ") - answer: str = dspy.OutputField(desc="A 
short answer to the question", prefix="A: ") - - assert res.proposed_signature.to_signature().equals(OutputSignature) - - def test_opt(): + class BasicQA(dspy.Signature): + question: str = dspy.InputField() + answer: str = dspy.OutputField() + qa_model = DummyLM([]) prompt_model = DummyLM( [ @@ -175,3 +135,59 @@ class ExpectedSignature(dspy.Signature): assert result.program.signature.equals(ExpectedSignature) assert result.scores == [0, 0] + + +def test_opt_composed(): + class MyModule(dspy.Module): + def __init__(self): + self.p1 = TypedPredictor("question:str -> considerations:list[str]", max_retries=1) + self.p2 = TypedPredictor("considerations:list[str] -> answer:str", max_retries=1) + + def forward(self, question): + considerations = self.p1(question=question).considerations + return self.p2(considerations=considerations) + + class ExpectedSignature1(dspy.Signature): + "I1" + + question: str = dspy.InputField(desc="$q", prefix="Q:") + considerations: list[str] = dspy.OutputField(desc="$c", prefix="C:") + + info1 = make_info(ExpectedSignature1) + + class ExpectedSignature2(dspy.Signature): + "I2" + + considerations: list[str] = dspy.InputField(desc="$c", prefix="C:") + answer: str = dspy.OutputField(desc="$a", prefix="A:") + + info2 = make_info(ExpectedSignature2) + + T = TypeVar("T") + + class OutputWrapper(pydantic.BaseModel, Generic[T]): + value: list[T] + + qa_model = DummyLM([]) + prompt_model = DummyLM( + [ + "some thoughts", + OutputWrapper[type(info1)](value=[info1]).model_dump_json(), + "some thoughts", + OutputWrapper[type(info2)](value=[info2]).model_dump_json(), + ] + ) + dspy.settings.configure(lm=qa_model) + + result = optimize_signature( + student=MyModule(), + evaluator=lambda x: 0, # We don't care about the evaluator here + initial_prompts=1, + n_iterations=2, + verbose=True, + prompt_model=prompt_model, + strategy="last", + ) + + assert result.program.p1.signature.equals(ExpectedSignature1) + assert result.program.p2.signature.equals(ExpectedSignature2) From 77c507a7fb8918958eb8437402dbaeb9d7380d1d Mon Sep 17 00:00:00 2001 From: thomasahle Date: Wed, 6 Mar 2024 20:54:29 +0000 Subject: [PATCH 132/243] Automatic Style fixes --- dspy/teleprompt/signature_opt_typed.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dspy/teleprompt/signature_opt_typed.py b/dspy/teleprompt/signature_opt_typed.py index 1455971533..c86d297c14 100644 --- a/dspy/teleprompt/signature_opt_typed.py +++ b/dspy/teleprompt/signature_opt_typed.py @@ -115,7 +115,7 @@ class GenerateSignature(dspy.Signature, Generic[T]): analysis: str = OutputField(desc="Consider what made the previous instructions good or bad.") proposed_signature: T = OutputField(desc="A signature that will likely lead to a high score.") score: float = OutputField( - desc="The expected score for the new signature. Don't write anything after this number." + desc="The expected score for the new signature. Don't write anything after this number.", ) @pydantic.field_validator("proposed_signature") From f672c65c7126101d07350bbc98dfee55a0cb08fd Mon Sep 17 00:00:00 2001 From: mingyi yang Date: Wed, 6 Mar 2024 20:55:52 +0000 Subject: [PATCH 133/243] Remove unused kwargs passed to requests sent through `AzureOpenAI` This commit is to remove unused kwargs that we include in the requests sent through `AzureOpenAI` class. 
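The removed keys (`api_base`, `api_version`, and `api_key`) configure the client
itself and are already consumed in `__init__`, so forwarding them inside the
per-request kwargs sent to the completions API can cause the underlying client
to reject the call. A minimal illustrative sketch (not part of the patch; the
endpoint, version, key, and deployment name below are placeholders):

    from dsp.modules.azure_openai import AzureOpenAI

    # Client-level settings are supplied once, at construction time.
    lm = AzureOpenAI(
        api_base="https://my-resource.openai.azure.com/",  # placeholder endpoint
        api_version="2023-05-15",                          # placeholder version
        api_key="AZURE_OPENAI_KEY",                        # placeholder key
        model="gpt-35-turbo",                              # placeholder deployment
    )
    # Per-request kwargs now carry only generation parameters
    # (temperature, max_tokens, top_p, ...), never the client settings above.
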
This seems to fix this issue: https://github.com/stanfordnlp/dspy/issues/543 --- dsp/modules/azure_openai.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/dsp/modules/azure_openai.py b/dsp/modules/azure_openai.py index c90f634e4f..d930bec6b5 100644 --- a/dsp/modules/azure_openai.py +++ b/dsp/modules/azure_openai.py @@ -107,9 +107,6 @@ def __init__( kwargs["model"] = model self.kwargs = { - "api_base": api_base, - "api_version": api_version, - "api_key": api_key, "temperature": 0.0, "max_tokens": 150, "top_p": 1, From eae3e0749633d7defbb65816e4a798233afb2692 Mon Sep 17 00:00:00 2001 From: Insop Song Date: Wed, 6 Mar 2024 19:10:48 -0800 Subject: [PATCH 134/243] Adding metric_threshold to BootstrapFewShotWithRandomSearch --- dspy/teleprompt/random_search.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dspy/teleprompt/random_search.py b/dspy/teleprompt/random_search.py index bc0e5ef61b..e1663cae80 100644 --- a/dspy/teleprompt/random_search.py +++ b/dspy/teleprompt/random_search.py @@ -24,7 +24,7 @@ class BootstrapFewShotWithRandomSearch(Teleprompter): - def __init__(self, metric, teacher_settings={}, max_bootstrapped_demos=4, max_labeled_demos=16, max_rounds=1, num_candidate_programs=16, num_threads=6, max_errors=10, stop_at_score=None): + def __init__(self, metric, teacher_settings={}, max_bootstrapped_demos=4, max_labeled_demos=16, max_rounds=1, num_candidate_programs=16, num_threads=6, max_errors=10, stop_at_score=None, metric_threshold=None): self.metric = metric self.teacher_settings = teacher_settings self.max_rounds = max_rounds @@ -71,7 +71,7 @@ def compile(self, student, *, teacher=None, trainset, valset=None, restrict=None elif seed == -1: # unshuffled few-shot - program = BootstrapFewShot(metric=self.metric, max_bootstrapped_demos=self.max_num_samples, + program = BootstrapFewShot(metric=self.metric, metric_threshold=self.metric_threshold, max_bootstrapped_demos=self.max_num_samples, max_labeled_demos=self.max_labeled_demos, teacher_settings=self.teacher_settings, max_rounds=self.max_rounds) program2 = program.compile(student, teacher=teacher, trainset=trainset2) @@ -82,7 +82,7 @@ def compile(self, student, *, teacher=None, trainset, valset=None, restrict=None random.Random(seed).shuffle(trainset2) size = random.Random(seed).randint(self.min_num_samples, self.max_num_samples) - teleprompter = BootstrapFewShot(metric=self.metric, max_bootstrapped_demos=size, + teleprompter = BootstrapFewShot(metric=self.metric, metric_threshold=self.metric_threshold, max_bootstrapped_demos=size, max_labeled_demos=self.max_labeled_demos, teacher_settings=self.teacher_settings, max_rounds=self.max_rounds) From 7558b8f439163cc81b3bf9ebfd0c44a76cb274a8 Mon Sep 17 00:00:00 2001 From: Thomas Dybdahl Ahle Date: Wed, 6 Mar 2024 20:44:58 -0800 Subject: [PATCH 135/243] Update cheatsheet.md Added typed signature optimizer --- docs/docs/cheatsheet.md | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/docs/docs/cheatsheet.md b/docs/docs/cheatsheet.md index c5f11e0c42..af62bd248c 100644 --- a/docs/docs/cheatsheet.md +++ b/docs/docs/cheatsheet.md @@ -282,8 +282,13 @@ your_dspy_program_compiled = fewshot_optimizer.compile(student = your_dspy_progr #### Compiling a compiled program - bootstrapping a bootstraped program -your_dspy_program_compiledx2 = teleprompter.compile(your_dspy_program, teacher=your_dspy_program_compiled, trainset=trainset) - +```python +your_dspy_program_compiledx2 = teleprompter.compile( + your_dspy_program, + 
teacher=your_dspy_program_compiled, + trainset=trainset, +) +``` ### dspy.BootstrapFewShotWithRandomSearch @@ -364,6 +369,20 @@ kwargs = dict(num_threads=NUM_THREADS, display_progress=True, display_table=0) compiled_program_optimized_bayesian_signature = teleprompter.compile(your_dspy_program, devset=devset[:DEV_NUM], optuna_trials_num=100, max_bootstrapped_demos=3, max_labeled_demos=5, eval_kwargs=kwargs) ``` +### Signature Optimizer with Types + +```python +from dspy.teleprompt.signature_opt_typed import optimize_signature +from dspy.evaluate.metrics import answer_exact_match +from dspy.functional import TypedChainOfThought + +compiled_program = optimize_signature( + student=TypedChainOfThought("question -> answer"), + evaluator=Evaluate(devset=devset, metric=answer_exact_match, num_threads=10, display_progress=True), + n_iterations=50, +).program +``` + ### dspy.KNNFewShot ```python From 610ad5966eec41dc21ef229e83657bc62ea89ca1 Mon Sep 17 00:00:00 2001 From: klopsahlong Date: Wed, 6 Mar 2024 21:55:18 -0800 Subject: [PATCH 136/243] refactoring optimizers, updates to optimizer notebook --- dspy/teleprompt/__init__.py | 11 +- dspy/teleprompt/copro_optimizer.py | 301 +++++++++++ dspy/teleprompt/mipro_optimizer.py | 499 +++++++++++++++++ dspy/teleprompt/signature_opt.py | 295 +--------- dspy/teleprompt/signature_opt_bayesian.py | 391 +------------- examples/qa/hotpot/hotpotqa_optimized.ipynb | 566 ++++++++++++++++++++ 6 files changed, 1390 insertions(+), 673 deletions(-) create mode 100644 dspy/teleprompt/copro_optimizer.py create mode 100644 dspy/teleprompt/mipro_optimizer.py create mode 100644 examples/qa/hotpot/hotpotqa_optimized.ipynb diff --git a/dspy/teleprompt/__init__.py b/dspy/teleprompt/__init__.py index a1088f2df3..ba14c1b940 100644 --- a/dspy/teleprompt/__init__.py +++ b/dspy/teleprompt/__init__.py @@ -1,10 +1,11 @@ +from .teleprompt import * from .bootstrap import * -from .ensemble import * +from .vanilla import * +from .random_search import * from .finetune import * +from .teleprompt_optuna import * from .knn_fewshot import * -from .random_search import * from .signature_opt import SignatureOptimizer from .signature_opt_bayesian import BayesianSignatureOptimizer -from .teleprompt import * -from .teleprompt_optuna import * -from .vanilla import * +from .mipro_optimizer import MIPRO +from .copro_optimizer import COPRO \ No newline at end of file diff --git a/dspy/teleprompt/copro_optimizer.py b/dspy/teleprompt/copro_optimizer.py new file mode 100644 index 0000000000..f1c303a47c --- /dev/null +++ b/dspy/teleprompt/copro_optimizer.py @@ -0,0 +1,301 @@ +from collections import defaultdict + +import dsp +import dspy +from dspy.evaluate.evaluate import Evaluate +from dspy.signatures import Signature +from dspy.teleprompt.teleprompt import Teleprompter + +""" +USAGE SUGGESTIONS: + +The following code can be used to compile a optimized signature teleprompter, and evaluate it on an end task: + +teleprompter = COPRO(prompt_model=prompt_model, metric=metric, breadth=BREADTH, depth=DEPTH, init_temperature=INIT_TEMPERATURE) +kwargs = dict(num_threads=NUM_THREADS, display_progress=True, display_table=0) +compiled_prompt_opt = teleprompter.compile(program.deepcopy(), devset=devset[:DEV_NUM], eval_kwargs=kwargs) +eval_score = evaluate(compiled_prompt_opt, devset=evalset[:EVAL_NUM], **kwargs) + +Note that this teleprompter takes in the following parameters: + +* prompt_model: The model used for prompt generation. When unspecified, defaults to the model set in settings (ie. 
dspy.settings.configure(lm=task_model)).
+* metric: The task metric used for optimization.
+* breadth: The number of new prompts to generate at each iteration. Default=10.
+* depth: The number of times we should ask our prompt model to generate new prompts, with the history of the past prompts as input. Default=3.
+* init_temperature: The temperature used to generate new prompts. Higher roughly equals more creative. Default=1.4.
+* verbose: Tells the method whether or not to print intermediate steps.
+* track_stats: Tells the method whether or not to track statistics about the optimization process.
+    If True, the method will track the following statistics:
+        * results_best: The min, max, avg, and stddev of the top 10 scores for each predictor at each depth.
+        * results_latest: The min, max, avg, and stddev of the newest prompt scores for each predictor at each depth.
+        * total_calls: The total number of calls to the task metric.
+    These statistics will be returned as attributes of the best program.
+"""
+class BasicGenerateInstruction(Signature):
+    """You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. Your task is to propose an instruction that will lead a good language model to perform the task well. Don't be afraid to be creative."""
+
+    basic_instruction = dspy.InputField(desc="The initial instructions before optimization")
+    proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model")
+    proposed_prefix_for_output_field = dspy.OutputField(desc="The string at the end of the prompt, which will help the model start solving the task")
+
+class GenerateInstructionGivenAttempts(dspy.Signature):
+    """You are an instruction optimizer for large language models. I will give you some task instructions I've tried, along with their corresponding validation scores. The instructions are arranged in increasing order based on their scores, where higher scores indicate better quality.
+
+Your task is to propose a new instruction that will lead a good language model to perform the task even better. 
Don't be afraid to be creative.""" + + attempted_instructions = dspy.InputField(format=dsp.passages2text) + proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") + proposed_prefix_for_output_field = dspy.OutputField(desc="The string at the end of the prompt, which will help the model start solving the task") + +class COPRO(Teleprompter): + def __init__(self, prompt_model=None, metric=None, breadth=10, depth=3, init_temperature=1.4, verbose=False, track_stats=False): + if breadth <= 1: + raise ValueError("Breadth must be greater than 1") + self.metric = metric + self.breadth = breadth + self.depth = depth + self.init_temperature = init_temperature + self.prompt_model = prompt_model + self.verbose = verbose + self.track_stats = track_stats + + def _check_candidates_equal(self, candidate1, candidate2): + for p1, p2 in zip(candidate1["program"].predictors(), candidate2["program"].predictors()): + if self._get_signature(p1).instructions != self._get_signature(p2).instructions: + return False + *_, p1_last_field = self._get_signature(p1).fields.values() + *_, p2_last_field = self._get_signature(p2).fields.values() + if p1_last_field != p2_last_field: + return False + return True + + def _drop_duplicates(self, candidates): + final_candidates = [] + last_batch = [] + last_batch_score = -1 + for c in candidates: + repeat = False + if c['score'] == last_batch_score: + for c2 in last_batch: + if (self._check_candidates_equal(c, c2)): + repeat = True + break + if not repeat: + last_batch.append(c) + else: + last_batch = [c] + last_batch_score = c['score'] + if not repeat: + final_candidates.append(c) + return final_candidates + + def _print_signature(self, predictor): + if self.verbose: + signature = self._get_signature(predictor) + print(f"i: {signature.instructions}") + print(f"p: {list(signature.fields.values())[-1].json_schema_extra['prefix']}") + print() + + def _get_signature(self, predictor): + if (hasattr(predictor, 'extended_signature')): + return predictor.extended_signature + elif (hasattr(predictor, 'signature')): + return predictor.signature + + def _set_signature(self, predictor, updated_signature): + if (hasattr(predictor, 'extended_signature')): + predictor.extended_signature = updated_signature + elif (hasattr(predictor, 'signature')): + predictor.signature = updated_signature + + + def compile(self, student, *, devset, eval_kwargs): + """student is a program that needs to be optimized, note that it may be zero-shot or already pre-optimized for demos != []""" + module = student.deepcopy() + evaluate = Evaluate(devset=devset, metric=self.metric, **eval_kwargs) + total_calls = 0 + results_best = {id(p):{"depth": [], "max": [], "average": [], "min":[], "std": []} for p in module.predictors()} + results_latest = {id(p):{"depth": [], "max": [], "average": [], "min":[], "std": []} for p in module.predictors()} + + if self.track_stats: + import numpy as np + + + candidates = {} + evaluated_candidates = defaultdict(dict) + + # Seed the prompt optimizer zero shot with just the instruction, generate BREADTH new prompts + for predictor in module.predictors(): + basic_instruction = None + basic_prefix = None + *_, last_key = self._get_signature(predictor).fields.keys() + basic_instruction = self._get_signature(predictor).instructions + basic_prefix = self._get_signature(predictor).fields[last_key].json_schema_extra['prefix'] + if self.prompt_model: + with dspy.settings.context(lm=self.prompt_model): + instruct = dspy.Predict(BasicGenerateInstruction, 
n=self.breadth-1, temperature=self.init_temperature)(basic_instruction=basic_instruction)
+            else:
+                instruct = dspy.Predict(BasicGenerateInstruction, n=self.breadth-1, temperature=self.init_temperature)(basic_instruction=basic_instruction)
+            # Add in our initial prompt as a candidate as well
+            instruct.completions.proposed_instruction.append(basic_instruction)
+            instruct.completions.proposed_prefix_for_output_field.append(basic_prefix)
+            candidates[id(predictor)] = instruct.completions
+            evaluated_candidates[id(predictor)] = {}
+
+        if self.verbose and self.prompt_model: print(f"{self.prompt_model.inspect_history(n=1)}")
+
+        latest_candidates = candidates
+        all_candidates = candidates
+
+        module_clone = module.deepcopy()
+
+        # For each iteration in depth...
+        for d in range(self.depth): # TODO: fix this so that we eval the new batch of predictors with the new best following predictors
+            if self.verbose: print(f"Starting iteration {d}/{self.depth}.")
+
+            latest_scores = []
+
+            # Go through our module's predictors
+            for p_i, (p_old, p_new) in enumerate(zip(module.predictors(), module_clone.predictors())):
+                candidates_ = latest_candidates[id(p_old)] # Use the most recently generated candidates for evaluation
+                if len(module.predictors()) > 1:
+                    candidates_ = all_candidates[id(p_old)] # Unless our program has multiple predictors, in which case we need to reevaluate all prompts with the new prompt(s) for the other predictor(s)
+
+                # For each candidate
+                for c_i, c in enumerate(candidates_):
+                    # Get the candidate instruction and prefix
+                    instruction, prefix = c.proposed_instruction.strip('"').strip(), c.proposed_prefix_for_output_field.strip('"').strip()
+
+                    # Set this new module with our instruction / prefix
+                    *_, last_key = self._get_signature(p_new).fields.keys()
+                    updated_signature = self._get_signature(p_new) \
+                        .with_instructions(instruction) \
+                        .with_updated_fields(last_key, prefix=prefix)
+                    self._set_signature(p_new, updated_signature)
+
+                    # Score the instruction / prefix
+                    if self.verbose: print("----------------")
+                    for i, predictor in enumerate(module_clone.predictors()):
+                        if self.verbose: print(f"Predictor {i}")
+                        self._print_signature(predictor)
+                    if self.verbose: print(f"At Depth {d}/{self.depth}, Evaluating Prompt Candidate #{c_i}/{len(candidates_)} for Predictor {p_i} of {len(module.predictors())}.")
+                    score = evaluate(module_clone, devset=devset, **eval_kwargs)
+                    if self.verbose and self.prompt_model: print(f"prompt_model.inspect_history(n=1) {self.prompt_model.inspect_history(n=1)}")
+                    total_calls += 1
+                    if self.verbose: print("----------------")
+
+                    replace_entry = True
+                    if self.verbose: print(f"(instruction, prefix) {(instruction, prefix)}")
+                    if ((instruction, prefix) in evaluated_candidates[id(p_old)]):
+                        if evaluated_candidates[id(p_old)][(instruction, prefix)]["score"] >= score:
+                            replace_entry = False
+
+                    if replace_entry:
+                        # Add it to our evaluated candidates list
+                        evaluated_candidates[id(p_old)][(instruction, prefix)] = {
+                            "score": score,
+                            "program": module_clone.deepcopy(),
+                            "instruction": instruction,
+                            "prefix": prefix,
+                            "depth": d,
+                        }
+
+                    if (len(candidates_)-self.breadth <= c_i):
+                        latest_scores.append(score)
+
+                if self.track_stats:
+                    results_latest[id(p_old)]["depth"].append(d)
+                    results_latest[id(p_old)]["max"].append(max(latest_scores))
+                    results_latest[id(p_old)]["average"].append(sum(latest_scores)/len(latest_scores))
+                    results_latest[id(p_old)]["min"].append(min(latest_scores))
+                    results_latest[id(p_old)]["std"].append(np.std(latest_scores))
+
+                # Now that we've evaluated the candidates, set this predictor to the best performing version
+                # to ensure the next round of scores reflect the best possible version
+                best_candidate = max(evaluated_candidates[id(p_old)].values(), key=lambda candidate: candidate['score'])
+                *_, last_key = self._get_signature(p_old).fields.keys()
+                updated_signature = self._get_signature(p_new) \
+                    .with_instructions(best_candidate["instruction"]) \
+                    .with_updated_fields(last_key, prefix=best_candidate["prefix"])
+                self._set_signature(p_new, updated_signature)
+                if self.verbose: print(f"Updating Predictor {id(p_old)} to:\ni: {best_candidate['instruction']}\np: {best_candidate['prefix']}")
+                if self.verbose: print("Full predictor with update: ")
+                for i, predictor in enumerate(module_clone.predictors()):
+                    if self.verbose: print(f"Predictor {i}")
+                    self._print_signature(predictor)
+
+            if d == self.depth-1:
+                break
+
+            new_candidates = {}
+            for p_base in module.predictors():
+                # Build Few-Shot Example of Optimized Prompts
+                attempts = []
+                shortest_len = self.breadth
+                shortest_len = min(len(evaluated_candidates[id(p_base)]),shortest_len)
+                best_predictors = list(evaluated_candidates[id(p_base)].values())
+                best_predictors.sort(key=lambda x: x['score'], reverse=True)
+
+                if self.track_stats:
+                    scores = [x['score'] for x in best_predictors][:10]
+                    results_best[id(p_base)]["depth"].append(d)
+                    results_best[id(p_base)]["max"].append(max(scores))
+                    results_best[id(p_base)]["average"].append(sum(scores)/len(scores))
+                    results_best[id(p_base)]["min"].append(min(scores))
+                    results_best[id(p_base)]["std"].append(np.std(scores))
+
+                for i in range(shortest_len-1,-1,-1):
+                    attempts.append(f'Instruction #{shortest_len-i}: {best_predictors[i]["instruction"]}')
+                    attempts.append(f'Prefix #{shortest_len-i}: {best_predictors[i]["prefix"]}')
+                    attempts.append(f'Resulting Score #{shortest_len-i}: {best_predictors[i]["score"]}')
+
+                # Generate next batch of potential prompts to optimize, with previous attempts as input
+                if self.prompt_model:
+                    with dspy.settings.context(lm=self.prompt_model):
+                        instr = dspy.Predict(GenerateInstructionGivenAttempts, n=self.breadth, temperature=self.init_temperature)(attempted_instructions=attempts)
+                else:
+                    instr = dspy.Predict(GenerateInstructionGivenAttempts, n=self.breadth, temperature=self.init_temperature)(attempted_instructions=attempts)
+
+                if self.verbose and self.prompt_model: print(f"{self.prompt_model.inspect_history(n=1)}")
+                # Get candidates for each predictor
+                new_candidates[id(p_base)] = instr.completions
+                all_candidates[id(p_base)].proposed_instruction.extend(instr.completions.proposed_instruction)
+                all_candidates[id(p_base)].proposed_prefix_for_output_field.extend(instr.completions.proposed_prefix_for_output_field)
+
+            if self.verbose and self.prompt_model: print(f"{self.prompt_model.inspect_history(n=1)}")
+            latest_candidates = new_candidates
+
+        candidates = []
+        for predictor in module.predictors():
+            candidates.extend(list(evaluated_candidates[id(predictor)].values()))
+
+            if self.track_stats:
+                best_predictors = list(evaluated_candidates[id(predictor)].values())
+                best_predictors.sort(key=lambda x: x['score'], reverse=True)
+
+                scores = [x['score'] for x in best_predictors][:10]
+                results_best[id(predictor)]["depth"].append(d)
+                results_best[id(predictor)]["max"].append(max(scores))
+                results_best[id(predictor)]["average"].append(sum(scores)/len(scores))
+                results_best[id(predictor)]["min"].append(min(scores))
+                results_best[id(predictor)]["std"].append(np.std(scores))
+
+        candidates.sort(key=lambda x: x['score'], reverse=True)
+
+        candidates = self._drop_duplicates(candidates)
+
+        best_program = candidates[0]["program"]
+        best_program.candidate_programs = candidates
+        best_program.total_calls = total_calls
+        if self.track_stats:
+            best_program.results_best = results_best
+            best_program.results_latest = results_latest
+
+        return best_program
\ No newline at end of file
diff --git a/dspy/teleprompt/mipro_optimizer.py b/dspy/teleprompt/mipro_optimizer.py
new file mode 100644
index 0000000000..5dfd691cab
--- /dev/null
+++ b/dspy/teleprompt/mipro_optimizer.py
@@ -0,0 +1,499 @@
+import math
+import random
+from collections import defaultdict
+import textwrap
+
+import optuna
+
+import dsp
+import dspy
+from dspy.evaluate.evaluate import Evaluate
+from dspy.signatures import Signature
+from dspy.signatures.signature import signature_to_template
+from dspy.teleprompt import BootstrapFewShot
+from dspy.teleprompt.teleprompt import Teleprompter
+import sys
+import warnings
+
+
+"""
+USAGE SUGGESTIONS:
+
+The following code can be used to compile an optimized program using the MIPRO teleprompter, and evaluate it on an end task:
+
+from dspy.teleprompt import MIPRO
+
+teleprompter = MIPRO(prompt_model=prompt_model, task_model=task_model, metric=metric, n=10, init_temperature=1.0)
+kwargs = dict(num_threads=NUM_THREADS, display_progress=True, display_table=0)
+compiled_prompt_opt = teleprompter.compile(program, devset=devset[:DEV_NUM], trials_num=100, max_bootstrapped_demos=3, max_labeled_demos=5, eval_kwargs=kwargs)
+eval_score = evaluate(compiled_prompt_opt, devset=evalset[:EVAL_NUM], **kwargs)
+
+Note that this teleprompter takes in the following parameters:
+
+* prompt_model: The model used for prompt generation. When unspecified, defaults to the model set in settings (i.e. dspy.settings.configure(lm=task_model)).
+* task_model: The model used for running the task being optimized. When unspecified, defaults to the model set in settings (i.e. dspy.settings.configure(lm=task_model)).
+* metric: The task metric used for optimization.
+* n: The number of new prompts and sets of few-shot examples to generate and evaluate. Default=10.
+* init_temperature: The temperature used to generate new prompts. Higher roughly equals more creative. Default=1.0.
+* verbose: Tells the method whether or not to print intermediate steps.
+* track_stats: Tells the method whether or not to track statistics about the optimization process.
+    If True, the method will track a dictionary with a key corresponding to the trial number,
+    and a value containing a dict with the following keys:
+        * program: the program being evaluated at a given trial
+        * score: the last average evaluated score for the program
+        * pruned: whether or not this program was pruned
+    This information will be returned as attributes of the best program.
+"""
+
+class BasicGenerateInstruction(Signature):
+    """You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. 
Your task is to propose an instruction that will lead a good language model to perform the task well. Don't be afraid to be creative.""" + + basic_instruction = dspy.InputField(desc="The initial instructions before optimization") + proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") + proposed_prefix_for_output_field = dspy.OutputField(desc="The string at the end of the prompt, which will help the model start solving the task") + +class BasicGenerateInstructionWithDataObservations(Signature): + """You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. I will also give you some ``observations`` I have made about the dataset and task. Your task is to propose an instruction that will lead a good language model to perform the task well. Don't be afraid to be creative.""" + + basic_instruction = dspy.InputField(desc="The initial instructions before optimization") + observations = dspy.InputField(desc="Observations about the dataset and task") + proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") + proposed_prefix_for_output_field = dspy.OutputField(desc="The string at the end of the prompt, which will help the model start solving the task") + +class BasicGenerateInstructionWithExamples(dspy.Signature): + ("""You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. Specifically, I will also provide you with the current ``basic instruction`` that is being used for this task. I will also provide you with some ``examples`` of the expected inputs and outputs. + +Your task is to propose an instruction that will lead a good language model to perform the task well. Don't be afraid to be creative.""") + # attempted_instructions = dspy.InputField(format=str, desc="Previously attempted task instructions, along with their resulting validation score, and an example of the instruction in use on a sample from our dataset.") + basic_instruction = dspy.InputField(desc="The initial instructions before optimization") + # examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task") + examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task") + proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") + proposed_prefix_for_output_field = dspy.OutputField(desc="The string at the end of the prompt, which will help the model start solving the task") + +class BasicGenerateInstructionWithExamplesAndDataObservations(dspy.Signature): + ("""You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. Specifically, I will give you some ``observations`` I have made about the dataset and task, along with some ``examples`` of the expected inputs and outputs. I will also provide you with the current ``basic instruction`` that is being used for this task. + +Your task is to propose a new improved instruction and prefix for the output field that will lead a good language model to perform the task well. 
Don't be afraid to be creative.""")
+    observations = dspy.InputField(desc="Observations about the dataset and task")
+    examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task")
+    basic_instruction = dspy.InputField(desc="The initial instructions before optimization")
+    proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model")
+    proposed_prefix_for_output_field = dspy.OutputField(desc="The string at the end of the prompt, which will help the model start solving the task")
+
+class ObservationSummarizer(dspy.Signature):
+    ("""Given a series of observations I have made about my dataset, please summarize them into a brief 2-3 sentence summary which highlights only the most important details.""")
+    observations = dspy.InputField(desc="Observations I have made about my dataset")
+    summary = dspy.OutputField(desc="Two to three sentence summary of only the most significant highlights of my observations")
+
+class DatasetDescriptor(dspy.Signature):
+    ("""Given several examples from a dataset please write observations about trends that hold for most or all of the samples. """
+    """Some areas you may consider in your observations: topics, content, syntax, conciseness, etc. """
+    """It will be useful to make an educated guess as to the nature of the task this dataset will enable. Don't be afraid to be creative""")
+
+    examples = dspy.InputField(desc="Sample data points from the dataset")
+    observations = dspy.OutputField(desc="Things that hold true for most or all of the data you observed")
+
+class DatasetDescriptorWithPriorObservations(dspy.Signature):
+    ("""Given several examples from a dataset please write observations about trends that hold for most or all of the samples. """
+    """I will also provide you with a few observations I have already made. Please add your own observations or if you feel the observations are comprehensive say 'COMPLETE' """
+    """Some areas you may consider in your observations: topics, content, syntax, conciseness, etc. """
+    """It will be useful to make an educated guess as to the nature of the task this dataset will enable. """
+    """Don't be afraid to be creative""")
+
+    examples = dspy.InputField(desc="Sample data points from the dataset")
+    prior_observations = dspy.InputField(desc="Some prior observations I made about the data")
+    observations = dspy.OutputField(desc="Things that hold true for most or all of the data you observed, or COMPLETE if you have nothing to add")
+
+class MIPRO(Teleprompter):
+    def __init__(self, prompt_model=None, task_model=None, teacher_settings={}, n=10, metric=None, init_temperature=1.0, verbose=False, track_stats=True, view_data_batch_size=10):
+        self.n = n
+        self.metric = metric
+        self.init_temperature = init_temperature
+        self.prompt_model = prompt_model if prompt_model is not None else dspy.settings.lm
+        self.task_model = task_model if task_model is not None else dspy.settings.lm
+        self.verbose = verbose
+        self.track_stats = track_stats
+        self.teacher_settings = teacher_settings
+        self.view_data_batch_size = view_data_batch_size
+
+    def _print_full_program(self, program):
+        for i, predictor in enumerate(program.predictors()):
+            if self.verbose: print(f"Predictor {i}")
+            if self.verbose: print(f"i: {self._get_signature(predictor).instructions}")
+            *_, last_field = self._get_signature(predictor).fields.values()
+            if self.verbose: print(f"p: {last_field.json_schema_extra['prefix']}")
+            if self.verbose: print("\n")
+
+    def _print_model_history(self, model, n=1):
+        if self.verbose: print(f"Model ({model}) History:")
+        model.inspect_history(n=n)
+
+    def _observe_data(self, trainset, max_iterations=10):
+        upper_lim = min(len(trainset), self.view_data_batch_size)
+        observation = dspy.Predict(DatasetDescriptor, n=1, temperature=1.0)(examples=(trainset[0:upper_lim].__repr__()))
+        observations = observation["observations"]
+
+        skips = 0
+        iterations = 0
+        for b in range(self.view_data_batch_size, len(trainset), self.view_data_batch_size):
+            upper_lim = min(len(trainset), b+self.view_data_batch_size)
+            output = dspy.Predict(DatasetDescriptorWithPriorObservations, n=1, temperature=1.0)(prior_observations=observations, examples=(trainset[b:upper_lim].__repr__()))
+            iterations += 1
+            if len(output["observations"]) >= 8 and output["observations"][:8].upper() == "COMPLETE":
+                skips += 1
+                if skips >= 5:
+                    break
+                continue
+            if iterations >= max_iterations:
+                break
+            observations += output["observations"]
+
+        summary = dspy.Predict(ObservationSummarizer, n=1, temperature=1.0)(observations=observations)
+
+        return summary.summary
+
+    def _create_example_string(self, fields, example):
+
+        # Building the output string
+        output = []
+        for field in fields:
+            name = field.name
+            separator = field.separator
+            input_variable = field.input_variable
+
+            # Determine the value from input_data or prediction_data
+            value = example.get(input_variable)
+
+            # Construct the string for the current field
+            field_str = f"{name}{separator}{value}"
+            output.append(field_str)
+
+        # Joining all the field strings
+        return '\n'.join(output)
+
+    def _get_signature(self, predictor):
+        if (hasattr(predictor, 'extended_signature')):
+            return predictor.extended_signature
+        elif (hasattr(predictor, 'signature')):
+            return predictor.signature
+
+    def _set_signature(self, predictor, updated_signature):
+        if (hasattr(predictor, 'extended_signature')):
+            predictor.extended_signature = updated_signature
+        elif (hasattr(predictor, 'signature')):
+            predictor.signature = 
updated_signature + + def _generate_first_N_candidates(self, module, N, view_data, view_examples, demo_candidates, devset): + candidates = {} + evaluated_candidates = defaultdict(dict) + + if view_data: + # Create data observations + self.observations = None + with dspy.settings.context(lm=self.prompt_model): + self.observations = self._observe_data(devset).replace("Observations:","").replace("Summary:","") + + if view_examples: + example_sets = {} + for predictor in module.predictors(): + # Get all augmented examples + example_set = {} + all_sets_of_examples = demo_candidates[id(predictor)] # Get all generated sets of examples + for example_set_i, set_of_examples in enumerate(all_sets_of_examples): + if example_set_i != 0: # Skip the no examples case + for example in set_of_examples: # Get each individual example in the set + if "augmented" in example.keys(): + if example["augmented"]: + if example_set_i not in example_set: + example_set[example_set_i] = [] + fields_to_use = signature_to_template(predictor.signature).fields + input_variable_names = list(self._get_signature(predictor).input_fields.keys()) + example_string = self._create_example_string(fields_to_use, example) + example_set[example_set_i].append(example_string) + example_sets[id(predictor)] = example_set + else: + example_set[example_set_i] = [] + example_sets[id(predictor)] = example_set + + # Seed the prompt optimizer zero shot with just the instruction, generate BREADTH new prompts + for predictor in module.predictors(): + basic_instruction = None + basic_prefix = None + basic_instruction = self._get_signature(predictor).instructions + *_, last_field = self._get_signature(predictor).fields.values() + basic_prefix = last_field.json_schema_extra["prefix"] + with dspy.settings.context(lm=self.prompt_model): + # Data & Examples + if view_data and view_examples: + if 1 not in example_sets[id(predictor)].keys(): + raise ValueError("No examples found for the given predictor") + instruct = None + for i in range(1, self.n): + new_instruct = dspy.Predict( + BasicGenerateInstructionWithExamplesAndDataObservations, + n=1, + temperature=self.init_temperature, + )( + basic_instruction=basic_instruction, + observations=self.observations, + examples=example_sets[id(predictor)][i], + ) + if not instruct: + instruct = new_instruct + else: + instruct.completions.proposed_instruction.extend(new_instruct.completions.proposed_instruction) + instruct.completions.proposed_prefix_for_output_field.extend(new_instruct.completions.proposed_prefix_for_output_field) + # Just data + elif view_data: + instruct = dspy.Predict(BasicGenerateInstructionWithDataObservations, n=N-1, temperature=self.init_temperature)(basic_instruction=basic_instruction, observations=self.observations) + # Just examples + elif view_examples: + instruct = None + for i in range(1,self.n): # Note: skip over the first example set which is empty + new_instruct = dspy.Predict( + BasicGenerateInstructionWithExamples, + n=1, + temperature=self.init_temperature, + )( + basic_instruction=basic_instruction, + examples=example_sets[id(predictor)][i], + ) + if not instruct: + instruct = new_instruct + else: + instruct.completions.proposed_instruction.extend(new_instruct.completions.proposed_instruction) + instruct.completions.proposed_prefix_for_output_field.extend(new_instruct.completions.proposed_prefix_for_output_field) + # Neither + else: + instruct = dspy.Predict(BasicGenerateInstruction, n=N-1, temperature=self.init_temperature)(basic_instruction=basic_instruction) + + # Add in our 
initial prompt as a candidate as well
+        instruct.completions.proposed_instruction.insert(0, basic_instruction)
+        instruct.completions.proposed_prefix_for_output_field.insert(0, basic_prefix)
+        candidates[id(predictor)] = instruct.completions
+        evaluated_candidates[id(predictor)] = {}
+
+        if self.verbose: self._print_model_history(self.prompt_model)
+
+        return candidates, evaluated_candidates
+
+    def compile(self, student, *, devset, max_bootstrapped_demos, max_labeled_demos, eval_kwargs, seed=42, view_data=True, view_examples=True, requires_permission_to_run=True, trials_num=None, optuna_trials_num=None):
+        # Define ANSI escape codes for colors
+        YELLOW = '\033[93m'
+        BLUE = '\033[94m'
+        BOLD = '\033[1m'
+        ENDC = '\033[0m' # Resets the color to default
+
+        # Check if both trials_num and optuna_trials_num are None
+        if trials_num is None and optuna_trials_num is None:
+            raise ValueError(f"{YELLOW}{BOLD}You must specify the number of trials using the 'trials_num' parameter.{ENDC}")
+
+        # Check if the deprecated parameter is used
+        if optuna_trials_num is not None:
+            # Issue a deprecation warning
+            warnings.warn(
+                "`optuna_trials_num` is deprecated and will be removed in a future version. "
+                "Use `trials_num` instead.",
+                DeprecationWarning
+            )
+            # Use optuna_trials_num as a fallback if trials_num is not provided
+            if trials_num is None:
+                trials_num = optuna_trials_num
+
+        random.seed(seed)
+
+        estimated_task_model_calls_wo_module_calls = len(devset) * trials_num # M * T
+        estimated_prompt_model_calls = 10 + self.n * len(student.predictors()) # num data summary calls + N * P
+
+        user_message = textwrap.dedent(f"""\
+            {YELLOW}{BOLD}WARNING: Projected Language Model (LM) Calls{ENDC}
+
+            Please be advised that based on the parameters you have set, the maximum number of LM calls is projected as follows:
+
+            {YELLOW}- Task Model: {BLUE}{BOLD}{len(devset)}{ENDC}{YELLOW} examples in dev set * {BLUE}{BOLD}{trials_num}{ENDC}{YELLOW} trials * {BLUE}{BOLD}# of LM calls in your program{ENDC}{YELLOW} = ({BLUE}{BOLD}{estimated_task_model_calls_wo_module_calls} * # of LM calls in your program{ENDC}{YELLOW}) task model calls{ENDC}
+            {YELLOW}- Prompt Model: # data summarizer calls (max {BLUE}{BOLD}10{ENDC}{YELLOW}) + {BLUE}{BOLD}{self.n}{ENDC}{YELLOW} * {BLUE}{BOLD}{len(student.predictors())}{ENDC}{YELLOW} lm calls in program = {BLUE}{BOLD}{estimated_prompt_model_calls}{ENDC}{YELLOW} prompt model calls{ENDC}
+
+            {YELLOW}{BOLD}Estimated Cost Calculation:{ENDC}
+
+            {YELLOW}Total Cost = (Number of calls to task model * (Avg Input Token Length per Call * Task Model Price per Input Token + Avg Output Token Length per Call * Task Model Price per Output Token)
+                        + (Number of calls to prompt model * (Avg Input Token Length per Call * Prompt Model Price per Input Token + Avg Output Token Length per Call * Prompt Model Price per Output Token).{ENDC}
+
+            For a preliminary estimate of potential costs, we recommend you perform your own calculations based on the task
+            and prompt models you intend to use. If the projected costs exceed your budget or expectations, you may consider:
+
+            {YELLOW}- Reducing the number of trials (`trials_num`), the size of the trainset, or the number of LM calls in your program.{ENDC}
+            {YELLOW}- Using a cheaper task model to optimize the prompt.{ENDC}
+
+            To proceed with the execution of this program, please confirm by typing {BLUE}'y'{ENDC} for yes or {BLUE}'n'{ENDC} for no.
+
+            If you would like to bypass this confirmation step in future executions, set the {YELLOW}`requires_permission_to_run`{ENDC} flag to {YELLOW}`False`.{ENDC}
+
+            {YELLOW}Awaiting your input...{ENDC}
+            """)
+
+        print(user_message)
+
+        sys.stdout.flush() # Flush the output buffer to force the message to print
+
+        if requires_permission_to_run:
+            user_input = input("Do you wish to continue? (y/n): ").strip().lower()
+            if user_input != 'y':
+                print("Compilation aborted by the user.")
+            else:
+                # Set up program and evaluation function
+                module = student.deepcopy()
+                evaluate = Evaluate(devset=devset, metric=self.metric, **eval_kwargs)
+
+                # In the case where the bootstrapped and labeled demos are set to 0, we'll still bootstrap examples to use in our meta prompt
+                if max_bootstrapped_demos==0 and max_labeled_demos==0: #TODO: address case when max_bootstrapped alone is 0
+                    max_bootstrapped_demos_for_candidate_gen = 1
+                    max_labeled_demos_for_candidate_gen = 1 #TODO: this might only need to be 0
+                else:
+                    max_bootstrapped_demos_for_candidate_gen = max_bootstrapped_demos
+                    max_labeled_demos_for_candidate_gen = max_labeled_demos
+
+                # Generate N few shot example sets
+                demo_candidates = {}
+                for i in range(self.n):
+                    if i == 0: # Store an empty set of demos as the default for index 0
+                        for module_p in module.predictors():
+                            if id(module_p) not in demo_candidates.keys():
+                                demo_candidates[id(module_p)] = []
+                            demo_candidates[id(module_p)].append([])
+                    else:
+                        if self.verbose: print(f"Creating basic bootstrap: {i}/{self.n-1}")
+
+                        # Create a new basic bootstrap few-shot program.
+                        rng = random.Random(i)
+                        shuffled_devset = devset[:] # Create a copy of devset
+                        rng.shuffle(shuffled_devset) # Shuffle the copy
+                        tp = BootstrapFewShot(metric=self.metric, max_bootstrapped_demos=max_bootstrapped_demos_for_candidate_gen, max_labeled_demos=max_labeled_demos_for_candidate_gen, teacher_settings=self.teacher_settings)
+                        candidate_program = tp.compile(student=module.deepcopy(), trainset=shuffled_devset)
+
+                        # Store the candidate demos
+                        for module_p, candidate_p in zip(module.predictors(), candidate_program.predictors()):
+                            if id(module_p) not in demo_candidates.keys():
+                                demo_candidates[id(module_p)] = []
+                            demo_candidates[id(module_p)].append(candidate_p.demos)
+
+                # Generate N candidate prompts
+                instruction_candidates, _ = self._generate_first_N_candidates(module, self.n, view_data, view_examples, demo_candidates, devset)
+
+                # Reset demo_candidates to None for our optimization if the user asked for no fewshot examples
+                if max_bootstrapped_demos==0 and max_labeled_demos==0:
+                    demo_candidates = None
+
+                # Initialize variables to store the best program and its score
+                best_score = float('-inf')
+                best_program = None
+                trial_num = 0
+
+                trial_logs = {}
+
+                # Define our trial objective
+                def create_objective(baseline_program, instruction_candidates, demo_candidates, evaluate, devset):
+                    def objective(trial):
+                        nonlocal best_program, best_score, trial_num, trial_logs # Allow access to the outer variables
+                        candidate_program = baseline_program.deepcopy()
+
+                        # Suggest the instruction to use for our predictor
+                        print(f"Starting trial #{trial_num}")
+                        trial_logs[trial_num] = {}
+
+                        for p_old, p_new in zip(baseline_program.predictors(), candidate_program.predictors()):
+
+                            # Get instruction candidates for our given predictor
+                            p_instruction_candidates = instruction_candidates[id(p_old)]
+                            if demo_candidates: p_demo_candidates = demo_candidates[id(p_old)]
+
+                            # Suggest the index of the instruction candidate to use in our trial
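+                            # (Each candidate index below is treated as a categorical hyperparameter;
+                            # Optuna's TPE sampler learns which instruction / demo combinations score well.)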
+                            instruction_idx = trial.suggest_categorical(f"{id(p_old)}_predictor_instruction", range(len(p_instruction_candidates)))
+                            if demo_candidates: demos_idx = trial.suggest_categorical(f"{id(p_old)}_predictor_demos", range(len(p_demo_candidates)))
+                            trial_logs[trial_num][f"{id(p_old)}_predictor_instruction"] = instruction_idx
+                            if demo_candidates: trial_logs[trial_num][f"{id(p_old)}_predictor_demos"] = demos_idx
+
+                            # Get the selected instruction candidate
+                            selected_candidate = p_instruction_candidates[instruction_idx]
+                            selected_instruction = selected_candidate.proposed_instruction.strip('"').strip()
+                            selected_prefix = selected_candidate.proposed_prefix_for_output_field.strip('"').strip()
+
+                            # Use these candidates in our program
+                            *_, last_field = self._get_signature(p_new).fields.keys()
+                            updated_signature = self._get_signature(p_new).with_instructions(selected_instruction).with_updated_fields(last_field, prefix=selected_prefix)
+                            self._set_signature(p_new, updated_signature)
+
+                            # Get the selected demos
+                            if demo_candidates: selected_demos = p_demo_candidates[demos_idx]
+
+                            # Use these demos in our program
+                            if demo_candidates: p_new.demos = selected_demos
+
+                        if self.verbose: print("Evaluating the following program:")
+                        if self.verbose: self._print_full_program(candidate_program)
+                        trial_logs[trial_num]["program"] = candidate_program
+
+                        # Evaluate with the new prompts
+                        total_score = 0
+                        batch_size = 100
+                        num_batches = math.ceil(len(devset) / batch_size)
+
+                        for i in range(num_batches):
+                            start_index = i * batch_size
+                            end_index = min((i + 1) * batch_size, len(devset))
+                            split_dev = devset[start_index:end_index]
+                            split_score = evaluate(candidate_program, devset=split_dev, display_table=0)
+                            if self.verbose: print(f"Split {i} score: {split_score}")
+
+                            total_score += split_score * len(split_dev)
+                            curr_weighted_avg_score = total_score / min((i+1)*batch_size, len(devset))
+                            if self.verbose: print(f"curr average score: {curr_weighted_avg_score}")
+
+                            trial.report(curr_weighted_avg_score, i)
+
+                            # Handle pruning based on the intermediate value.
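+                            # (Optuna compares these reported intermediate scores against other trials
+                            # and stops unpromising trials early, saving task model calls.)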
+                            if trial.should_prune():
+                                print("Trial pruned.")
+                                trial_logs[trial_num]["score"] = curr_weighted_avg_score
+                                trial_logs[trial_num]["pruned"] = True
+                                trial_num += 1
+                                raise optuna.TrialPruned()
+
+                        if self.verbose: print(f"Fully evaluated score: {curr_weighted_avg_score}")
+                        if self.verbose: self._print_model_history(self.task_model, n=1)
+                        score = curr_weighted_avg_score
+
+                        trial_logs[trial_num]["score"] = curr_weighted_avg_score
+                        trial_logs[trial_num]["pruned"] = False
+
+                        # Update the best program if the current score is better
+                        if score > best_score:
+                            best_score = score
+                            best_program = candidate_program.deepcopy()
+
+                        trial_num += 1
+
+                        return score
+
+                    return objective
+
+                # Run the trials
+                objective_function = create_objective(module, instruction_candidates, demo_candidates, evaluate, devset)
+                sampler = optuna.samplers.TPESampler(seed=seed)
+                study = optuna.create_study(direction="maximize", sampler=sampler)
+                study.optimize(objective_function, n_trials=trials_num)
+
+                if best_program is not None and self.track_stats:
+                    best_program.trial_logs = trial_logs
+
+                print("Returning the best program found during optimization.")
+                return best_program
\ No newline at end of file
diff --git a/dspy/teleprompt/signature_opt.py b/dspy/teleprompt/signature_opt.py
index e760ebf45b..04da45a9c9 100644
--- a/dspy/teleprompt/signature_opt.py
+++ b/dspy/teleprompt/signature_opt.py
@@ -1,12 +1,10 @@
-from collections import defaultdict
-
-import dsp
-import dspy
-from dspy.evaluate.evaluate import Evaluate
-from dspy.signatures import Signature
-from dspy.teleprompt.teleprompt import Teleprompter
-
+from .copro_optimizer import COPRO
 """
+===============================================================
+DEPRECATED!!!
+PLEASE USE COPRO INSTEAD.
+===============================================================
+
 USAGE SUGGESTIONS:
 
 The following code can be used to compile a optimized signature teleprompter, and evaluate it on an end task:
@@ -31,284 +29,7 @@
 * total_calls: The total number of calls to the task metric.
 These statistics will be returned as attributes of the best program.
 """
-class BasicGenerateInstruction(Signature):
-    """You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. Your task is to propose an instruction that will lead a good language model to perform the task well. Don't be afraid to be creative."""
-
-    basic_instruction = dspy.InputField(desc="The initial instructions before optimization")
-    proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model")
-    proposed_prefix_for_output_field = dspy.OutputField(desc="The string at the end of the prompt, which will help the model start solving the task")
-
-class GenerateInstructionGivenAttempts(dspy.Signature):
-    """You are an instruction optimizer for large language models. I will give some task instructions I've tried, along with their corresponding validation scores. The instructions are arranged in increasing order based on their scores, where higher scores indicate better quality.
-Your task is to propose a new instruction that will lead a good language model to perform the task even better. 
Don't be afraid to be creative.""" - - attempted_instructions = dspy.InputField(format=dsp.passages2text) - proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") - proposed_prefix_for_output_field = dspy.OutputField(desc="The string at the end of the prompt, which will help the model start solving the task") - -class SignatureOptimizer(Teleprompter): +class SignatureOptimizer(COPRO): def __init__(self, prompt_model=None, metric=None, breadth=10, depth=3, init_temperature=1.4, verbose=False, track_stats=False): - if breadth <= 1: - raise ValueError("Breadth must be greater than 1") - self.metric = metric - self.breadth = breadth - self.depth = depth - self.init_temperature = init_temperature - self.prompt_model = prompt_model - self.verbose = verbose - self.track_stats = track_stats - - def _check_candidates_equal(self, candidate1, candidate2): - for p1, p2 in zip(candidate1["program"].predictors(), candidate2["program"].predictors()): - if p1.extended_signature.instructions != p2.extended_signature.instructions: - return False - *_, p1_last_field = p1.extended_signature.fields.values() - *_, p2_last_field = p2.extended_signature.fields.values() - if p1_last_field != p2_last_field: - return False - return True - - def _drop_duplicates(self, candidates): - final_candidates = [] - last_batch = [] - last_batch_score = -1 - for c in candidates: - repeat = False - if c['score'] == last_batch_score: - for c2 in last_batch: - if (self._check_candidates_equal(c, c2)): - repeat = True - break - if not repeat: - last_batch.append(c) - else: - last_batch = [c] - last_batch_score = c['score'] - if not repeat: - final_candidates.append(c) - return final_candidates - - def _print_signature(self, predictor): - if self.verbose: - if (hasattr(predictor, 'extended_signature')): - signature = predictor.extended_signature - else: - signature = predictor.extended_signature1 - print(f"i: {signature.instructions}") - print(f"p: {list(signature.fields.values())[-1].json_schema_extra['prefix']}") - print() - - - def compile(self, student, *, devset, eval_kwargs): - """student is a program that needs to be optimized, note that it may be zero-shot or already pre-optimized for demos != []""" - module = student.deepcopy() - evaluate = Evaluate(devset=devset, metric=self.metric, **eval_kwargs) - total_calls = 0 - results_best = {id(p):{"depth": [], "max": [], "average": [], "min":[], "std": []} for p in module.predictors()} - results_latest = {id(p):{"depth": [], "max": [], "average": [], "min":[], "std": []} for p in module.predictors()} - - if self.track_stats: - import numpy as np - - - candidates = {} - evaluated_candidates = defaultdict(dict) - - # Seed the prompt optimizer zero shot with just the instruction, generate BREADTH new prompts - for predictor in module.predictors(): - basic_instruction = None - basic_prefix = None - *_, last_key = predictor.extended_signature.fields.keys() - if (hasattr(predictor, 'extended_signature')): - basic_instruction = predictor.extended_signature.instructions - basic_prefix = predictor.extended_signature.fields[last_key].json_schema_extra['prefix'] - else: - basic_instruction = predictor.extended_signature1.instructions - basic_prefix = predictor.extended_signature1.fields[last_key].json_schema_extra['prefix'] - if self.prompt_model: - with dspy.settings.context(lm=self.prompt_model): - instruct = dspy.Predict(BasicGenerateInstruction, n=self.breadth-1, temperature=self.init_temperature)(basic_instruction=basic_instruction) - else: - 
instruct = dspy.Predict(BasicGenerateInstruction, n=self.breadth-1, temperature=self.init_temperature)(basic_instruction=basic_instruction) - # Add in our initial prompt as a candidate as well - instruct.completions.proposed_instruction.append(basic_instruction) - instruct.completions.proposed_prefix_for_output_field.append(basic_prefix) - candidates[id(predictor)] = instruct.completions - evaluated_candidates[id(predictor)] = {} - - if self.verbose and self.prompt_model: print(f"{self.prompt_model.inspect_history(n=1)}") - - latest_candidates = candidates - all_candidates = candidates - - module_clone = module.deepcopy() - - # For each iteration in depth... - for d in range(self.depth): # TODO: fix this so that we eval the new batch of predictors with the new best followoing predictors - if self.verbose: print(f"Starting iteration {d}/{self.depth}.") - - latest_scores = [] - - # Go through our module's predictors - for p_i, (p_old, p_new) in enumerate(zip(module.predictors(), module_clone.predictors())): - candidates_ = latest_candidates[id(p_old)] # Use the most recently generated candidates for evaluation - if len(module.predictors()) > 1: - candidates_ = all_candidates[id(p_old)] # Unless our program has multiple predictors, in which case we need to reevaluate all prompts with the new prompt(s) for the other predictor(s) - - # For each candidate - for c_i, c in enumerate(candidates_): - # Get the candidate instruction and prefix - instruction, prefix = c.proposed_instruction.strip('"').strip(), c.proposed_prefix_for_output_field.strip('"').strip() - - # Set this new module with our instruction / prefix - if (hasattr(p_new, 'extended_signature')): - *_, last_key = p_new.extended_signature.fields.keys() - p_new.extended_signature = p_new.extended_signature \ - .with_instructions(instruction) \ - .with_updated_fields(last_key, prefix=prefix) - else: - *_, last_key = p_new.extended_signature1.fields.keys() - p_new.extended_signature1 = p_new.extended_signature1 \ - .with_instructions(instruction) \ - .with_updated_fields(last_key, prefix=prefix) - *_, last_key = p_new.extended_signature2.fields.keys() - p_new.extended_signature2 = p_new.extended_signature2 \ - .with_instructions(instruction) \ - .with_updated_fields(last_key, prefix=prefix) - - # Score the instruction / prefix - if self.verbose: print("----------------") - for i,predictor in enumerate(module_clone.predictors()): - if self.verbose: print(f"Predictor {i}") - self._print_signature(predictor) - if self.verbose: print(f"At Depth {d}/{self.depth}, Evaluating Prompt Candidate #{c_i}/{len(candidates_)} for Predictor {p_i} of {len(module.predictors())}.") - score = evaluate(module_clone, devset=devset, **eval_kwargs) - if self.verbose and self.prompt_model: print(f"prompt_model.inspect_history(n=1) {self.prompt_model.inspect_history(n=1)}") - total_calls += 1 - if self.verbose: print("----------------") - - replace_entry = True - if self.verbose: print(f"(instruction, prefix) {(instruction, prefix)}") - # if verbose: print(f"evaluated_candidates[id(p_old)] {evaluated_candidates[id(p_old)]}") - if ((instruction, prefix) in evaluated_candidates[id(p_old)]): - # if verbose: print(f"if evaluated_candidates[id(p_old)][(instruction, prefix)] {evaluated_candidates[id(p_old)][(instruction, prefix)]}") - if evaluated_candidates[id(p_old)][(instruction, prefix)]["score"] >= score: - replace_entry = False - - if replace_entry: - # Add it to our evaluated candidates list - evaluated_candidates[id(p_old)][(instruction, prefix)] = { - "score": 
score, - "program": module_clone.deepcopy(), - "instruction": instruction, - "prefix": prefix, - "depth": d, - } - - if (len(candidates_)-self.breadth <= c_i): - latest_scores.append(score) - - if self.track_stats: - results_latest[id(p_old)]["depth"].append(d) - results_latest[id(p_old)]["max"].append(max(latest_scores)) - results_latest[id(p_old)]["average"].append(sum(latest_scores)/len(latest_scores)) - results_latest[id(p_old)]["min"].append(min(latest_scores)) - results_latest[id(p_old)]["std"].append(np.std(latest_scores)) - - # Now that we've evaluated the candidates, set this predictor to the best performing version - # to ensure the next round of scores reflect the best possible version - best_candidate = max(evaluated_candidates[id(p_old)].values(), key=lambda candidate: candidate['score']) - if (hasattr(p_new, 'extended_signature')): - *_, last_key = p_old.extended_signature.fields.keys() - p_new.extended_signature = p_new.extended_signature \ - .with_instructions(best_candidate["instruction"]) \ - .with_updated_fields(last_key, prefix=best_candidate["prefix"]) - else: - *_, last_key1 = p_old.extended_signature1.fields.keys() - p_new.extended_signature1 = p_new.extended_signature \ - .with_instructions(best_candidate["instruction"]) \ - .with_updated_fields(last_key1, prefix=best_candidate["prefix"]) - *_, last_key2 = p_old.extended_signature2.fields.keys() - p_new.extended_signature2 = p_new.extended_signature \ - .with_instructions(best_candidate["instruction"]) \ - .with_updated_fields(last_key2, prefix=best_candidate["prefix"]) - if self.verbose: print(f"Updating Predictor {id(p_old)} to:\ni: {best_candidate['instruction']}\np: {best_candidate['prefix']}") - if self.verbose: print("Full predictor with update: ") - for i,predictor in enumerate(module_clone.predictors()): - if self.verbose: print(f"Predictor {i}") - self._print_signature(predictor) - - if d == self.depth-1: - break - - - new_candidates = {} - for p_base in module.predictors(): - # Build Few-Shot Example of Optimized Prompts - attempts = [] - shortest_len = self.breadth - shortest_len = min(len(evaluated_candidates[id(p_base)]),shortest_len) - best_predictors = list(evaluated_candidates[id(p_base)].values()) - - # best_predictors = evaluated_candidates[id(p_base)].values()[:] - best_predictors.sort(key=lambda x: x['score'], reverse=True) - - if self.track_stats: - scores = [x['score'] for x in best_predictors][:10] - results_best[id(p_base)]["depth"].append(d) - results_best[id(p_base)]["max"].append(max(scores)) - results_best[id(p_base)]["average"].append(sum(scores)/len(scores)) - results_best[id(p_base)]["min"].append(min(scores)) - results_best[id(p_base)]["std"].append(np.std(scores)) - - for i in range(shortest_len-1,-1,-1): - # breakpoint() - attempts.append(f'Instruction #{shortest_len-i}: {best_predictors[i]["instruction"]}') - attempts.append(f'Prefix #{shortest_len-i}: {best_predictors[i]["prefix"]}') - attempts.append(f'Resulting Score #{shortest_len-i}: {best_predictors[i]["score"]}') - - # Generate next batch of potential prompts to optimize, with previous attempts as input - if self.prompt_model: - with dspy.settings.context(lm=self.prompt_model): - instr = dspy.Predict(GenerateInstructionGivenAttempts, n=self.breadth, temperature=self.init_temperature)(attempted_instructions=attempts) - else: - instr = dspy.Predict(GenerateInstructionGivenAttempts, n=self.breadth, temperature=self.init_temperature)(attempted_instructions=attempts) - - if self.verbose and self.prompt_model: 
print(f"{self.prompt_model.inspect_history(n=1)}") - # Get candidates for each predictor - new_candidates[id(p_base)] = instr.completions - all_candidates[id(p_base)].proposed_instruction.extend(instr.completions.proposed_instruction) - all_candidates[id(p_base)].proposed_prefix_for_output_field.extend(instr.completions.proposed_prefix_for_output_field) - - if self.verbose and self.prompt_model: print(f"{self.prompt_model.inspect_history(n=1)}") - latest_candidates = new_candidates - - candidates = [] - for predictor in module.predictors(): - candidates.extend(list(evaluated_candidates[id(predictor)].values())) - - if self.track_stats: - best_predictors = list(evaluated_candidates[id(predictor)].values()) - best_predictors.sort(key=lambda x: x['score'], reverse=True) - - scores = [x['score'] for x in best_predictors][:10] - results_best[id(predictor)]["depth"].append(d) - results_best[id(predictor)]["max"].append(max(scores)) - results_best[id(predictor)]["average"].append(sum(scores)/len(scores)) - results_best[id(predictor)]["min"].append(min(scores)) - results_best[id(predictor)]["std"].append(np.std(scores)) - - # if verbose: print(f"candidates: {candidates}") - candidates.sort(key=lambda x: x['score'], reverse=True) - - candidates = self._drop_duplicates(candidates) - - best_program = candidates[0]["program"] - best_program.candidate_programs = candidates - best_program.total_calls = total_calls - if self.track_stats: - best_program.results_best = results_best - best_program.results_latest = results_latest - - return best_program \ No newline at end of file + super().__init__(prompt_model, metric, breadth, depth, init_temperature, verbose, track_stats) \ No newline at end of file diff --git a/dspy/teleprompt/signature_opt_bayesian.py b/dspy/teleprompt/signature_opt_bayesian.py index e316462c94..a0cdd84588 100644 --- a/dspy/teleprompt/signature_opt_bayesian.py +++ b/dspy/teleprompt/signature_opt_bayesian.py @@ -1,18 +1,11 @@ -import math -import random -from collections import defaultdict - -import optuna - -import dsp -import dspy -from dspy.evaluate.evaluate import Evaluate -from dspy.signatures import Signature -from dspy.signatures.signature import signature_to_template -from dspy.teleprompt import BootstrapFewShot -from dspy.teleprompt.teleprompt import Teleprompter +from dspy.teleprompt.mipro_optimizer import MIPRO """ +=============================================================== +DEPRECATED!!! +PLEASE USE MIPRO INSTEAD. +=============================================================== + USAGE SUGGESTIONS: The following code can be used to compile a optimized signature teleprompter using the BayesianSignatureOptimizer, and evaluate it on an end task: @@ -40,373 +33,9 @@ * pruned: whether or not this program was pruned This information will be returned as attributes of the best program. """ -class BasicGenerateInstruction(Signature): - """You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. Your task is to propose an instruction that will lead a good language model to perform the task well. 
Don't be afraid to be creative.""" - - basic_instruction = dspy.InputField(desc="The initial instructions before optimization") - proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") - proposed_prefix_for_output_field = dspy.OutputField(desc="The string at the end of the prompt, which will help the model start solving the task") - -class BasicGenerateInstructionWithDataObservations(Signature): - """You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. I will also give you some ``observations`` I have made about the dataset and task. Your task is to propose an instruction that will lead a good language model to perform the task well. Don't be afraid to be creative.""" - - basic_instruction = dspy.InputField(desc="The initial instructions before optimization") - observations = dspy.InputField(desc="Observations about the dataset and task") - proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") - proposed_prefix_for_output_field = dspy.OutputField(desc="The string at the end of the prompt, which will help the model start solving the task") - -class BasicGenerateInstructionWithExamples(dspy.Signature): - ("""You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. Specifically, I will also provide you with the current ``basic instruction`` that is being used for this task. I will also provide you with some ``examples`` of the expected inputs and outputs. - -Your task is to propose an instruction that will lead a good language model to perform the task well. Don't be afraid to be creative.""") - # attempted_instructions = dspy.InputField(format=str, desc="Previously attempted task instructions, along with their resulting validation score, and an example of the instruction in use on a sample from our dataset.") - basic_instruction = dspy.InputField(desc="The initial instructions before optimization") - # examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task") - examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task") - proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") - proposed_prefix_for_output_field = dspy.OutputField(desc="The string at the end of the prompt, which will help the model start solving the task") - -class BasicGenerateInstructionWithExamplesAndDataObservations(dspy.Signature): - ("""You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. Specifically, I will also provide you with the current ``basic instruction`` that is being used for this task. I will also provide you with some ``observations`` I have made about the dataset and task, along with some ``examples`` of the expected inputs and outputs. - -Your task is to propose a new improved instruction and prefix for the output field that will lead a good language model to perform the task well. 
Don't be afraid to be creative.""") - basic_instruction = dspy.InputField(desc="The initial instructions before optimization") - observations = dspy.InputField(desc="Observations about the dataset and task") - examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task") - proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") - proposed_prefix_for_output_field = dspy.OutputField(desc="The string at the end of the prompt, which will help the model start solving the task") - -class ObservationSummarizer(dspy.Signature): - ("""Given a series of observations I have made about my dataset, please summarize them into a brief 2-3 sentence summary which highlights only the most important details.""") - observations = dspy.InputField(desc="Observations I have made about my dataset") - summary = dspy.OutputField(desc="Two to Three sentence summary of only the most significant highlights of my observations") - -class DatasetDescriptor(dspy.Signature): - ("""Given several examples from a dataset please write observations about trends that hold for most or all of the samples. """ - """Some areas you may consider in your observations: topics, content, syntax, conciceness, etc. """ - """It will be useful to make an educated guess as to the nature of the task this dataset will enable. Don't be afraid to be creative""") - - examples = dspy.InputField(desc="Sample data points from the dataset") - observations = dspy.OutputField(desc="Somethings that holds true for most or all of the data you observed") - -class DatasetDescriptorWithPriorObservations(dspy.Signature): - ("""Given several examples from a dataset please write observations about trends that hold for most or all of the samples. """ - """I will also provide you with a few observations I have already made. Please add your own observations or if you feel the observations are comprehensive say 'COMPLETE' """ - """Some areas you may consider in your observations: topics, content, syntax, conciceness, etc. """ - """It will be useful to make an educated guess as to the nature of the task this dataset will enable. 
Don't be afraid to be creative""") - - examples = dspy.InputField(desc="Sample data points from the dataset") - prior_observations = dspy.InputField(desc="Some prior observations I made about the data") - observations = dspy.OutputField(desc="Somethings that holds true for most or all of the data you observed or COMPLETE if you have nothing to add") - -class BayesianSignatureOptimizer(Teleprompter): - def __init__(self, prompt_model=None, task_model=None, teacher_settings={}, n=10, metric=None, init_temperature=1.0, verbose=False, track_stats=False, view_data_batch_size=10): - self.n = n - self.metric = metric - self.init_temperature = init_temperature - self.prompt_model = prompt_model if prompt_model is not None else dspy.settings.lm - self.task_model = task_model if task_model is not None else dspy.settings.lm - self.verbose = verbose - self.track_stats = track_stats - self.teacher_settings = teacher_settings - self.view_data_batch_size = view_data_batch_size - - def _print_full_program(self, program): - for i,predictor in enumerate(program.predictors()): - if self.verbose: print(f"Predictor {i}") - if (hasattr(predictor, 'extended_signature')): - if self.verbose: print(f"i: {predictor.extended_signature.instructions}") - *_, last_field = predictor.extended_signature.fields.values() - if self.verbose: print(f"p: {last_field.json_schema_extra['prefix']}") - else: - if self.verbose: print(f"i: {predictor.extended_signature1.instructions}") - *_, last_field = predictor.extended_signature1.fields.values() - if self.verbose: print(f"p: {last_field.json_schema_extra['prefix']}") - if self.verbose: print("\n") - - def _print_model_history(self, model, n=1): - if self.verbose: print(f"Model ({model}) History:") - model.inspect_history(n=n) - - def _observe_data(self, trainset): - upper_lim = min(len(trainset), self.view_data_batch_size) - observation = dspy.Predict(DatasetDescriptor, n=1, temperature=1.0)(examples=(trainset[0:upper_lim].__repr__())) - observations = observation["observations"] - - skips = 0 - for b in range(self.view_data_batch_size, len(trainset), self.view_data_batch_size): - upper_lim = min(len(trainset), b+self.view_data_batch_size) - output = dspy.Predict(DatasetDescriptorWithPriorObservations, n=1, temperature=1.0)(prior_observations=observations, examples=(trainset[b:upper_lim].__repr__())) - if len(output["observations"]) >= 8 and output["observations"][:8].upper() == "COMPLETE": - skips += 1 - if skips >= 5: - break - continue - observations += output["observations"] - - summary = dspy.Predict(ObservationSummarizer, n=1, temperature=1.0)(observations=observations) - - return summary.summary - - def _create_example_string(self, fields, example): - - # Building the output string - output = [] - for field in fields: - name = field.name - separator = field.separator - input_variable = field.input_variable - - # Determine the value from input_data or prediction_data - value = example.get(input_variable) - - # Construct the string for the current field - field_str = f"{name}{separator}{value}" - output.append(field_str) - - # Joining all the field strings - return '\n'.join(output) - - def _generate_first_N_candidates(self, module, N, view_data, view_examples, demo_candidates, devset): - candidates = {} - evaluated_candidates = defaultdict(dict) - - if view_data: - # Create data observations - self.observations = None - with dspy.settings.context(lm=self.prompt_model): - self.observations = self._observe_data(devset).replace("Observations:","").replace("Summary:","") - - if 
view_examples: - example_sets = {} - for predictor in module.predictors(): - # Get all augmented examples - example_set = {} - all_sets_of_examples = demo_candidates[id(predictor)] # Get all generated sets of examples - for example_set_i, set_of_examples in enumerate(all_sets_of_examples): - if example_set_i != 0: # Skip the no examples case - for example in set_of_examples: # Get each individual example in the set - if "augmented" in example.keys(): - if example["augmented"]: - if example_set_i not in example_set: - example_set[example_set_i] = [] - fields_to_use = signature_to_template(predictor.signature).fields - input_variable_names = list(predictor.signature.input_fields.keys()) - example_with_only_signature_fields = {key: value for key, value in example.items() if key in input_variable_names} - example_string = self._create_example_string(fields_to_use, example_with_only_signature_fields) - example_set[example_set_i].append(example_string) - example_sets[id(predictor)] = example_set - else: - example_set[example_set_i] = [] - example_sets[id(predictor)] = example_set - - # Seed the prompt optimizer zero shot with just the instruction, generate BREADTH new prompts - for predictor in module.predictors(): - basic_instruction = None - basic_prefix = None - if (hasattr(predictor, 'extended_signature')): - basic_instruction = predictor.extended_signature.instructions - *_, last_field = predictor.extended_signature.fields.values() - basic_prefix = last_field.json_schema_extra["prefix"] - else: - basic_instruction = predictor.extended_signature1.instructions - *_, last_field = predictor.extended_signature1.fields.values() - basic_prefix = last_field.json_schema_extra["prefix"] - with dspy.settings.context(lm=self.prompt_model): - # Data & Examples - if view_data and view_examples: - if 1 not in example_sets[id(predictor)].keys(): - raise ValueError("No examples found for the given predictor") - instruct = None - for i in range(1, self.n): - new_instruct = dspy.Predict( - BasicGenerateInstructionWithExamplesAndDataObservations, - n=1, - temperature=self.init_temperature, - )( - basic_instruction=basic_instruction, - observations=self.observations, - examples=example_sets[id(predictor)][i], - ) - if not instruct: - instruct = new_instruct - else: - instruct.completions.proposed_instruction.extend(new_instruct.completions.proposed_instruction) - instruct.completions.proposed_prefix_for_output_field.extend(new_instruct.completions.proposed_prefix_for_output_field) - # Just data - elif view_data: - instruct = dspy.Predict(BasicGenerateInstructionWithDataObservations, n=N-1, temperature=self.init_temperature)(basic_instruction=basic_instruction, observations=self.observations) - # Just examples - elif view_examples: - instruct = None - for i in range(1,self.n): # Note: skip over the first example set which is empty - new_instruct = dspy.Predict( - BasicGenerateInstructionWithExamples, - n=1, - temperature=self.init_temperature, - )( - basic_instruction=basic_instruction, - examples=example_sets[id(predictor)][i], - ) - if not instruct: - instruct = new_instruct - else: - instruct.completions.proposed_instruction.extend(new_instruct.completions.proposed_instruction) - instruct.completions.proposed_prefix_for_output_field.extend(new_instruct.completions.proposed_prefix_for_output_field) - # Neither - else: - instruct = dspy.Predict(BasicGenerateInstruction, n=N-1, temperature=self.init_temperature)(basic_instruction=basic_instruction) - - # Add in our initial prompt as a candidate as well - 
instruct.completions.proposed_instruction.insert(0, basic_instruction) - instruct.completions.proposed_prefix_for_output_field.insert(0, basic_prefix) - candidates[id(predictor)] = instruct.completions - evaluated_candidates[id(predictor)] = {} - - if self.verbose: self._print_model_history(self.prompt_model) - - return candidates, evaluated_candidates - - def compile(self, student, *, devset, optuna_trials_num, max_bootstrapped_demos, max_labeled_demos, eval_kwargs, seed=42, view_data=True, view_examples=True): - - random.seed(seed) - - # Set up program and evaluation function - module = student.deepcopy() - evaluate = Evaluate(devset=devset, metric=self.metric, **eval_kwargs) - - # Generate N few shot example sets - demo_candidates = {} - for i in range(self.n): - if i == 0: # Story empty set of demos as default for index 0 - for module_p in module.predictors(): - if id(module_p) not in demo_candidates: - demo_candidates[id(module_p)] = [] - demo_candidates[id(module_p)].append([]) - else: - if self.verbose: print(f"Creating basic bootstrap: {i}/{self.n-1}") - - # Create a new basic bootstrap few - shot program . - rng = random.Random(i) - shuffled_devset = devset[:] # Create a copy of devset - rng.shuffle(shuffled_devset) # Shuffle the copy - tp = BootstrapFewShot(metric = self.metric, max_bootstrapped_demos=max_bootstrapped_demos, max_labeled_demos=max_labeled_demos, teacher_settings=self.teacher_settings) - candidate_program = tp.compile(student=module.deepcopy(), trainset=shuffled_devset) - - # Store the candidate demos - for module_p, candidate_p in zip(module.predictors(), candidate_program.predictors()): - if id(module_p) not in demo_candidates: - demo_candidates[id(module_p)] = [] - demo_candidates[id(module_p)].append(candidate_p.demos) - - # Generate N candidate prompts - instruction_candidates, _ = self._generate_first_N_candidates(module, self.n, view_data, view_examples, demo_candidates, devset) - - # Initialize variables to store the best program and its score - best_score = float('-inf') - best_program = None - trial_num = 0 - - trial_logs = {} - - # Define our trial objective - def create_objective(baseline_program, instruction_candidates, demo_candidates, evaluate, devset): - def objective(trial): - nonlocal best_program, best_score, trial_num, trial_logs # Allow access to the outer variables - candidate_program = baseline_program.deepcopy() - - # Suggest the instruction to use for our predictor - if self.verbose: print(f"Starting trial num: {trial_num}") - trial_logs[trial_num] = {} - - for p_old, p_new in zip(baseline_program.predictors(), candidate_program.predictors()): - - # Get instruction candidates for our given predictor - p_instruction_candidates = instruction_candidates[id(p_old)] - p_demo_candidates = demo_candidates[id(p_old)] - - # Suggest the index of the instruction candidate to use in our trial - #instruction_idx = trial.suggest_categorical(f"{id(p_old)}_predictor_instruction",range(len(p_instruction_candidates))) - #demos_idx = trial.suggest_categorical(f"{id(p_old)}_predictor_demos",range(len(p_demo_candidates))) - instruction_idx = trial.suggest_int(f"{id(p_old)}_predictor_instruction",low=0, high=len(p_instruction_candidates)-1) - demos_idx = trial.suggest_int(f"{id(p_old)}_predictor_demos",low=0, high=len(p_demo_candidates)-1) - - trial_logs[trial_num][f"{id(p_old)}_predictor_instruction"] = instruction_idx - trial_logs[trial_num][f"{id(p_old)}_predictor_demos"] = demos_idx - - # Get the selected instruction candidate - selected_candidate = 
p_instruction_candidates[instruction_idx] - selected_instruction = selected_candidate.proposed_instruction.strip('"').strip() - selected_prefix = selected_candidate.proposed_prefix_for_output_field.strip('"').strip() - - # Use this candidates in our program - *_, last_field = p_new.extended_signature.fields.keys() - p_new.extended_signature = p_new.extended_signature \ - .with_instructions(selected_instruction) \ - .with_updated_fields(last_field, prefix=selected_prefix) - - # Get the selected demos - selected_demos = p_demo_candidates[demos_idx] - - # Use these demos in our program - p_new.demos = selected_demos - - if self.verbose: print("Evaling the following program:") - self._print_full_program(candidate_program) - trial_logs[trial_num]["program"] = candidate_program - - # Evaluate with the new prompts - total_score = 0 - batch_size = 100 - num_batches = math.ceil(len(devset) / batch_size) - - for i in range(num_batches): - start_index = i * batch_size - end_index = min((i + 1) * batch_size, len(devset)) - split_dev = devset[start_index:end_index] - split_score = evaluate(candidate_program, devset=split_dev, display_table=0) - if self.verbose: print(f"{i}st split score: {split_score}") - - total_score += split_score * len(split_dev) - curr_weighted_avg_score = total_score / min((i+1)*100,len(devset)) - if self.verbose: print(f"curr average score: {curr_weighted_avg_score}") - - trial.report(curr_weighted_avg_score, i) - - # Handle pruning based on the intermediate value. - if trial.should_prune(): - if self.verbose: print("Optuna decided to prune!") - trial_logs[trial_num]["score"] = curr_weighted_avg_score - trial_logs[trial_num]["pruned"] = True - trial_num += 1 - raise optuna.TrialPruned() - - if self.verbose: - print(f"Fully evaled score: {curr_weighted_avg_score}") - self._print_model_history(self.task_model, n=1) - score = curr_weighted_avg_score - - trial_logs[trial_num]["score"] = curr_weighted_avg_score - trial_logs[trial_num]["pruned"] = False - - # Update the best program if the current score is better - if score > best_score: - best_score = score - best_program = candidate_program.deepcopy() - - trial_num += 1 - - return score - - return objective - - # Run the trial - objective_function = create_objective(module, instruction_candidates, demo_candidates, evaluate, devset) - sampler = optuna.samplers.TPESampler(seed=seed) - study = optuna.create_study(direction="maximize", sampler=sampler) - score = study.optimize(objective_function, n_trials=optuna_trials_num) - if best_program is not None and self.track_stats: - best_program.trial_logs = trial_logs +class BayesianSignatureOptimizer(MIPRO): + def __init__(self, prompt_model=None, task_model=None, teacher_settings={}, n=10, metric=None, init_temperature=1.0, verbose=False, track_stats=True, view_data_batch_size=10): + print(u"\u001b[31m[WARNING] BayesianSignatureOptimizer has been deprecated and replaced with MIPRO. BayesianSignatureOptimizer will be removed in a future release. 
\u001b[0m")
-
-    return best_program
+    super().__init__(prompt_model, task_model, teacher_settings, n, metric, init_temperature, verbose, track_stats, view_data_batch_size)
\ No newline at end of file
diff --git a/examples/qa/hotpot/hotpotqa_optimized.ipynb b/examples/qa/hotpot/hotpotqa_optimized.ipynb
new file mode 100644
index 0000000000..c87d474979
--- /dev/null
+++ b/examples/qa/hotpot/hotpotqa_optimized.ipynb
@@ -0,0 +1,566 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "\"DSPy7\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Using __Multi-stage Instruction Proposal & Optimization (MIPRO)__ in DSPy"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### FAQ 🙋\n",
+    "#### 1) How does MIPRO work?\n",
+    "At a high level, the MIPRO program optimizer works by first proposing candidate fewshot example sets and instructions for each prompt in your program, and then optimizing over these fewshot example sets and instructions as hyperparameters for a specified number of trials. In each trial, the optimizer evaluates a different combination of prompts on the train set, which allows it to learn which combinations yield the best performance.\n",
+    "\n",
+    "#### 2) How much will MIPRO cost me to run?\n",
+    "Note that __this notebook__ is free to run, because all LM calls have been cached. However, when using an optimizer on your own program, here is a breakdown of the upper bound of the number of calls to the task model and prompt model respectively:\n",
+    "\n",
+    "- **Task model calls**: MIPRO makes up to __O(TxPxM)__ task model calls, where T is the number of trials, P is the number of prompts in the program, and M is the size of the train set. This is because the model is evaluating the program on the train set each trial. In practice, this should be lower given that MIPRO prunes poor trials early (i.e. it may stop a trial after running on the first 100 or so examples if performance is poor).\n",
+    "\n",
+    "- **Prompt model calls**: MIPRO makes up to N*P+10 prompt model calls, where N is the number of instruction / fewshot example set candidates to generate for each prompt, and P is the number of prompts in the program. The extra 10 calls come from generating a summary of the data in the training set, which we use in the meta prompt to create better instructions.\n",
+    "\n",
+    "#### 3) How should I configure the hyperparameters?\n",
+    "We have yet to run full hyperparameter sweeps with MIPRO, but based off of initial experimentation, we'd recommend the following:\n",
+    "- __Trial num__: Gains can be seen after about 20-30 trials. However, 100-200 trials can add further marginal gains.\n",
+    "- __n__: This hyperparameter controls the number of candidate prompts and fewshot example sets that are generated to optimize over. With more trials and fewer prompts to optimize, we can set n higher, as we have more trials to explore different combinations of prompts. If your program has 2-3 modules and `num_trials=30`, we'd recommend ~`n=10`. If the trial count is higher (say `num_trials=100`), then we can go higher to ~`n=15`. If you have a program with only 1 module and are keeping the program 0-shot (i.e. no fewshot examples), then `num_trials` should be set to equal `n`, because each trial can explore a new instruction.\n",
+    "- __Training set size__: Between 200 and 500 training examples are recommended. Increasing the training set size can help prevent overfitting, but adds to the expense of running the optimizer.\n",
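+    "\n",
+    "As a rough, hypothetical worked example of the bounds above: with `num_trials=30`, a program with 2 prompts, `n=10`, and a train set of 200 examples, MIPRO would make at most 30 x 2 x 200 = 12,000 task model calls and 10 x 2 + 10 = 30 prompt model calls.\n",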
Increasing the training set size can help prevent overfitting, but it also adds to the expense of running the optimization.\n",
+ "\n",
+ "#### 4) What should I do if I want to reduce the cost?\n",
+ "You can always update the hyperparameters accordingly, such as using a smaller train set, running fewer trials, or using a program with fewer modules.\n",
+ "Alternatively, one strategy would be to optimize using a cheaper task model (ie. locally hosted Llama-2), as initial experiments have shown that prompts optimized for a smaller model also transfer to working well on a larger model.\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### 0] Setup"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "First, we'll __load in the cached requests__ for this task, so that we don't actually need to call any LMs for this notebook."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from huggingface_hub import hf_hub_download\n",
+ "import zipfile\n",
+ "import os\n",
+ "\n",
+ "# Define the repository ID on Hugging Face\n",
+ "repo_id = 'kopsahlong/test3'\n",
+ "cache_file_path = hf_hub_download(repo_id=repo_id, filename='notebook_cache_v3.zip')\n",
+ "compiled_program_file_path = hf_hub_download(repo_id=repo_id, filename='compiled_program.pickle')\n",
+ "# Unzipping the file\n",
+ "with zipfile.ZipFile(cache_file_path, 'r') as zip_ref:\n",
+ " zip_ref.extractall(\".\")\n",
+ "\n",
+ "os.environ[\"DSP_NOTEBOOK_CACHEDIR\"] = \"notebook_cache\"\n",
+ "\n",
+ "\n",
+ "# Set up the cache for this notebook\n",
+ "# os.environ[\"DSP_CACHEDIR\"] = \"/lfs/0/kristaoo/dspy/examples/qa/hotpot/caches/cache_train_500_eval_500_n_10_trials_30_hops_4\" # repo_clone_path\n",
+ "# os.environ[\"DSP_NOTEBOOK_CACHEDIR\"] = \"/lfs/0/kristaoo/dspy/examples/qa/hotpot/caches/MIPRO_notebook_cache_v2\" # repo_clone_path\n",
+ "# os.environ[\"DSP_NOTEBOOK_CACHEDIR\"] = \"/lfs/0/kristaoo/dspy/notebook_cache\" # repo_clone_path\n",
+ "# os.environ[\"DSP_NOTEBOOK_CACHEDIR\"] = \"/lfs/0/kristaoo/dspy/examples/qa/hotpot/DSPy_notebook_cache/cache_copy\" "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Pydantic version: 2.6.3\n"
+ ]
+ }
+ ],
+ "source": [
+ "import pkg_resources\n",
+ "\n",
+ "# Get the installed version of pydantic\n",
+ "pydantic_v = pkg_resources.get_distribution(\"pydantic\").version\n",
+ "\n",
+ "print(f\"Pydantic version: {pydantic_v}\")\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: add in DSPy setup"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We will also specify the __prompt LM__ (in this case GPT 3.5), the __task LM__ (Llama 13B), and the retrieval model we'll be using for our task (a HotPotQA multihop retrieval task)."
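As a quick sanity check on the call-count bounds from FAQ 2 above, here is a minimal back-of-the-envelope sketch. All parameter values below are illustrative assumptions, not recommendations tied to this notebook:

```python
# Hypothetical sanity check of the upper bounds described in FAQ 2.
num_trials = 30       # T: optimization trials (assumed)
num_prompts = 3       # P: prompts (predictors) in the program (assumed)
train_size = 500      # M: examples in the train set (assumed)
num_candidates = 10   # N: candidate instructions / fewshot sets per prompt (assumed)

max_task_model_calls = num_trials * num_prompts * train_size  # O(T x P x M)
max_prompt_model_calls = num_candidates * num_prompts + 10    # N * P + 10

print(f"Task model calls (upper bound): {max_task_model_calls:,}")   # 45,000
print(f"Prompt model calls (upper bound): {max_prompt_model_calls}") # 40
```

In practice the task-model figure is a ceiling, since pruned trials stop well before seeing the full train set.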
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os \n",
+ "import dspy\n",
+ "import openai\n",
+ "import os\n",
+ "\n",
+ "### NOTE: if you'd like to run this code without a cache, you can uncomment these lines to configure your OpenAI key ###\n",
+ "# os.environ['OPENAI_API_KEY'] = \"TODO: ADD YOUR OPEN AI KEY HERE\"\n",
+ "# openai.api_key = os.environ.get('OPENAI_API_KEY')\n",
+ "# openai.api_base = \"https://api.openai.com/v1\"\n",
+ "\n",
+ "prompt_model_name = \"gpt-3.5-turbo-1106\"\n",
+ "task_model_name = \"meta-llama/Llama-2-13b-chat-hf\"\n",
+ "colbert_v2_endpoint = \"http://20.102.90.50:2017/wiki17_abstracts\"\n",
+ "\n",
+ "prompt_model = dspy.OpenAI(model=prompt_model_name, max_tokens=150)\n",
+ "task_model = dspy.HFClientTGI(model=task_model_name, port=[7140, 7141, 7142, 7143], max_tokens=150)\n",
+ "\n",
+ "colbertv2 = dspy.ColBERTv2(url=colbert_v2_endpoint)\n",
+ "\n",
+ "dspy.settings.configure(rm=colbertv2, lm=task_model)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### 1] Define Task"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Here, we'll define the program that we'd like to run: a multihop retrieval question-answering program (loosely inspired by prior work on multihop retrieval). We additionally load in the data, and define how we'd like to evaluate this task."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/lfs/0/kristaoo/miniconda3/envs/dspy_test/lib/python3.10/site-packages/datasets/table.py:1421: FutureWarning: promote has been superseded by mode='default'.\n",
+ " table = cls._concat_blocks(blocks, axis=0)\n"
+ ]
+ }
+ ],
+ "source": [
+ "from dspy.evaluate import Evaluate\n",
+ "import re \n",
+ "from dspy.datasets import HotPotQA\n",
+ "\n",
+ "class ReturnRankedDocuments(dspy.Signature):\n",
+ " \"\"\"Given a question we are trying to answer and a list of passages, return a comma separated list of the numbers associated with each passage. 
These numbers should be ordered by helpfulness in answering the question, with most helpful passage number first, and the least helpful last.\"\"\"\n", + " question = dspy.InputField(desc=\"The question we're trying to answer.\")\n", + " context = dspy.InputField(desc=\"List of potentially related passages.\")\n", + " ranking = dspy.OutputField(desc=\"A comma separated list of numbers corresponding to passage indices, ranked in descending order by their helpfulness in answering our question.\")\n", + "\n", + "class RankingMultiHop(dspy.Module):\n", + " def __init__(self, hops, num_passages_to_retrieve, max_passages_in_context):\n", + " super().__init__()\n", + " self.hops = hops\n", + " self.num_passages_to_retrieve = num_passages_to_retrieve\n", + " self.max_passages_in_context = max_passages_in_context\n", + " self.retrieve = dspy.Retrieve(k = self.num_passages_to_retrieve)\n", + " self.generate_query = dspy.ChainOfThought(\"context ,question->search_query\")\n", + " self.generate_answer = dspy.ChainOfThought(\"context ,question->answer\")\n", + " self.generate_ranking = dspy.ChainOfThought(ReturnRankedDocuments)\n", + " \n", + " def forward(self,question):\n", + " context = []\n", + " full_context = []\n", + " top_context = []\n", + " max_passage_num = self.max_passages_in_context\n", + " for hop in range(self.hops):\n", + " # Get a new query\n", + " query = self.generate_query(context = context, question = question).search_query\n", + " # Get new passages\n", + " context = self.retrieve(query).passages\n", + " # Add these new passages to the previous top context \n", + " full_context = top_context + context\n", + " # Get the most important indices, ranked\n", + " most_important_indices = self.generate_ranking(question=question, context=full_context).ranking\n", + " indices = [int(num) for num in re.findall(r'\\d+', most_important_indices)]\n", + "\n", + " if len(indices) < max_passage_num:\n", + " indices = range(1,max_passage_num+1)\n", + "\n", + " valid_indices = [index-1 for index in indices if index-1 < len(context)]\n", + " top_indices = sorted(valid_indices, key=lambda x: x)[:max_passage_num+1]\n", + " most_important_context_list = [context[idx] for idx in top_indices]\n", + " # Save the top context\n", + " top_context = most_important_context_list\n", + "\n", + " return dspy.Prediction(context=context, answer=self.generate_answer(context = top_context , question = question).answer)\n", + "\n", + "program = RankingMultiHop(hops=4, num_passages_to_retrieve=5, max_passages_in_context=5)\n", + "\n", + "# Load and configure the datasets.\n", + "TRAIN_SIZE = 500\n", + "EVAL_SIZE = 500\n", + "\n", + "hotpot_dataset = HotPotQA(train_seed=1, eval_seed=2023, test_size=0)\n", + "trainset = [x.with_inputs('question') for x in hotpot_dataset.train][:TRAIN_SIZE]\n", + "devset = [x.with_inputs('question') for x in hotpot_dataset.dev][:EVAL_SIZE]\n", + "\n", + "# Set up metrics\n", + "NUM_THREADS = 10\n", + "\n", + "metric = dspy.evaluate.answer_exact_match\n", + "\n", + "# kwargs = dict(num_threads=NUM_THREADS, display_progress=True, display_table=None)\n", + "kwargs = dict(num_threads=NUM_THREADS, display_progress=True)\n", + "evaluate = Evaluate(devset=devset, metric=metric, **kwargs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2] Baseline Evaluation\n", + "Now, we'll quickly evaluate our baseline program so that we can see how the performance using the Prompt Optimizer compares. 
We should see performance of about __16%__ on our trainset, and __21.4%__ on our devset."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Average Metric: 80 / 500 (16.0): 100%|██████████| 500/500 [00:30<00:00, 16.32it/s]\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Average Metric: 80 / 500 (16.0%)\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Average Metric: 107 / 500 (21.4): 100%|██████████| 500/500 [00:29<00:00, 17.01it/s]\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Average Metric: 107 / 500 (21.4%)\n"
+ ]
+ }
+ ],
+ "source": [
+ "baseline_train_score = evaluate(program,devset=trainset)\n",
+ "baseline_eval_score = evaluate(program, devset=devset)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### 3] Optimizing with MIPRO"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### 3a] Compile Program"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import cloudpickle as pickle\n",
+ "from dspy.teleprompt import BayesianSignatureOptimizer\n",
+ "\n",
+ "LOAD_PRECOMPILED_PROGRAM = True\n",
+ "\n",
+ "# By default, we will load the precompiled program\n",
+ "if LOAD_PRECOMPILED_PROGRAM:\n",
+ " # Load our precompiled program\n",
+ " with open(compiled_program_file_path, 'rb') as file:\n",
+ " # Load the data from the file\n",
+ " compiled_program = pickle.load(file)\n",
+ "# Otherwise, if desired, the program can be compiled from scratch \n",
+ "else:\n",
+ " # Define hyperparameters:\n",
+ " N = 10 # The number of instructions and fewshot examples that we will generate and optimize over\n",
+ " trials = 30 # The number of optimization trials to be run (we will test out a new combination of instructions and fewshot examples in each trial) \n",
+ " temperature = 1.0 # The temperature configured for generating new instructions\n",
+ "\n",
+ " # Compile\n",
+ " eval_kwargs = dict(num_threads=16, display_progress=True, display_table=0)\n",
+ " teleprompter = BayesianSignatureOptimizer(prompt_model=prompt_model, task_model=task_model, metric=metric, n=N, init_temperature=temperature, verbose=True)\n",
+ " compiled_program = teleprompter.compile(program.deepcopy(), devset=trainset, optuna_trials_num=trials, max_bootstrapped_demos=1,max_labeled_demos=2, eval_kwargs=eval_kwargs)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "generate_query = ChainOfThought(StringSignature(context, question -> search_query\n",
+ " instructions='Given the fields `context`, `question`, produce the fields `search_query`.'\n",
+ " context = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Context:', 'desc': '${context}'})\n",
+ " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}'})\n",
+ " search_query = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'output', 'prefix': 'Search Query:', 'desc': '${search_query}'})\n",
+ "))\n",
+ "generate_answer = ChainOfThought(StringSignature(context, question -> answer\n",
+ " instructions='Given the fields `context`, `question`, produce the fields `answer`.'\n",
+ " context = Field(annotation=str required=True 
json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Context:', 'desc': '${context}'})\n",
+ " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}'})\n",
+ " answer = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'output', 'prefix': 'Answer:', 'desc': '${answer}'})\n",
+ "))\n",
+ "generate_ranking = ChainOfThought(ReturnRankedDocuments(question, context -> ranking\n",
+ " instructions='Given a question we are trying to answer and a list of passages, return a comma separated list of the numbers associated with each passage. These numbers should be ordered by helpfulness in answering the question, with most helpful passage number first, and the least helpful last.'\n",
+ " question = Field(annotation=str required=True json_schema_extra={'desc': \"The question we're trying to answer.\", '__dspy_field_type': 'input', 'prefix': 'Question:'})\n",
+ " context = Field(annotation=str required=True json_schema_extra={'desc': 'List of potentially related passages.', '__dspy_field_type': 'input', 'prefix': 'Context:'})\n",
+ " ranking = Field(annotation=str required=True json_schema_extra={'desc': 'A comma separated list of numbers corresponding to passage indices, ranked in descending order by their helpfulness in answering our question.', '__dspy_field_type': 'output', 'prefix': 'Ranking:'})\n",
+ "))"
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "compiled_program"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### 3b] Evaluate optimized program"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Average Metric: 84 / 201 (41.8): 40%|████ | 200/500 [00:13<00:19, 15.48it/s]"
+ ]
+ }
+ ],
+ "source": [
+ "bayesian_train_score = evaluate(compiled_program, devset=trainset)\n",
+ "bayesian_eval_score = evaluate(compiled_program, devset=devset)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### 3c] Visualizing scores & prompts over trials"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now, let's take a look at how this optimization looked over the course of each trial. We see that, in general, performance increases as trials go on, until it saturates after ~trial 13. Note that some of the 'pruned' trials have high scores, but were pruned early because they had comparatively lower scores on the easier slices of the data."
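Before looking at the plot, it helps to see why a trial with a decent final score can still be pruned. The sketch below is a simplified reconstruction of the batched evaluation loop from the optimizer code earlier in this patch series: the running average is reported to Optuna after each batch of examples, and the trial is cut off as soon as that average lags earlier trials, so a candidate that starts weak on the easy early slices never gets to finish. The helper name and signature are illustrative, not part of the library API:

```python
import math
import optuna

def evaluate_with_pruning(trial, candidate_program, trainset, evaluate, batch_size=100):
    # Score the candidate program batch by batch, reporting intermediate values
    # so Optuna can prune runs that start out weak on the early slices.
    total_score = 0
    num_batches = math.ceil(len(trainset) / batch_size)
    for i in range(num_batches):
        batch = trainset[i * batch_size : (i + 1) * batch_size]
        batch_score = evaluate(candidate_program, devset=batch, display_table=0)
        total_score += batch_score * len(batch)
        running_avg = total_score / min((i + 1) * batch_size, len(trainset))
        trial.report(running_avg, i)   # expose the intermediate score to Optuna
        if trial.should_prune():       # true when this trial lags earlier ones
            raise optuna.TrialPruned()
    return running_avg
```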
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/png": "[base64-encoded PNG omitted: scatter plot titled 'Trial Scores with Pruning Status', Trial Number vs. Score, successful trials in green and pruned trials in grey]",
+ "text/plain": [
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "\n", + "trial_logs = compiled_program.trial_logs\n", + "\n", + "# Extracting trial numbers, scores, and pruning status\n", + "trial_numbers = list(trial_logs.keys())\n", + "scores = [trial_logs[trial]['score'] for trial in trial_numbers]\n", + "pruning_status = [trial_logs[trial]['pruned'] for trial in trial_numbers]\n", + "\n", + "# Plot setup\n", + "plt.figure(figsize=(5, 3))\n", + "\n", + "# Plotting each point\n", + "for trial_number, score, pruned in zip(trial_numbers, scores, pruning_status):\n", + " if pruned:\n", + " plt.scatter(trial_number, score, color='grey', label='Pruned Trial' if 'Pruned Trial' not in plt.gca().get_legend_handles_labels()[1] else \"\")\n", + " else:\n", + " plt.scatter(trial_number, score, color='green', label='Successful Trial' if 'Successful Trial' not in plt.gca().get_legend_handles_labels()[1] else \"\")\n", + "\n", + "plt.xlabel('Trial Number')\n", + "plt.ylabel('Score')\n", + "plt.title('Trial Scores with Pruning Status')\n", + "plt.grid(True)\n", + "plt.legend()\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also visualize the best prompts discovered by MIPRO as our trials progress... " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Basline program | Score: 0:\n", + "Prompt 1 Instruction: Given the fields `context`, `question`, produce the fields `search_query`.\n", + "Prompt 2 Instruction: Given the fields `context`, `question`, produce the fields `answer`.\n", + "Prompt 3 Instruction: Given a question we are trying to answer and a list of passages, return a comma separated list of the numbers associated with each passage. These numbers should be ordered by helpfulness in answering the question, with most helpful passage number first, and the least helpful last.\n", + "\n", + "----------------\n", + "Best program after 0 trials | Score: 24.0:\n", + "Prompt 1 Instruction: Given the fact-based nature of the questions related to pop culture, history, and entertainment, with a focus on identifying specific works or individuals, accurately retrieve and synthesize relevant information.\n", + "Prompt 2 Instruction: Given the fields `context` containing information about a specific topic, and `question` containing a fact-based question, generate the field `answer` with the specific information that directly addresses the question.\n", + "Prompt 3 Instruction: For a given question and list of passages, rank the passages in order of helpfulness in answering the question, with the most helpful passage first and the least helpful last. 
Provide a comma-separated list of the passage numbers in the ranked order.\n", + "\n", + "Best program after 5 trials | Score: 31.0:\n", + "Prompt 1 Instruction: Given the fact-based nature of the questions related to pop culture, history, and entertainment, with a focus on identifying specific works or individuals, accurately retrieve and synthesize relevant information.\n", + "Prompt 2 Instruction: Given the fields `context`, `question`, produce the fields `answer`.\n", + "Prompt 3 Instruction: Given a fact-based question, identify the specific work, individual, or event being referenced, and provide a concise and accurate answer based on the information provided in the passages.\n", + "\n", + "Best program after 10 trials | Score: 41.4:\n", + "Prompt 1 Instruction: Given the fields `context`, `question`, produce the fields `search_query`.\n", + "Prompt 2 Instruction: Given a fact-based question related to pop culture, history, or entertainment, along with a corresponding context, identify and provide a concise answer that directly corresponds to the specific question posed.\n", + "Prompt 3 Instruction: Given a fact-based question related to pop culture, history, or entertainment and a list of relevant passages, identify and rank the passages in order of relevance to the question. Return a comma-separated list of the passage numbers, with the most relevant passage number first and the least relevant last.\n", + "\n", + "Best program after 15 trials | Score: 42.4:\n", + "Prompt 1 Instruction: Given the fields `context`, `question`, produce the fields `search_query`.\n", + "Prompt 2 Instruction: Given the context and question about a specific event or individual, generate a concise and precise answer that directly addresses the question.\n", + "Prompt 3 Instruction: Given a fact-based question related to pop culture, history, or entertainment and a list of relevant passages, identify and rank the passages in order of relevance to the question. Return a comma-separated list of the passage numbers, with the most relevant passage number first and the least relevant last.\n", + "\n", + "Best program after 20 trials | Score: 42.4:\n", + "Prompt 1 Instruction: Given the fields `context`, `question`, produce the fields `search_query`.\n", + "Prompt 2 Instruction: Given the context and question about a specific event or individual, generate a concise and precise answer that directly addresses the question.\n", + "Prompt 3 Instruction: Given a fact-based question related to pop culture, history, or entertainment and a list of relevant passages, identify and rank the passages in order of relevance to the question. Return a comma-separated list of the passage numbers, with the most relevant passage number first and the least relevant last.\n", + "\n", + "Best program after 25 trials | Score: 42.4:\n", + "Prompt 1 Instruction: Given the fields `context`, `question`, produce the fields `search_query`.\n", + "Prompt 2 Instruction: Given the context and question about a specific event or individual, generate a concise and precise answer that directly addresses the question.\n", + "Prompt 3 Instruction: Given a fact-based question related to pop culture, history, or entertainment and a list of relevant passages, identify and rank the passages in order of relevance to the question. 
Return a comma-separated list of the passage numbers, with the most relevant passage number first and the least relevant last.\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "best_score = 0\n",
+ "\n",
+ "def get_signature(predictor):\n",
+ " if (hasattr(predictor, 'extended_signature')):\n",
+ " return predictor.extended_signature\n",
+ " elif (hasattr(predictor, 'signature')):\n",
+ " return predictor.signature\n",
+ "\n",
+ "print(f\"Baseline program | Score: {best_score}:\")\n",
+ "for i,predictor in enumerate(program.predictors()):\n",
+ " print(f\"Prompt {i+1} Instruction: {get_signature(predictor).instructions}\")\n",
+ "print() \n",
+ "\n",
+ "print(\"----------------\")\n",
+ "\n",
+ "for trial_num in compiled_program.trial_logs:\n",
+ " program_score = compiled_program.trial_logs[trial_num][\"score\"]\n",
+ " program_pruned = compiled_program.trial_logs[trial_num][\"pruned\"]\n",
+ " if program_score > best_score and not program_pruned:\n",
+ " best_score = program_score\n",
+ " best_program_so_far = compiled_program.trial_logs[trial_num][\"program\"]\n",
+ " if trial_num % 5 == 0:\n",
+ " print(f\"Best program after {trial_num} trials | Score: {best_score}:\")\n",
+ " for i,predictor in enumerate(best_program_so_far.predictors()):\n",
+ " print(f\"Prompt {i+1} Instruction: {get_signature(predictor).instructions}\")\n",
+ " print() "
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "dspy_test",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.10"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

From 3b7f285a3078c12338df9e6c45202f26a741bec6 Mon Sep 17 00:00:00 2001
From: Michael Ryan
Date: Thu, 7 Mar 2024 01:06:38 -0800
Subject: [PATCH 137/243] Changed MIPRO and COPRO fields

---
 dspy/teleprompt/copro_optimizer.py | 8 +--
 dspy/teleprompt/mipro_optimizer.py | 65 ++++++++---------
 dspy/teleprompt/signature_opt.py | 12 ++++-
 dspy/teleprompt/signature_opt_bayesian.py | 32 ++++++++++-
 4 files changed, 69 insertions(+), 48 deletions(-)

diff --git a/dspy/teleprompt/copro_optimizer.py b/dspy/teleprompt/copro_optimizer.py
index f1c303a47c..4cd7e3ba3a 100644
--- a/dspy/teleprompt/copro_optimizer.py
+++ b/dspy/teleprompt/copro_optimizer.py
@@ -13,7 +13,7 @@
 teleprompter = COPRO(prompt_model=prompt_model, metric=metric, breadth=BREADTH, depth=DEPTH, init_temperature=INIT_TEMPERATURE)
 kwargs = dict(num_threads=NUM_THREADS, display_progress=True, display_table=0)
-compiled_prompt_opt = teleprompter.compile(program.deepcopy(), devset=devset[:DEV_NUM], eval_kwargs=kwargs)
+compiled_prompt_opt = teleprompter.compile(program.deepcopy(), trainset=trainset[:DEV_NUM], eval_kwargs=kwargs)
 eval_score = evaluate(compiled_prompt_opt, devset=evalset[:EVAL_NUM], **kwargs)

 Note that this teleprompter takes in the following parameters:
@@ -109,10 +109,10 @@ def _set_signature(self, predictor, updated_signature):
         predictor.signature = updated_signature

-    def compile(self, student, *, devset, eval_kwargs):
+    def compile(self, student, *, trainset, eval_kwargs):
         """student is a program that needs to be optimized, note that it may be zero-shot or already pre-optimized for demos != []"""
         module = student.deepcopy()
-        evaluate = Evaluate(devset=devset, metric=self.metric, **eval_kwargs)
+        evaluate = Evaluate(devset=trainset, metric=self.metric, 
**eval_kwargs) total_calls = 0 results_best = {id(p):{"depth": [], "max": [], "average": [], "min":[], "std": []} for p in module.predictors()} results_latest = {id(p):{"depth": [], "max": [], "average": [], "min":[], "std": []} for p in module.predictors()} @@ -179,7 +179,7 @@ def compile(self, student, *, devset, eval_kwargs): if self.verbose: print(f"Predictor {i}") self._print_signature(predictor) if self.verbose: print(f"At Depth {d}/{self.depth}, Evaluating Prompt Candidate #{c_i}/{len(candidates_)} for Predictor {p_i} of {len(module.predictors())}.") - score = evaluate(module_clone, devset=devset, **eval_kwargs) + score = evaluate(module_clone, devset=trainset, **eval_kwargs) if self.verbose and self.prompt_model: print(f"prompt_model.inspect_history(n=1) {self.prompt_model.inspect_history(n=1)}") total_calls += 1 if self.verbose: print("----------------") diff --git a/dspy/teleprompt/mipro_optimizer.py b/dspy/teleprompt/mipro_optimizer.py index 5dfd691cab..e96ba9d7ec 100644 --- a/dspy/teleprompt/mipro_optimizer.py +++ b/dspy/teleprompt/mipro_optimizer.py @@ -23,9 +23,9 @@ from dspy.teleprompt import MIPROOptimizer -teleprompter = MIPROOptimizer(prompt_model=prompt_model, task_model=task_model, metric=metric, n=10, init_temperature=1.0) +teleprompter = MIPROOptimizer(prompt_model=prompt_model, task_model=task_model, metric=metric, num_candidates=10, init_temperature=1.0) kwargs = dict(num_threads=NUM_THREADS, display_progress=True, display_table=0) -compiled_prompt_opt = teleprompter.compile(program, devset=devset[:DEV_NUM], trials_num=100, max_bootstrapped_demos=3, max_labeled_demos=5, eval_kwargs=kwargs) +compiled_prompt_opt = teleprompter.compile(program, trainset=trainset[:TRAIN_NUM], num_trials=100, max_bootstrapped_demos=3, max_labeled_demos=5, eval_kwargs=kwargs) eval_score = evaluate(compiled_prompt_opt, devset=evalset[:EVAL_NUM], **kwargs) Note that this teleprompter takes in the following parameters: @@ -33,7 +33,7 @@ * prompt_model: The model used for prompt generation. When unspecified, defaults to the model set in settings (ie. dspy.settings.configure(lm=task_model)). * task_model: The model used for prompt generation. When unspecified, defaults to the model set in settings (ie. dspy.settings.configure(lm=task_model)). * metric: The task metric used for optimization. -* n: The number of new prompts and sets of fewshot examples to generate and evaluate. Default=10. +* num_candidates: The number of new prompts and sets of fewshot examples to generate and evaluate. Default=10. * init_temperature: The temperature used to generate new prompts. Higher roughly equals more creative. Default=1.0. * verbose: Tells the method whether or not to print intermediate steps. * track_stats: Tells the method whether or not to track statistics about the optimization process. 
@@ -105,8 +105,8 @@ class DatasetDescriptorWithPriorObservations(dspy.Signature): observations = dspy.OutputField(desc="Somethings that holds true for most or all of the data you observed or COMPLETE if you have nothing to add") class MIPRO(Teleprompter): - def __init__(self, prompt_model=None, task_model=None, teacher_settings={}, n=10, metric=None, init_temperature=1.0, verbose=False, track_stats=True, view_data_batch_size=10): - self.n = n + def __init__(self, prompt_model=None, task_model=None, teacher_settings={}, num_candidates=10, metric=None, init_temperature=1.0, verbose=False, track_stats=True, view_data_batch_size=10): + self.n = num_candidates self.metric = metric self.init_temperature = init_temperature self.prompt_model = prompt_model if prompt_model is not None else dspy.settings.lm @@ -279,43 +279,24 @@ def _generate_first_N_candidates(self, module, N, view_data, view_examples, demo return candidates, evaluated_candidates - def compile(self, student, *, devset, max_bootstrapped_demos, max_labeled_demos, eval_kwargs, seed=42, view_data=True, view_examples=True, requires_permission_to_run=True, trials_num=None, optuna_trials_num=None): + def compile(self, student, *, trainset, max_bootstrapped_demos, max_labeled_demos, eval_kwargs, seed=42, view_data=True, view_examples=True, requires_permission_to_run=True, num_trials=None): # Define ANSI escape codes for colors YELLOW = '\033[93m' BLUE = '\033[94m' BOLD = '\033[1m' ENDC = '\033[0m' # Resets the color to default - # Check if both trials_num and optuna_trials_num are None - if trials_num is None and optuna_trials_num is None: - raise ValueError(f"{YELLOW}{BOLD}You must specify the number of trials using the 'trials_num' parameter.{ENDC}") - - # Check if the deprecated parameter is used - if optuna_trials_num is not None: - print("in it!") - # Issue a deprecation warning - warnings.warn( - "`trials_num` is deprecated and will be removed in a future version. 
" - "Use `trials_num` instead.", - DeprecationWarning - ) - # Use trials_num as a fallback if trials_num is not provided - if trials_num is None: - trials_num = optuna_trials_num - random.seed(seed) - estimated_task_model_calls_wo_module_calls = len(devset) * trials_num # M * T * P + estimated_task_model_calls_wo_module_calls = len(trainset) * num_trials # M * T * P estimated_prompt_model_calls = 10 + self.n * len(student.predictors()) # num data summary calls + N * P - - user_message = textwrap.dedent(f"""\ {YELLOW}{BOLD}WARNING: Projected Language Model (LM) Calls{ENDC} Please be advised that based on the parameters you have set, the maximum number of LM calls is projected as follows: - {YELLOW}- Task Model: {BLUE}{BOLD}{len(devset)}{ENDC}{YELLOW} examples in dev set * {BLUE}{BOLD}{trials_num}{ENDC}{YELLOW} trials * {BLUE}{BOLD}# of LM calls in your program{ENDC}{YELLOW} = ({BLUE}{BOLD}{estimated_task_model_calls_wo_module_calls} * # of LM calls in your program{ENDC}{YELLOW}) task model calls{ENDC} + {YELLOW}- Task Model: {BLUE}{BOLD}{len(trainset)}{ENDC}{YELLOW} examples in dev set * {BLUE}{BOLD}{num_trials}{ENDC}{YELLOW} trials * {BLUE}{BOLD}# of LM calls in your program{ENDC}{YELLOW} = ({BLUE}{BOLD}{estimated_task_model_calls_wo_module_calls} * # of LM calls in your program{ENDC}{YELLOW}) task model calls{ENDC} {YELLOW}- Prompt Model: # data summarizer calls (max {BLUE}{BOLD}10{ENDC}{YELLOW}) + {BLUE}{BOLD}{self.n}{ENDC}{YELLOW} * {BLUE}{BOLD}{len(student.predictors())}{ENDC}{YELLOW} lm calls in program = {BLUE}{BOLD}{estimated_prompt_model_calls}{ENDC}{YELLOW} prompt model calls{ENDC} {YELLOW}{BOLD}Estimated Cost Calculation:{ENDC} @@ -326,7 +307,7 @@ def compile(self, student, *, devset, max_bootstrapped_demos, max_labeled_demos, For a preliminary estimate of potential costs, we recommend you perform your own calculations based on the task and prompt models you intend to use. If the projected costs exceed your budget or expectations, you may consider: - {YELLOW}- Reducing the number of trials (`trials_num`), the size of the trainset, or the number of LM calls in your program.{ENDC} + {YELLOW}- Reducing the number of trials (`num_trials`), the size of the trainset, or the number of LM calls in your program.{ENDC} {YELLOW}- Using a cheaper task model to optimize the prompt.{ENDC} To proceed with the execution of this program, please confirm by typing {BLUE}'y'{ENDC} for yes or {BLUE}'n'{ENDC} for no. @@ -348,7 +329,7 @@ def compile(self, student, *, devset, max_bootstrapped_demos, max_labeled_demos, else: # Set up program and evaluation function module = student.deepcopy() - evaluate = Evaluate(devset=devset, metric=self.metric, **eval_kwargs) + evaluate = Evaluate(devset=trainset, metric=self.metric, **eval_kwargs) # In the case where the bootstrapped and labeled demos are set to 0, we'll stil bootstrap examples to use in our meta prompt if max_bootstrapped_demos==0 and max_labeled_demos==0: #TODO: address case when max_bootstrapped alone is 0 @@ -371,10 +352,10 @@ def compile(self, student, *, devset, max_bootstrapped_demos, max_labeled_demos, # Create a new basic bootstrap few - shot program . 
rng = random.Random(i) - shuffled_devset = devset[:] # Create a copy of devset - rng.shuffle(shuffled_devset) # Shuffle the copy + shuffled_trainset = trainset[:] # Create a copy of devset + rng.shuffle(shuffled_trainset) # Shuffle the copy tp = BootstrapFewShot(metric = self.metric, max_bootstrapped_demos=max_bootstrapped_demos_for_candidate_gen, max_labeled_demos=max_labeled_demos_for_candidate_gen, teacher_settings=self.teacher_settings) - candidate_program = tp.compile(student=module.deepcopy(), trainset=shuffled_devset) + candidate_program = tp.compile(student=module.deepcopy(), trainset=shuffled_trainset) # Store the candidate demos for module_p, candidate_p in zip(module.predictors(), candidate_program.predictors()): @@ -383,7 +364,7 @@ def compile(self, student, *, devset, max_bootstrapped_demos, max_labeled_demos, demo_candidates[id(module_p)].append(candidate_p.demos) # Generate N candidate prompts - instruction_candidates, _ = self._generate_first_N_candidates(module, self.n, view_data, view_examples, demo_candidates, devset) + instruction_candidates, _ = self._generate_first_N_candidates(module, self.n, view_data, view_examples, demo_candidates, trainset) # Reset demo_candidates to None for our optimization if the user asked for no fewshot examples if max_bootstrapped_demos==0 and max_labeled_demos==0: @@ -397,7 +378,7 @@ def compile(self, student, *, devset, max_bootstrapped_demos, max_labeled_demos, trial_logs = {} # Define our trial objective - def create_objective(baseline_program, instruction_candidates, demo_candidates, evaluate, devset): + def create_objective(baseline_program, instruction_candidates, demo_candidates, evaluate, trainset): def objective(trial): nonlocal best_program, best_score, trial_num, trial_logs # Allow access to the outer variables candidate_program = baseline_program.deepcopy() @@ -444,17 +425,17 @@ def objective(trial): # Evaluate with the new prompts total_score = 0 batch_size = 100 - num_batches = math.ceil(len(devset) / batch_size) + num_batches = math.ceil(len(trainset) / batch_size) for i in range(num_batches): start_index = i * batch_size - end_index = min((i + 1) * batch_size, len(devset)) - split_dev = devset[start_index:end_index] - split_score = evaluate(candidate_program, devset=split_dev, display_table=0) + end_index = min((i + 1) * batch_size, len(trainset)) + split_trainset = trainset[start_index:end_index] + split_score = evaluate(candidate_program, devset=split_trainset, display_table=0) if self.verbose: print(f"{i}st split score: {split_score}") - total_score += split_score * len(split_dev) - curr_weighted_avg_score = total_score / min((i+1)*100,len(devset)) + total_score += split_score * len(split_trainset) + curr_weighted_avg_score = total_score / min((i+1)*100,len(trainset)) if self.verbose: print(f"curr average score: {curr_weighted_avg_score}") trial.report(curr_weighted_avg_score, i) @@ -487,10 +468,10 @@ def objective(trial): return objective # Run the trial - objective_function = create_objective(module, instruction_candidates, demo_candidates, evaluate, devset) + objective_function = create_objective(module, instruction_candidates, demo_candidates, evaluate, trainset) sampler = optuna.samplers.TPESampler(seed=seed) study = optuna.create_study(direction="maximize", sampler=sampler) - score = study.optimize(objective_function, n_trials=trials_num) + score = study.optimize(objective_function, n_trials=num_trials) if best_program is not None and self.track_stats: best_program.trial_logs = trial_logs diff --git 
a/dspy/teleprompt/signature_opt.py b/dspy/teleprompt/signature_opt.py index 04da45a9c9..bacd01d6cc 100644 --- a/dspy/teleprompt/signature_opt.py +++ b/dspy/teleprompt/signature_opt.py @@ -1,4 +1,5 @@ from .copro_optimizer import COPRO +import warnings """ =============================================================== DEPRECATED!!! @@ -32,4 +33,13 @@ class SignatureOptimizer(COPRO): def __init__(self, prompt_model=None, metric=None, breadth=10, depth=3, init_temperature=1.4, verbose=False, track_stats=False): - super().__init__(prompt_model, metric, breadth, depth, init_temperature, verbose, track_stats) \ No newline at end of file + # warnings.warn( + # "`SignatureOptimizer` is deprecated and will be removed in a future version. " + # "Use `COPRO` instead.", + # DeprecationWarning + # ) + print(u"\u001b[31m[WARNING] SignatureOptimizer has been deprecated and replaced with COPRO. SignatureOptimizer will be removed in a future release. \u001b[31m") + super().__init__(prompt_model, metric, breadth, depth, init_temperature, verbose, track_stats) + + def compile(self, student, *, devset, eval_kwargs): + super().compile(student, trainset=devset, eval_kwargs=eval_kwargs) \ No newline at end of file diff --git a/dspy/teleprompt/signature_opt_bayesian.py b/dspy/teleprompt/signature_opt_bayesian.py index a4091eafac..4e6076dc50 100644 --- a/dspy/teleprompt/signature_opt_bayesian.py +++ b/dspy/teleprompt/signature_opt_bayesian.py @@ -1,4 +1,5 @@ from dspy.teleprompt.mipro_optimizer import MIPRO +import warnings """ =============================================================== @@ -36,6 +37,35 @@ class BayesianSignatureOptimizer(MIPRO): def __init__(self, prompt_model=None, task_model=None, teacher_settings={}, n=10, metric=None, init_temperature=1.0, verbose=False, track_stats=True, view_data_batch_size=10): + # warnings.warn( + # "`BayesianSignatureOptimizer` is deprecated and will be removed in a future version. " + # "Use `MIPRO` instead.", + # DeprecationWarning + # ) print(u"\u001b[31m[WARNING] BayesianSignatureOptimizer has been deprecated and replaced with MIPRO. BayesianSignatureOptimizer will be removed in a future release. \u001b[31m") + + super().__init__(prompt_model, task_model, teacher_settings,n,metric,init_temperature,verbose,track_stats,view_data_batch_size) + + def compile(self, student, *, devset, max_bootstrapped_demos, max_labeled_demos, eval_kwargs, seed=42, view_data=True, view_examples=True, requires_permission_to_run=True, trials_num=None, optuna_trials_num=None): + # Define ANSI escape codes for colors + YELLOW = '\033[93m' + BLUE = '\033[94m' + BOLD = '\033[1m' + ENDC = '\033[0m' # Resets the color to default - super().__init__(prompt_model, task_model, teacher_settings,n,metric,init_temperature,verbose,track_stats,view_data_batch_size) \ No newline at end of file + # Check if both trials_num and optuna_trials_num are None + if trials_num is None and optuna_trials_num is None: + raise ValueError(f"{YELLOW}{BOLD}You must specify the number of trials using the 'trials_num' parameter.{ENDC}") + + # Check if the deprecated parameter is used + if optuna_trials_num is not None: + # Issue a deprecation warning + warnings.warn( + "`optuna_trials_num` is deprecated and will be removed in a future version. 
" + "Use `trials_num` instead.", + DeprecationWarning + ) + # Use trials_num as a fallback if trials_num is not provided + if trials_num is None: + trials_num = optuna_trials_num + super().compile(student, trainset=devset, max_bootstrapped_demos=max_bootstrapped_demos, max_labeled_demos=max_labeled_demos, eval_kwargs=eval_kwargs, seed=seed, view_data=view_data, view_examples=view_examples, requires_permission_to_run=requires_permission_to_run, num_trials=trials_num) \ No newline at end of file From 1a80a9bdf0c5798749f6ac21ef910a94643f2719 Mon Sep 17 00:00:00 2001 From: Michael Ryan Date: Thu, 7 Mar 2024 01:44:00 -0800 Subject: [PATCH 138/243] ensure deprecated optimizers return expected output --- dspy/teleprompt/signature_opt.py | 2 +- dspy/teleprompt/signature_opt_bayesian.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dspy/teleprompt/signature_opt.py b/dspy/teleprompt/signature_opt.py index bacd01d6cc..ea4bfa61c1 100644 --- a/dspy/teleprompt/signature_opt.py +++ b/dspy/teleprompt/signature_opt.py @@ -42,4 +42,4 @@ def __init__(self, prompt_model=None, metric=None, breadth=10, depth=3, init_tem super().__init__(prompt_model, metric, breadth, depth, init_temperature, verbose, track_stats) def compile(self, student, *, devset, eval_kwargs): - super().compile(student, trainset=devset, eval_kwargs=eval_kwargs) \ No newline at end of file + return super().compile(student, trainset=devset, eval_kwargs=eval_kwargs) \ No newline at end of file diff --git a/dspy/teleprompt/signature_opt_bayesian.py b/dspy/teleprompt/signature_opt_bayesian.py index 4e6076dc50..e1caef71d1 100644 --- a/dspy/teleprompt/signature_opt_bayesian.py +++ b/dspy/teleprompt/signature_opt_bayesian.py @@ -68,4 +68,4 @@ def compile(self, student, *, devset, max_bootstrapped_demos, max_labeled_demos, # Use trials_num as a fallback if trials_num is not provided if trials_num is None: trials_num = optuna_trials_num - super().compile(student, trainset=devset, max_bootstrapped_demos=max_bootstrapped_demos, max_labeled_demos=max_labeled_demos, eval_kwargs=eval_kwargs, seed=seed, view_data=view_data, view_examples=view_examples, requires_permission_to_run=requires_permission_to_run, num_trials=trials_num) \ No newline at end of file + return super().compile(student, trainset=devset, max_bootstrapped_demos=max_bootstrapped_demos, max_labeled_demos=max_labeled_demos, eval_kwargs=eval_kwargs, seed=seed, view_data=view_data, view_examples=view_examples, requires_permission_to_run=requires_permission_to_run, num_trials=trials_num) \ No newline at end of file From 0c7f82e52ffdd431b509148178b41944f527283d Mon Sep 17 00:00:00 2001 From: chentienan Date: Thu, 7 Mar 2024 18:07:50 +0800 Subject: [PATCH 139/243] FIX: BootstrapFewShotWithRandomSearch.metric_threshold is not set in __init__ * will result in AttributeError: 'BootstrapFewShotWithRandomSearch' object has no attribute 'metric_threshold' when execute `compile` * can be reproduced by executing the following colab's `Compilation With Assertions` part. 
** https://colab.research.google.com/github/stanfordnlp/dspy/blob/main/examples/longformqa/longformqa_assertions.ipynb#scrollTo=544FmRbtuzgS
---
 dspy/teleprompt/random_search.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/dspy/teleprompt/random_search.py b/dspy/teleprompt/random_search.py
index e1663cae80..b95bd6e461 100644
--- a/dspy/teleprompt/random_search.py
+++ b/dspy/teleprompt/random_search.py
@@ -31,6 +31,7 @@ def __init__(self, metric, teacher_settings={}, max_bootstrapped_demos=4, max_la
         self.num_threads = num_threads
 
         self.stop_at_score = stop_at_score
+        self.metric_threshold = metric_threshold
         self.min_num_samples = 1
         self.max_num_samples = max_bootstrapped_demos
         self.max_errors = max_errors

From eb6cf859d27fe99a2cf037f81e206430249b2916 Mon Sep 17 00:00:00 2001
From: klopsahlong
Date: Thu, 7 Mar 2024 10:51:01 -0800
Subject: [PATCH 140/243] additional updates to notebook, merging in refactor
 changes

---
 dspy/teleprompt/mipro_optimizer.py          |  12 +-
 examples/qa/hotpot/hotpotqa_optimized.ipynb | 409 --------------------
 2 files changed, 6 insertions(+), 415 deletions(-)
 delete mode 100644 examples/qa/hotpot/hotpotqa_optimized.ipynb

diff --git a/dspy/teleprompt/mipro_optimizer.py b/dspy/teleprompt/mipro_optimizer.py
index e96ba9d7ec..2580e09455 100644
--- a/dspy/teleprompt/mipro_optimizer.py
+++ b/dspy/teleprompt/mipro_optimizer.py
@@ -25,7 +25,7 @@
 teleprompter = MIPROOptimizer(prompt_model=prompt_model, task_model=task_model, metric=metric, num_candidates=10, init_temperature=1.0)
 kwargs = dict(num_threads=NUM_THREADS, display_progress=True, display_table=0)
-compiled_prompt_opt = teleprompter.compile(program, trainset=trainset[:TRAIN_NUM], num_trials=100, max_bootstrapped_demos=3, max_labeled_demos=5, eval_kwargs=kwargs)
+compiled_prompt_opt = teleprompter.compile(program, trainset=trainset[:TRAIN_NUM], trials_num=100, max_bootstrapped_demos=3, max_labeled_demos=5, eval_kwargs=kwargs)
 eval_score = evaluate(compiled_prompt_opt, devset=evalset[:EVAL_NUM], **kwargs)
 
 Note that this teleprompter takes in the following parameters:
@@ -279,7 +279,7 @@ def _generate_first_N_candidates(self, module, N, view_data, view_examples, demo
 
         return candidates, evaluated_candidates
 
-    def compile(self, student, *, trainset, max_bootstrapped_demos, max_labeled_demos, eval_kwargs, seed=42, view_data=True, view_examples=True, requires_permission_to_run=True, num_trials=None):
+    def compile(self, student, *, trainset, max_bootstrapped_demos, max_labeled_demos, eval_kwargs, seed=42, view_data=True, view_examples=True, requires_permission_to_run=True, trials_num=None):
         # Define ANSI escape codes for colors
         YELLOW = '\033[93m'
         BLUE = '\033[94m'
@@ -288,7 +288,7 @@ def compile(self, student, *, trainset, max_bootstrapped_demos, max_labeled_demo
 
         random.seed(seed)
 
-        estimated_task_model_calls_wo_module_calls = len(trainset) * num_trials # M * T * P
+        estimated_task_model_calls_wo_module_calls = len(trainset) * trials_num # M * T * P
         estimated_prompt_model_calls = 10 + self.n * len(student.predictors()) # num data summary calls + N * P
 
         user_message = textwrap.dedent(f"""\
@@ -296,7 +296,7 @@ def compile(self, student, *, trainset, max_bootstrapped_demos, max_labeled_demo
 
             Please be advised that based on the parameters you have set, the maximum number of LM calls is projected as follows:
 
-            {YELLOW}- Task Model: {BLUE}{BOLD}{len(trainset)}{ENDC}{YELLOW} examples in dev set * {BLUE}{BOLD}{num_trials}{ENDC}{YELLOW} trials * {BLUE}{BOLD}# of LM calls in your program{ENDC}{YELLOW} = 
({BLUE}{BOLD}{estimated_task_model_calls_wo_module_calls} * # of LM calls in your program{ENDC}{YELLOW}) task model calls{ENDC} + {YELLOW}- Task Model: {BLUE}{BOLD}{len(trainset)}{ENDC}{YELLOW} examples in dev set * {BLUE}{BOLD}{trials_num}{ENDC}{YELLOW} trials * {BLUE}{BOLD}# of LM calls in your program{ENDC}{YELLOW} = ({BLUE}{BOLD}{estimated_task_model_calls_wo_module_calls} * # of LM calls in your program{ENDC}{YELLOW}) task model calls{ENDC} {YELLOW}- Prompt Model: # data summarizer calls (max {BLUE}{BOLD}10{ENDC}{YELLOW}) + {BLUE}{BOLD}{self.n}{ENDC}{YELLOW} * {BLUE}{BOLD}{len(student.predictors())}{ENDC}{YELLOW} lm calls in program = {BLUE}{BOLD}{estimated_prompt_model_calls}{ENDC}{YELLOW} prompt model calls{ENDC} {YELLOW}{BOLD}Estimated Cost Calculation:{ENDC} @@ -307,7 +307,7 @@ def compile(self, student, *, trainset, max_bootstrapped_demos, max_labeled_demo For a preliminary estimate of potential costs, we recommend you perform your own calculations based on the task and prompt models you intend to use. If the projected costs exceed your budget or expectations, you may consider: - {YELLOW}- Reducing the number of trials (`num_trials`), the size of the trainset, or the number of LM calls in your program.{ENDC} + {YELLOW}- Reducing the number of trials (`trials_num`), the size of the trainset, or the number of LM calls in your program.{ENDC} {YELLOW}- Using a cheaper task model to optimize the prompt.{ENDC} To proceed with the execution of this program, please confirm by typing {BLUE}'y'{ENDC} for yes or {BLUE}'n'{ENDC} for no. @@ -471,7 +471,7 @@ def objective(trial): objective_function = create_objective(module, instruction_candidates, demo_candidates, evaluate, trainset) sampler = optuna.samplers.TPESampler(seed=seed) study = optuna.create_study(direction="maximize", sampler=sampler) - score = study.optimize(objective_function, n_trials=num_trials) + score = study.optimize(objective_function, n_trials=trials_num) if best_program is not None and self.track_stats: best_program.trial_logs = trial_logs diff --git a/examples/qa/hotpot/hotpotqa_optimized.ipynb b/examples/qa/hotpot/hotpotqa_optimized.ipynb deleted file mode 100644 index c9b793a6a2..0000000000 --- a/examples/qa/hotpot/hotpotqa_optimized.ipynb +++ /dev/null @@ -1,409 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\"DSPy7\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Using __Multi-stage Instruction Proposal & Optimization (MIPRO)__ in DSPy" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### FAQ 🙋\n", - "#### 1) How does MIPRO work?\n", - "At a high level, the MIPRO program optimizer works by first proposing candidate fewshot example sets and instructions for each prompt in your program, and then optimizing over these fewshot example sets and instructions as hyperparameters for a specified number of trials. Each trial, the optimizer evaluates different combinations of prompts on a train set, which allows it to learn which combinations yield the best performance.\n", - "\n", - "#### 2) How much will MIPRO cost me to run?\n", - "Note that __this notebook__ is free to run, because all LM calls have been cached. 
However, when using an optimizer on your own program, here is a breakdown of the upper bound of the number of calls to the task model and prompt model respectively:\n", - "\n", - "- **Task model calls**: MIPRO makes up to __O(TxPxM)__ task model calls, where T is the number of trials, P is the number of prompts in the program, and M is the size of the train set. This is because the model is evaluating the program on the train set each trial. In practice, this should be lower given that MIPRO tunes poor trials early (ie. it may stop a trial after running on the first 100 or so examples if performance is poor).\n", - "\n", - "- **Prompt model calls**: MIPRO makes up to N*P+10 prompt model calls, where N is the number of instruction / fewshot example set candidates to generate for each prompt, and P is the number of prompts in the program. The extra 10 calls comes from generating a summary of the data in the training set, which we use in the meta prompt to create better instructions.\n", - "\n", - "#### 3) How should I configure the hyperparameters?\n", - "We have yet to run full hyperparameter sweeps with MIPRO, but based off of initial experimintation, we'd recommend the following:\n", - "- __Trial num__: Gains can be seen after about 20-30 trials. However, 100-200 trials can help with adding on additional marginal gains.\n", - "- __n__: This hyperparameter controls the number of candidate prompts and fewshot example sets that are generated to optimize over. With more trials and less prompts to optimize, we can set n to be higher, as we have more trials to explore different combinations of prompts. If your program has between 2-3 modules and is the `num_trials=30`, we'd recommend ~`n=10`. If n is higher (say `n=100`), then we can go higher to ~`n=15`. If you have a program with only 1 module and are keeping the program 0-shot (ie. no fewshot examples), then `num_trials` should be set to equal `n`, because each trial can explore a new instruction.\n", - "- __Training set size__: Between 200 and 500 training examples are recommended. Increasing the training set size can help prevent overfitting, but adds to the expense to run.\n", - "\n", - "#### 4) What should I do if I want to reduce the cost?\n", - "You can always update hyperparameters accordingly, such as using a smaller train set, using less trials, or using a program with less modules.\n", - "Alternatively, one strategy would be to optimize using a cheaper task model (ie. locally hosted Llama-2), as initial experiments have shown that prompts optimized for a smaller model also transfer to working well on a larger model.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 0] Setup" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "First, we'll __load in the cached requests__ for this tasks, so that we don't actually need to call any LMs for this notebook." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from huggingface_hub import hf_hub_download\n", - "import zipfile\n", - "import os\n", - "\n", - "# Define the repository ID on Hugging Face\n", - "repo_id = 'kopsahlong/test3'\n", - "cache_file_path = hf_hub_download(repo_id=repo_id, filename='notebook_cache_v3.zip')\n", - "compiled_program_file_path = hf_hub_download(repo_id=repo_id, filename='compiled_program.pickle')\n", - "# Unzipping the file\n", - "with zipfile.ZipFile(cache_file_path, 'r') as zip_ref:\n", - " zip_ref.extractall(\".\")\n", - "\n", - "os.environ[\"DSP_NOTEBOOK_CACHEDIR\"] = \"notebook_cache\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# TODO: add in DSPy setup" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We will also specify the __prompt LM model__ (in this case GPT 3.5), the __task LM model__ (Llama 13B) and the retrieval model we'll be using for our task (a HotPotQA multihop retrieval task)." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os \n", - "import dspy\n", - "import openai\n", - "import os\n", - "\n", - "### NOTE: if you'd like to run this code without a cache, you can remove these lines to configure your OPEN AI key ###\n", - "# os.environ['OPENAI_API_KEY'] = \"TODO: ADD YOUR OPEN AI KEY HERE\"\n", - "# openai.api_key = os.environ.get('OPENAI_API_KEY')\n", - "# openai.api_base = \"https://api.openai.com/v1\"\n", - "\n", - "prompt_model_name = \"gpt-3.5-turbo-1106\"\n", - "task_model_name = \"meta-llama/Llama-2-13b-chat-hf\"\n", - "colbert_v2_endpoint = \"http://20.102.90.50:2017/wiki17_abstracts\"\n", - "\n", - "prompt_model = dspy.OpenAI(model=prompt_model_name, max_tokens=150)\n", - "task_model = dspy.HFClientTGI(model=task_model_name, port=[7140, 7141, 7142, 7143], max_tokens=150)\n", - "\n", - "colbertv2 = dspy.ColBERTv2(url=colbert_v2_endpoint)\n", - "\n", - "dspy.settings.configure(rm=colbertv2, lm=task_model)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 1] Define Task" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here, we'll define the program that we'd like to run, which is a multihop [...] (we can say that it was loosely inspired by a certain paper). We additionally load in the data, and define how we'd like to evaluate this task." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from dspy.evaluate import Evaluate\n", - "import re \n", - "from dspy.datasets import HotPotQA\n", - "\n", - "class ReturnRankedDocuments(dspy.Signature):\n", - " \"\"\"Given a question we are trying to answer and a list of passages, return a comma separated list of the numbers associated with each passage. 
These numbers should be ordered by helpfulness in answering the question, with most helpful passage number first, and the least helpful last.\"\"\"\n", - " question = dspy.InputField(desc=\"The question we're trying to answer.\")\n", - " context = dspy.InputField(desc=\"List of potentially related passages.\")\n", - " ranking = dspy.OutputField(desc=\"A comma separated list of numbers corresponding to passage indices, ranked in descending order by their helpfulness in answering our question.\")\n", - "\n", - "class RankingMultiHop(dspy.Module):\n", - " def __init__(self, hops, num_passages_to_retrieve, max_passages_in_context):\n", - " super().__init__()\n", - " self.hops = hops\n", - " self.num_passages_to_retrieve = num_passages_to_retrieve\n", - " self.max_passages_in_context = max_passages_in_context\n", - " self.retrieve = dspy.Retrieve(k = self.num_passages_to_retrieve)\n", - " self.generate_query = dspy.ChainOfThought(\"context ,question->search_query\")\n", - " self.generate_answer = dspy.ChainOfThought(\"context ,question->answer\")\n", - " self.generate_ranking = dspy.ChainOfThought(ReturnRankedDocuments)\n", - " \n", - " def forward(self,question):\n", - " context = []\n", - " full_context = []\n", - " top_context = []\n", - " max_passage_num = self.max_passages_in_context\n", - " for hop in range(self.hops):\n", - " # Get a new query\n", - " query = self.generate_query(context = context, question = question).search_query\n", - " # Get new passages\n", - " context = self.retrieve(query).passages\n", - " # Add these new passages to the previous top context \n", - " full_context = top_context + context\n", - " # Get the most important indices, ranked\n", - " most_important_indices = self.generate_ranking(question=question, context=full_context).ranking\n", - " indices = [int(num) for num in re.findall(r'\\d+', most_important_indices)]\n", - "\n", - " if len(indices) < max_passage_num:\n", - " indices = range(1,max_passage_num+1)\n", - "\n", - " valid_indices = [index-1 for index in indices if index-1 < len(context)]\n", - " top_indices = sorted(valid_indices, key=lambda x: x)[:max_passage_num+1]\n", - " most_important_context_list = [context[idx] for idx in top_indices]\n", - " # Save the top context\n", - " top_context = most_important_context_list\n", - "\n", - " return dspy.Prediction(context=context, answer=self.generate_answer(context = top_context , question = question).answer)\n", - "\n", - "program = RankingMultiHop(hops=4, num_passages_to_retrieve=5, max_passages_in_context=5)\n", - "\n", - "# Load and configure the datasets.\n", - "TRAIN_SIZE = 500\n", - "EVAL_SIZE = 500\n", - "\n", - "hotpot_dataset = HotPotQA(train_seed=1, eval_seed=2023, test_size=0)\n", - "trainset = [x.with_inputs('question') for x in hotpot_dataset.train][:TRAIN_SIZE]\n", - "devset = [x.with_inputs('question') for x in hotpot_dataset.dev][:EVAL_SIZE]\n", - "\n", - "# Set up metrics\n", - "NUM_THREADS = 10\n", - "\n", - "metric = dspy.evaluate.answer_exact_match\n", - "\n", - "# kwargs = dict(num_threads=NUM_THREADS, display_progress=True, display_table=None)\n", - "kwargs = dict(num_threads=NUM_THREADS, display_progress=True)\n", - "evaluate = Evaluate(devset=devset, metric=metric, **kwargs)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 2] Baseline Evaluation\n", - "Now, we'll quickly evaluate our baseline program so that we can see how the performance using the Prompt Optimizer compares. 
We should see performance of about __16%__ on our trainset, and __21.4%__ on our devset." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "baseline_train_score = evaluate(program,devset=trainset)\n", - "baseline_eval_score = evaluate(program, devset=devset)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 3] Optimizing with MIPRO" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### 3a] Compile Program" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import cloudpickle as pickle\n", - "from dspy.teleprompt import BayesianSignatureOptimizer\n", - "\n", - "LOAD_PRECOMPILED_PROGRAM = True\n", - "\n", - "# By default, we will load the precompiled program\n", - "if LOAD_PRECOMPILED_PROGRAM:\n", - " # Load a our precompiled program\n", - " with open(compiled_program_file_path, 'rb') as file:\n", - " # Load the data from the file\n", - " compiled_program = pickle.load(file)\n", - "# Otherwise, if desired, the program can be compiled from scratch \n", - "else:\n", - " # Define hyperparameters:\n", - " N = 10 # The number of instructions and fewshot examples that we will generate and optimize over\n", - " trials = 30 # The number of optimization trials to be run (we will test out a new combination of instructions and fewshot examples in each trial) \n", - " temperature = 1.0 # The temperature configured for generating new instructions\n", - "\n", - " # Compile\n", - " eval_kwargs = dict(num_threads=16, display_progress=True, display_table=0)\n", - " teleprompter = BayesianSignatureOptimizer(prompt_model=prompt_model, task_model=task_model, metric=metric, n=N, init_temperature=temperature, verbose=True)\n", - " compiled_program = teleprompter.compile(program.deepcopy(), devset=trainset, optuna_trials_num=trials, max_bootstrapped_demos=1,max_labeled_demos=2, eval_kwargs=eval_kwargs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "compiled_program" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### 3b] Evaluate optimized program" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "bayesian_train_score = evaluate(compiled_program, devset=trainset)\n", - "bayesian_eval_score = evaluate(compiled_program, devset=devset)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### 3c] Visualizing scores & prompts over trials" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now, let's take a look at how this optimization looked over the course of each trial. We see that, in general, performance increases as trials go on, until it saturates after ~trial 13. Note that some of the 'pruned' trials have high scores, but were pruned early because they had comparitively lower scores on the easier slices of the data." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "\n", - "trial_logs = compiled_program.trial_logs\n", - "\n", - "# Extracting trial numbers, scores, and pruning status\n", - "trial_numbers = list(trial_logs.keys())\n", - "scores = [trial_logs[trial]['score'] for trial in trial_numbers]\n", - "pruning_status = [trial_logs[trial]['pruned'] for trial in trial_numbers]\n", - "\n", - "# Plot setup\n", - "plt.figure(figsize=(5, 3))\n", - "\n", - "# Plotting each point\n", - "for trial_number, score, pruned in zip(trial_numbers, scores, pruning_status):\n", - " if pruned:\n", - " plt.scatter(trial_number, score, color='grey', label='Pruned Trial' if 'Pruned Trial' not in plt.gca().get_legend_handles_labels()[1] else \"\")\n", - " else:\n", - " plt.scatter(trial_number, score, color='green', label='Successful Trial' if 'Successful Trial' not in plt.gca().get_legend_handles_labels()[1] else \"\")\n", - "\n", - "plt.xlabel('Trial Number')\n", - "plt.ylabel('Score')\n", - "plt.title('Trial Scores with Pruning Status')\n", - "plt.grid(True)\n", - "plt.legend()\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can also visualize the best prompts discovered by MIPRO as our trials progress... " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "best_score = 0\n", - "\n", - "def get_signature(predictor):\n", - " if (hasattr(predictor, 'extended_signature')):\n", - " return predictor.extended_signature\n", - " elif (hasattr(predictor, 'signature')):\n", - " return predictor.signature\n", - "\n", - "print(f\"Basline program | Score: {best_score}:\")\n", - "for i,predictor in enumerate(program.predictors()):\n", - " print(f\"Prompt {i+1} Instruction: {get_signature(predictor).instructions}\")\n", - "print() \n", - "\n", - "print(\"----------------\")\n", - "\n", - "for trial_num in compiled_program.trial_logs:\n", - " program_score = compiled_program.trial_logs[trial_num][\"score\"]\n", - " program_pruned = compiled_program.trial_logs[trial_num][\"pruned\"]\n", - " if program_score > best_score and not program_pruned:\n", - " best_score = program_score\n", - " best_program_so_far = compiled_program.trial_logs[trial_num][\"program\"]\n", - " if trial_num % 5 == 0:\n", - " print(f\"Best program after {trial_num} trials | Score: {best_score}:\")\n", - " for i,predictor in enumerate(best_program_so_far.predictors()):\n", - " print(f\"Prompt {i+1} Instruction: {get_signature(predictor).instructions}\")\n", - " print() " - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "dspy_test", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} From d366d629b0ecea51d4e2f5c736e54fe8c7ba20cb Mon Sep 17 00:00:00 2001 From: klopsahlong Date: Thu, 7 Mar 2024 10:52:04 -0800 Subject: [PATCH 141/243] updates to notebook --- examples/qa/hotpot/hotpotqa_optimized.ipynb | 666 ++++++++++++++++++++ 1 file changed, 666 insertions(+) create mode 100644 examples/qa/hotpot/hotpotqa_optimized.ipynb diff --git a/examples/qa/hotpot/hotpotqa_optimized.ipynb b/examples/qa/hotpot/hotpotqa_optimized.ipynb new file mode 100644 
index 0000000000..e97102d971 --- /dev/null +++ b/examples/qa/hotpot/hotpotqa_optimized.ipynb @@ -0,0 +1,666 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "li3F9kMOqZHz" + }, + "source": [ + "\"DSPy7" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3wEDck3ZqZH0" + }, + "source": [ + "# Using __Multi-stage Instruction Proposal & Optimization (MIPRO)__ in DSPy" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7DzBCQ0UqZH0" + }, + "source": [ + "### FAQ 🙋\n", + "#### 1) How does MIPRO work?\n", + "At a high level, the MIPRO program optimizer works by first __proposing__ candidate fewshot example sets and instructions for each prompt in your program, and then __optimizing__ over these fewshot example sets and instructions as hyperparameters for a specified number of trials. Each trial, the optimizer evaluates different combinations of prompts on a train set, which allows it to learn which combinations yield the best performance.\n", + "\n", + "#### 2) How much will MIPRO cost me to run?\n", + "Note that __this notebook__ is free to run, because all LM calls have been cached. However, when using an optimizer on your own program, here is a breakdown of the upper bound of the number of calls to the task model and prompt model respectively:\n", + "\n", + "- **Task model calls**: MIPRO makes up to __O(TxPxM)__ task model calls, where T is the number of trials, P is the number of prompts in the program, and M is the size of the train set. This is because the model is evaluating the program on the train set each trial. In practice, this should be lower given that MIPRO tunes poor trials early (ie. it may stop a trial after running on the first 100 or so examples if performance is poor).\n", + "\n", + "- **Prompt model calls**: MIPRO makes up to N*P+10 prompt model calls, where N is the number of instruction / fewshot example set candidates to generate for each prompt, and P is the number of prompts in the program. The extra 10 calls comes from generating a summary of the data in the training set, which we use in the meta prompt to create better instructions.\n", + "\n", + "#### 3) How should I configure the hyperparameters?\n", + "We have yet to run full hyperparameter sweeps with MIPRO, but based off of initial experimintation, we'd recommend the following:\n", + "- __Trial num__: Gains can be seen after about 20-30 trials. However, 100-200 trials can help with adding on additional marginal gains.\n", + "- __n__: This hyperparameter controls the number of candidate prompts and fewshot example sets that are generated to optimize over. With more trials and less prompts to optimize, we can set n to be higher, as we have more trials to explore different combinations of prompts. If your program has between 2-3 modules and is the `num_trials=30`, we'd recommend ~`n=10`. If n is higher (say `n=100`), then we can go higher to ~`n=15`. If you have a program with only 1 module and are keeping the program 0-shot (ie. no fewshot examples), then `num_trials` should be set to equal `n`, because each trial can explore a new instruction.\n", + "- __Training set size__: Between 200 and 500 training examples are recommended. 
Increasing the training set size can help prevent overfitting, but adds to the expense to run.\n",
+    "\n",
+    "#### 4) What should I do if I want to reduce the cost?\n",
+    "You can always update hyperparameters accordingly, such as using a smaller train set, using fewer trials, or using a program with fewer modules.\n",
+    "Alternatively, one strategy would be to optimize using a cheaper task model (i.e. a locally hosted Llama-2), as initial experiments have shown that prompts optimized for a smaller model also transfer to working well on a larger model.\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "SgTc-CutqZH1"
+   },
+   "source": [
+    "### 0] Setup"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "rbGIXWcqqZH1"
+   },
+   "source": [
+    "First, we'll __load in the cached requests__ for this task, so that we don't actually need to call any LMs for this notebook. We'll also load in our pre-optimized program from Hugging Face to inspect later."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "l4Fsh7EhqZH1",
+    "outputId": "bc43f9ad-e090-4e5d-bd54-1e998225b44d"
+   },
+   "outputs": [],
+   "source": [
+    "from huggingface_hub import hf_hub_download\n",
+    "import zipfile\n",
+    "import os\n",
+    "\n",
+    "repo_id = 'kopsahlong/DSPy_MIPRO_notebook_cache'\n",
+    "cache_file_path = hf_hub_download(repo_id=repo_id, filename='MIPRO_notebook_cache.zip')\n",
+    "compiled_program_file_path = hf_hub_download(repo_id=repo_id, filename='compiled_program.pickle')\n",
+    "with zipfile.ZipFile(cache_file_path, 'r') as zip_ref:\n",
+    "    zip_ref.extractall(\".\")\n",
+    "os.environ[\"DSP_NOTEBOOK_CACHEDIR\"] = f\"{os.getcwd()}/MIPRO_notebook_cache\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "5Vo4Tb9srSow"
+   },
+   "source": [
+    "Next, we will install __DSPy__ if it's not there already."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "JpijP_d7qZH2",
+    "outputId": "daf24b9e-7030-4bf1-a08f-ff8b4ad42e22"
+   },
+   "outputs": [],
+   "source": [
+    "%load_ext autoreload\n",
+    "%autoreload 2\n",
+    "\n",
+    "import sys\n",
+    "import os\n",
+    "import regex as re\n",
+    "\n",
+    "try: # When on Google Colab, let's clone the notebook so we download the cache.\n",
+    "    import google.colab\n",
+    "    repo_path = 'dspy'\n",
+    "\n",
+    "    !git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path\n",
+    "except:\n",
+    "    repo_path = '.'\n",
+    "\n",
+    "if repo_path not in sys.path:\n",
+    "    sys.path.append(repo_path)\n",
+    "\n",
+    "\n",
+    "import pkg_resources # Install the package if it's not installed\n",
+    "if \"dspy-ai\" not in {pkg.key for pkg in pkg_resources.working_set}:\n",
+    "    !pip install -U pip\n",
+    "    !pip install dspy-ai\n",
+    "    !pip install openai~=0.28.1\n",
+    "    !pip install -e $repo_path\n",
+    "    !pip install --upgrade cloudpickle==3.0.0\n",
+    "\n",
+    "import dspy"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "U-DaFCBvqZH2"
+   },
+   "source": [
+    "We will also specify the __prompt LM model__ (in this case GPT 3.5), the __task LM model__ (Llama 13B) and the retrieval model we'll be using for our task (a HotPotQA multihop retrieval task).\n",
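+    "\n",
+    "To make the FAQ's cost bounds concrete before we start, here is a minimal sketch of the arithmetic, assuming purely illustrative values of T=30 trials, P=3 prompts, M=500 train examples, and N=10 candidates (these numbers are assumptions for illustration, not measurements from this notebook):\n",
+    "\n",
+    "```python\n",
+    "# Upper bounds on LM calls, following the FAQ above (illustrative values only).\n",
+    "T, P, M, N = 30, 3, 500, 10\n",
+    "max_task_model_calls = T * P * M      # O(TxPxM) bound: 45,000 task model calls\n",
+    "max_prompt_model_calls = N * P + 10   # N*P candidate proposals + up to 10 data summary calls: 40\n",
+    "```"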
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "id": "UHWzGRVgqZH2" + }, + "outputs": [], + "source": [ + "### NOTE: if you'd like to run this code without a cache, you can remove these lines to configure your OPEN AI key ###\n", + "# os.environ['OPENAI_API_KEY'] = \"TODO: ADD YOUR OPEN AI KEY HERE\"\n", + "# openai.api_key = os.environ.get('OPENAI_API_KEY')\n", + "# openai.api_base = \"https://api.openai.com/v1\"\n", + "\n", + "prompt_model_name = \"gpt-3.5-turbo-1106\"\n", + "task_model_name = \"meta-llama/Llama-2-13b-chat-hf\"\n", + "colbert_v2_endpoint = \"http://20.102.90.50:2017/wiki17_abstracts\"\n", + "\n", + "prompt_model = dspy.OpenAI(model=prompt_model_name, max_tokens=150)\n", + "task_model = dspy.HFClientTGI(model=task_model_name, port=[7140, 7141, 7142, 7143], max_tokens=150)\n", + "\n", + "colbertv2 = dspy.ColBERTv2(url=colbert_v2_endpoint)\n", + "\n", + "dspy.settings.configure(rm=colbertv2, lm=task_model)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BFoPwDrUqZH2" + }, + "source": [ + "### 1] Define Task" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "s4Cyb1JtqZH2" + }, + "source": [ + "Here, we'll define the program that we'd like to run, which is a multihop [...] (we can say that it was loosely inspired by a certain paper). We additionally load in the data, and define how we'd like to evaluate this task." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "hiVgd3N7qZH3", + "outputId": "09e1ea66-7c8d-438a-8c37-1ab96fe8cdf0" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/lfs/0/kristaoo/miniconda3/envs/dspy_test/lib/python3.10/site-packages/datasets/table.py:1421: FutureWarning: promote has been superseded by mode='default'.\n", + " table = cls._concat_blocks(blocks, axis=0)\n" + ] + } + ], + "source": [ + "import re\n", + "from dspy.evaluate import Evaluate\n", + "from dspy.datasets import HotPotQA\n", + "from dsp.utils import EM\n", + "\n", + "class ReturnRankedDocuments(dspy.Signature):\n", + " \"\"\"Given a question we are trying to answer and a list of passages, return a comma separated list of the numbers associated with each passage. 
These numbers should be ordered by helpfulness in answering the question, with most helpful passage number first, and the least helpful last.\"\"\"\n", + " question = dspy.InputField(desc=\"The question we're trying to answer.\")\n", + " context = dspy.InputField(desc=\"List of potentially related passages.\")\n", + " ranking = dspy.OutputField(desc=\"A comma separated list of numbers corresponding to passage indices, ranked in descending order by their helpfulness in answering our question.\")\n", + "\n", + "class RankingMultiHop(dspy.Module):\n", + " def __init__(self, hops, num_passages_to_retrieve, max_passages_in_context):\n", + " super().__init__()\n", + " self.hops = hops\n", + " self.num_passages_to_retrieve = num_passages_to_retrieve\n", + " self.max_passages_in_context = max_passages_in_context\n", + " self.retrieve = dspy.Retrieve(k = self.num_passages_to_retrieve)\n", + " self.generate_query = dspy.ChainOfThought(\"context ,question->search_query\")\n", + " self.generate_answer = dspy.ChainOfThought(\"context ,question->answer\")\n", + " self.generate_ranking = dspy.ChainOfThought(ReturnRankedDocuments)\n", + "\n", + " def forward(self,question):\n", + " context = []\n", + " full_context = []\n", + " top_context = []\n", + " max_passage_num = self.max_passages_in_context\n", + " for hop in range(self.hops):\n", + " # Get a new query\n", + " query = self.generate_query(context = context, question = question).search_query\n", + " # Get new passages\n", + " context = self.retrieve(query).passages\n", + " # Add these new passages to the previous top context\n", + " full_context = top_context + context\n", + " # Get the most important indices, ranked\n", + " most_important_indices = self.generate_ranking(question=question, context=full_context).ranking\n", + " indices = [int(num) for num in re.findall(r'\\d+', most_important_indices)]\n", + "\n", + " if len(indices) < max_passage_num:\n", + " indices = range(1,max_passage_num+1)\n", + "\n", + " valid_indices = [index-1 for index in indices if index-1 < len(context)]\n", + " top_indices = sorted(valid_indices, key=lambda x: x)[:max_passage_num+1]\n", + " most_important_context_list = [context[idx] for idx in top_indices]\n", + " # Save the top context\n", + " top_context = most_important_context_list\n", + "\n", + " return dspy.Prediction(context=context, answer=self.generate_answer(context = top_context , question = question).answer)\n", + "\n", + "program = RankingMultiHop(hops=4, num_passages_to_retrieve=5, max_passages_in_context=5)\n", + "\n", + "# Load and configure the datasets.\n", + "TRAIN_SIZE = 500\n", + "EVAL_SIZE = 500\n", + "\n", + "hotpot_dataset = HotPotQA(train_seed=1, eval_seed=2023, test_size=0)\n", + "trainset = [x.with_inputs('question') for x in hotpot_dataset.train][:TRAIN_SIZE]\n", + "devset = [x.with_inputs('question') for x in hotpot_dataset.dev][:EVAL_SIZE]\n", + "\n", + "# Set up metrics\n", + "NUM_THREADS = 10\n", + "\n", + "metric = dspy.evaluate.answer_exact_match\n", + "\n", + "kwargs = dict(num_threads=NUM_THREADS, display_progress=True)\n", + "evaluate = Evaluate(devset=devset, metric=metric, **kwargs)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hRGld1C1qZH3" + }, + "source": [ + "### 2] Baseline Evaluation\n", + "Now, we'll quickly evaluate our baseline program so that we can see how the performance using the Prompt Optimizer compares. We should see performance of about __21.6%__ on our trainset, and __22.6%__ on our devset." 
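+    ,
+    "\n",
+    "The metric driving these numbers is exact match (`dspy.evaluate.answer_exact_match`). As a minimal sketch of what such a DSPy metric boils down to (hand-rolled here purely for illustration; the notebook uses the built-in one):\n",
+    "\n",
+    "```python\n",
+    "# Illustrative only: a DSPy-style metric takes an example and a prediction\n",
+    "# (plus an optional trace) and returns a score. EM was imported above from dsp.utils.\n",
+    "def simple_exact_match(example, pred, trace=None):\n",
+    "    return EM(pred.answer, [example.answer])\n",
+    "```"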
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "MU2aHQBTqZH3", + "outputId": "fd60fbb3-ca89-4ecb-911b-24751f220cc6" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 108 / 500 (21.6): 100%|██████████| 500/500 [00:33<00:00, 14.90it/s]\n", + "/lfs/0/kristaoo/dspy/dspy/evaluate/evaluate.py:145: FutureWarning: DataFrame.applymap has been deprecated. Use DataFrame.map instead.\n", + " df = df.applymap(truncate_cell)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 108 / 500 (21.6%)\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 113 / 500 (22.6): 100%|██████████| 500/500 [00:33<00:00, 15.00it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 113 / 500 (22.6%)\n" + ] + } + ], + "source": [ + "baseline_train_score = evaluate(program,devset=trainset)\n", + "baseline_eval_score = evaluate(program, devset=devset)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "cjCoL27yqZH3" + }, + "source": [ + "### 3] Optimizing with MIPRO" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "eEGOKjXprc7z" + }, + "source": [ + "Now let's get into the key method in this notebook - optimizing our program with MIPRO!" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "G04L5j9iqZH3" + }, + "source": [ + "#### 3a] Compile Program\n", + "First, we'll get our optimized program. By default, we set `LOAD_PRECOMPILED_PROGRAM` to `True`, so that you can quickly access a program we've precompiled for you. However, if you wish to optimize yourself, `LOAD_PRECOMPILED_PROGRAM` can be set to `False` (though please note that this will require adding in your own LM API keys in the __Setup__ section above).\n", + "\n", + "MIPRO only needs a metric, DSPy module, and training set to see huge gains on your task! 
You can instantiate a MIPRO Optimizer and compile in just two lines:\n",
+    "```python\n",
+    "teleprompter = MIPRO(prompt_model=prompt_model, task_model=task_model, metric=metric, num_candidates=N, init_temperature=temperature)\n",
+    "compiled_program = teleprompter.compile(program, trainset=trainset, trials_num=trials, max_bootstrapped_demos=1, max_labeled_demos=2, eval_kwargs=eval_kwargs)\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "TpFW7IaYqZH3",
+    "outputId": "7125bbed-b79e-4178-c390-fc81cc698106"
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "/lfs/0/kristaoo/dspy/examples/qa/hotpot/MIPRO_notebook_cache/compiler\n"
+     ]
+    }
+   ],
+   "source": [
+    "import cloudpickle as pickle\n",
+    "from dspy.teleprompt.mipro_optimizer import MIPRO\n",
+    "\n",
+    "LOAD_PRECOMPILED_PROGRAM = True\n",
+    "\n",
+    "# By default, we will load the precompiled program\n",
+    "if LOAD_PRECOMPILED_PROGRAM:\n",
+    "    # Load our precompiled program\n",
+    "    with open(compiled_program_file_path, 'rb') as file:\n",
+    "        # Load the data from the file\n",
+    "        compiled_program = pickle.load(file)\n",
+    "# Otherwise, if desired, the program can be compiled from scratch\n",
+    "else:\n",
+    "    # Define hyperparameters:\n",
+    "    N = 10 # The number of instructions and fewshot examples that we will generate and optimize over\n",
+    "    trials = 30 # The number of optimization trials to be run (we will test out a new combination of instructions and fewshot examples in each trial)\n",
+    "    temperature = 1.0 # The temperature configured for generating new instructions\n",
+    "\n",
+    "    # Compile\n",
+    "    eval_kwargs = dict(num_threads=16, display_progress=True, display_table=0)\n",
+    "    teleprompter = MIPRO(prompt_model=prompt_model, task_model=task_model, metric=metric, num_candidates=N, init_temperature=temperature, verbose=True)\n",
+    "    compiled_program = teleprompter.compile(program, trainset=trainset, trials_num=trials, max_bootstrapped_demos=1, max_labeled_demos=2, eval_kwargs=eval_kwargs)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "uqVVnaEBqZH3"
+   },
+   "source": [
+    "#### 3b] Evaluate optimized program\n",
+    "Now, we evaluate our program that has been optimized with MIPRO. We see that performance on train and dev has improved by __+20pt__ and __+17.8pt__, respectively!\n",
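+    "\n",
+    "It can also be instructive to look at what MIPRO actually changed. Below is a minimal sketch for printing the optimized instructions, assuming each predictor exposes an `extended_signature` (or falls back to a plain `signature`) carrying an `instructions` string:\n",
+    "\n",
+    "```python\n",
+    "# Print the instruction MIPRO selected for each prompt in the compiled program.\n",
+    "for i, predictor in enumerate(compiled_program.predictors()):\n",
+    "    signature = getattr(predictor, 'extended_signature', None) or predictor.signature\n",
+    "    print(f'Prompt {i+1} instruction: {signature.instructions}')\n",
+    "```"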
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "VvnBp7huqZH3",
+    "outputId": "fdba6cfe-51b4-4ea1-f5af-9c2a5fed2064"
+   },
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Average Metric: 208 / 500 (41.6): 100%|██████████| 500/500 [00:32<00:00, 15.27it/s]\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Average Metric: 208 / 500 (41.6%)\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Average Metric: 202 / 500 (40.4): 100%|██████████| 500/500 [00:33<00:00, 15.04it/s]"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Average Metric: 202 / 500 (40.4%)\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "bayesian_train_score = evaluate(compiled_program, devset=trainset)\n",
+    "bayesian_eval_score = evaluate(compiled_program, devset=devset)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "j3UWn_UnqZH4"
+   },
+   "source": [
+    "#### 3c] Visualizing scores & prompts over trials"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "pBYTLTwWqZH4"
+   },
+   "source": [
+    "Now, let's take a look at how this optimization looked over the course of each trial. We see that, in general, __performance increases over time__, until it saturates after ~trial #13. Note that some of the 'pruned' trials have high scores, but were pruned early because they had comparatively lower scores on the easier slices of the data."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/",
+     "height": 333
+    },
+    "id": "rtMUNeicqZH4",
+    "outputId": "453b554f-4654-4184-d125-758288ddd6ee"
+   },
+   "outputs": [
+    {
+     "data": {
+      "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAckAAAE8CAYAAACrYErbAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/H5lhTAAAACXBIWXMAAA9hAAAPYQGoP6dpAABL40lEQVR4nO3deVyU1f4H8M+A7MOAKJuxqgio4IJaZCCaC26pYGZ6u+JWFpRmWnq7Jtri0q1cMuqWqTfDXC5atxQ1V3BLyTWJlNCxRDFKB0TZ5vz+8DXzcxwGmGGGGZjP+/XipfOs3zPPzHyf5zznnEcihBAgIiIiLTbmDoCIiMhSMUkSERHpwCRJRESkA5MkERGRDkySREREOjBJEhER6cAkSUREpAOTJBERkQ5MkkRERDowSVKt4uLiEBcXZ9C6EokEqampRo3HmunzfkokEqSkpJg2oEa2du1aSCQSXLp0ydyhkBVhkmzmJBJJvf72799vthhv3LiB6dOnIywsDE5OTvDy8kKvXr3w2muvobS01GxxWbrDhw8jNTUVN2/eNOp2L126pPHZsLW1RUBAAEaNGoVTp04ZdV9NSXZ2NgYPHoyHHnoIjo6OCAgIwPDhw5Genq5epqysDKmpqQ36PpnquJJhWpg7ADKtL774QuP1f/7zH+zevVtrenh4eI3r79q1y2SxAcCff/6JHj16QKFQYNKkSQgLC0NxcTHOnDmDtLQ0PP/885BKpSaNoam4c+cOWrT4/6/s4cOHsWDBAiQlJcHd3d3o+3v66acxZMgQVFdXIzc3F2lpadixYweOHj2Krl27Gn1/dXnmmWcwduxYODg4NPq+N2/ejKeeegpdu3bF9OnT0bJlSxQUFODgwYP49NNPMW7cOAD3kuSCBQsAwOAaGFMfV9IPk2Qz97e//U3j9dGjR7F7926t6Q8qKyuDs7Mz7O3tTRkeVq9eDblcjkOHDuHRRx/VmKdQKEy+//vdvn0bLi4ujbY/fTk6Ojbq/rp3767xOenduzeeeOIJpKWl4ZNPPqlxHVO+h7a2trC1tTXJtuuSmpqKjh074ujRo1qfyaKiIrPERI2D1a2EuLg4dO7cGTk5OYiNjYWzszP+8Y9/qOfdf0ZcUVGBN954A1FRUXBzc4OLiwtiYmKwb98+g/adn58PW1tbPPLII1rzZDKZVmI4duwYhgwZgpYtW8LFxQWRkZFYvny5xjJ79+5FTEwMXFxc4O7ujhEjRiA3N1djmdTUVEgkEpw/fx7jxo1Dy5Yt8dhjj6nnr1+/HlFRUXBycoKHhwfGjh2LK1euaGzjwoULSExMhI+PDxwdHeHn54exY8fi1q1bOsu7YsUK2NraalSlvffee5BIJJg5c6Z6WnV1NVxdXfHaa6+pp91/TzI1NRWzZ88GAAQHB6urRh+8X7dt2zZ07twZDg4O6NSpEzIzM3XGVpd+/foBAAoKCgD8/z3CAwcO4IUXXoCXlxf8/PwAAElJSQgKCtLahup9v5/q/mldsdZ0TzIoKAjDhg1DdnY2evXqBUdHR7Rt2xb/+c9/tPZ95swZ9OnTB05OTvDz88Nbb72FNWvW1Os+Z35+Pnr27FnjSZuXlxeAe9XUnp6eAIAFCxaoj4nqmJ05cwZJSUlo27YtHB0d4ePjg0mTJqG4uFjj/dF1XFXV4GvXrtWK4cH71SUlJZgxYwaCgoLg4OAALy8vDBgwAD/++GOt5SRtvJIkAEBxcTEGDx6MsWPH4m9/+xu8vb1rXE6hUOCzzz7D008/jalTp6KkpASrV6/GoEGD8MMPP+hdDRcYGIjq6mp88cUXmDBhQq3L7t69G8OGDYOvry+mT58OHx8f5Obm4ttvv8X06dMBAN9//z0GDx6Mtm3bIjU1FXfu3MHKlSvRu3dv/Pjjj1o/3E8++SRCQkLwzjvvQPXUuLfffhvz5s3DmDFjMGXKFNy4cQMrV65EbGwsTp48CXd3d1RUVGDQoEEoLy/Hiy++CB8fH/z+++/49ttvcfPmTbi5udVYhpiYGCiVSmRnZ2PYsGEAgKysLNjY2CArK0u93MmTJ1FaWorY2Ngat5OQkIBffvkFGzZswAcffIDWrVsDgPpHGrh3Dy0jIwMvvPACXF1dsWLFCiQmJkIul6NVq1a1vtc1yc/PBwCtdV944QV4enrijTfewO3bt/XebkNjvXjxIkaPHo3JkydjwoQJ+Pzzz5GUlISoqCh06tQJAPD777+jb9++kEgkmDt3LlxcXPDZZ5/Vu+o2MDAQe/bswW+//aY+EXiQp6en+hbBqFGjkJCQAACIjIwEcO/z++uvv2LixInw8fHBTz/9hH//+9/46aefcPToUUgkklqP640bN+oVKwBMmzYNW7ZsQUpKCjp27Iji4mJkZ2cjNzcX3bt3r/d2CIAgq5KcnCwePOx9+vQRAMTHH3+stXyfPn1Enz591K+rqqpEeXm5xjJ//fWX8Pb2FpMmTdKYDkDMnz+/1niuXbsmPD09BQARFhYmpk2bJtLT08XNmzc1lquqqhLBwcEiMDBQ/PXXXxrzlEql+v9du3YVXl5eori4WD3t9OnTwsbGRvz9739XT5s/f74AIJ5++mmNbV26dEnY2tqKt99+W2P62bNnRYsWLdTTT548KQCIzZs311q+B1VXVwuZTCZeffVVdeytWrUSTz75pLC1tRUlJSVCCCHef/99YWNjo1HWB9/Pd999VwAQBQUFWvsBIOzt7cXFixc13gcAYuXKlbXGWFBQIACIBQsWiBs3bohr166J/fv3i27dugkA4r///a8QQog1a9YIAOKxxx4TVVVVGtuYMGGCCAwM1Nq26n03JFbV/u4vb2BgoAAgDh48qJ5WVFQkHBwcxCuvvKKe9uKLLwqJRCJOnjypnlZcXCw8PDx0vof3W716tTrOvn37innz5omsrCxRXV2tsdyNGzd0fu7Lysq0pm3YsEErfl3HVXVc1qxZo7WdB/fp5uYmkpOTay0T1Q+rWwkA4ODggIkTJ9a5nK2trbrKSalU4s8//0RVVRV69OhhUFWOt7c3Tp8+jWnTpuGvv/7Cxx9/jHHjxsHLywtvvvmm+uru5MmTKCgowIwZM7QaM6iq7woLC3Hq1CkkJSXBw8NDPT8yMhIDBgzA9u3btfY/bdo0jdcZGRlQKpUYM2YM/vjjD/Wfj48PQkJC1NXKqivFnTt3oqysrN7ltbGxwaOPPoqDBw8CAHJzc1FcXIw5c+ZACIEjR44AuHd12blz5wY13Ojfvz/atWunfh0ZGQmZTIZff/21XuvPnz8fnp6e8PHxQVxcHPLz87FkyRL1FZLK1KlTG3yvsCGxduzYETExMerXnp6eCA0N1Vg3MzMT0dHRGjUdHh4eGD9+fL3imzRpEjIzMxEXF4fs7Gy8+eabiImJQUhICA4fPlyvbTg5Oan/f/fuXfzxxx/q2wzGrgZ1d3fHsWPHcPXqVaNu1xoxSRIA4KGHHqp3I5l169YhMjISjo6OaNWqFTw9PfHdd9/Vei+uNr6+vkhLS0NhYSHy8vKwYsUKdfXd6tWrAfx/VV/nzp11bu
[... base64 PNG data truncated — scatter plot "Trial Scores with Pruning Status"; x-axis: Trial Number, y-axis: Score; green = successful trials, grey = pruned trials ...]",
+      "text/plain": [
+       "<Figure size 500x300 with 1 Axes>"
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "\n", + "trial_logs = compiled_program.trial_logs\n", + "\n", + "# Extracting trial numbers, scores, and pruning status\n", + "trial_numbers = list(trial_logs.keys())\n", + "scores = [trial_logs[trial]['score'] for trial in trial_numbers]\n", + "pruning_status = [trial_logs[trial]['pruned'] for trial in trial_numbers]\n", + "\n", + "# Plot setup\n", + "plt.figure(figsize=(5, 3))\n", + "\n", + "# Plotting each point\n", + "for trial_number, score, pruned in zip(trial_numbers, scores, pruning_status):\n", + " if pruned:\n", + " plt.scatter(trial_number, score, color='grey', label='Pruned Trial' if 'Pruned Trial' not in plt.gca().get_legend_handles_labels()[1] else \"\")\n", + " else:\n", + " plt.scatter(trial_number, score, color='green', label='Successful Trial' if 'Successful Trial' not in plt.gca().get_legend_handles_labels()[1] else \"\")\n", + "\n", + "plt.xlabel('Trial Number')\n", + "plt.ylabel('Score')\n", + "plt.title('Trial Scores with Pruning Status')\n", + "plt.grid(True)\n", + "plt.legend()\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5_Rwxfa1qZH4" + }, + "source": [ + "We can also __visualize the best prompts__ discovered by MIPRO as our trials progress... (though note that score increases are also due to the selected fewshot examples, which are not shown here for conciseness)." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "NnARfPRHqZH4", + "outputId": "9a9a9276-a56d-4f8d-c1d6-218d5d3d12f2" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Basline program | Score: 0:\n", + "Prompt 1 Instruction: Given the fields `context`, `question`, produce the fields `search_query`.\n", + "Prompt 2 Instruction: Given the fields `context`, `question`, produce the fields `answer`.\n", + "Prompt 3 Instruction: Given a question we are trying to answer and a list of passages, return a comma separated list of the numbers associated with each passage. These numbers should be ordered by helpfulness in answering the question, with most helpful passage number first, and the least helpful last.\n", + "\n", + "----------------\n", + "Best program after 0 trials | Score: 35.2:\n", + "Prompt 1 Instruction: Given the fields `context` and `question`, identify the specific information being asked for and produce a concise and accurate response.\n", + "Prompt 2 Instruction: Given a fact-based question related to pop culture, history, or entertainment, identify specific works or individuals and provide a concise answer directly corresponding to the question posed.\n", + "Prompt 3 Instruction: Given a fact-based question related to pop culture, history, or entertainment, and a list of passages, rank the passages by their relevance to the question. 
Return a comma-separated list of the numbers associated with each passage, ordered from the most relevant (helpful) to the least relevant.\n", + "\n", + "Best program after 5 trials | Score: 35.6:\n", + "Prompt 1 Instruction: Given a fact-based question related to pop culture, history, or entertainment, with a focus on identifying specific works or individuals, generate a search query that includes the key elements of the question for precise information retrieval.\n", + "Prompt 2 Instruction: Given the information in the `context`, answer the `question` with a concise and specific response.\n", + "Prompt 3 Instruction: Given a question we are trying to answer and a list of passages, return a comma-separated list of the numbers associated with each passage, ordered by helpfulness in answering the question, with the most helpful passage number first and the least helpful last.\n", + "\n", + "Best program after 10 trials | Score: 38.2:\n", + "Prompt 1 Instruction: Given the varied syntax and focus on identifying specific works or individuals in fact-based questions related to pop culture, history, and entertainment, generate a search query that retrieves precise information relevant to the specific and varied questions posed.\n", + "Prompt 2 Instruction: Given a fact-based question related to pop culture, history, or entertainment and the corresponding context, provide a concise and accurate answer identifying specific works or individuals.\n", + "Prompt 3 Instruction: Given a fact-based question related to pop culture, history, or entertainment, and a list of relevant passages, rank the passages by their relevance to the question. Return a comma-separated list of the numbers associated with each passage in order of relevance, with the most helpful passage number first, and the least helpful last.\n", + "\n", + "Best program after 15 trials | Score: 38.2:\n", + "Prompt 1 Instruction: Given the varied syntax and focus on identifying specific works or individuals in fact-based questions related to pop culture, history, and entertainment, generate a search query that retrieves precise information relevant to the specific and varied questions posed.\n", + "Prompt 2 Instruction: Given a fact-based question related to pop culture, history, or entertainment and the corresponding context, provide a concise and accurate answer identifying specific works or individuals.\n", + "Prompt 3 Instruction: Given a fact-based question related to pop culture, history, or entertainment, and a list of relevant passages, rank the passages by their relevance to the question. Return a comma-separated list of the numbers associated with each passage in order of relevance, with the most helpful passage number first, and the least helpful last.\n", + "\n", + "Best program after 20 trials | Score: 41.6:\n", + "Prompt 1 Instruction: Given the fields `context` and `question`, determine the specific information being asked and compile a concise search query to retrieve the requested information.\n", + "Prompt 2 Instruction: Given a fact-based question related to pop culture, history, or entertainment, identify and provide the specific individual or work being referred to in the context.\n", + "Prompt 3 Instruction: Given a fact-based question related to pop culture, history, or entertainment, and a list of passages, rank the passages in order of relevance to the question. 
Return a comma-separated list of the corresponding passage numbers, with the most relevant passage first and the least relevant last.\n",
+      "\n",
+      "Best program after 25 trials | Score: 41.6:\n",
+      "Prompt 1 Instruction: Given the fields `context` and `question`, determine the specific information being asked and compile a concise search query to retrieve the requested information.\n",
+      "Prompt 2 Instruction: Given a fact-based question related to pop culture, history, or entertainment, identify and provide the specific individual or work being referred to in the context.\n",
+      "Prompt 3 Instruction: Given a fact-based question related to pop culture, history, or entertainment, and a list of passages, rank the passages in order of relevance to the question. Return a comma-separated list of the corresponding passage numbers, with the most relevant passage first and the least relevant last.\n",
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "best_score = 0\n",
+    "\n",
+    "def get_signature(predictor):\n",
+    "    if (hasattr(predictor, 'extended_signature')):\n",
+    "        return predictor.extended_signature\n",
+    "    elif (hasattr(predictor, 'signature')):\n",
+    "        return predictor.signature\n",
+    "\n",
+    "print(f\"Baseline program | Score: {best_score}:\")\n",
+    "for i,predictor in enumerate(program.predictors()):\n",
+    "    print(f\"Prompt {i+1} Instruction: {get_signature(predictor).instructions}\")\n",
+    "print()\n",
+    "\n",
+    "print(\"----------------\")\n",
+    "\n",
+    "for trial_num in compiled_program.trial_logs:\n",
+    "    program_score = compiled_program.trial_logs[trial_num][\"score\"]\n",
+    "    program_pruned = compiled_program.trial_logs[trial_num][\"pruned\"]\n",
+    "    if program_score > best_score and not program_pruned:\n",
+    "        best_score = program_score\n",
+    "        best_program_so_far = compiled_program.trial_logs[trial_num][\"program\"]\n",
+    "    if trial_num % 5 == 0:\n",
+    "        print(f\"Best program after {trial_num} trials | Score: {best_score}:\")\n",
+    "        for i,predictor in enumerate(best_program_so_far.predictors()):\n",
+    "            print(f\"Prompt {i+1} Instruction: {get_signature(predictor).instructions}\")\n",
+    "        print()"
+   ]
+  }
+ ],
+ "metadata": {
+  "colab": {
+   "provenance": []
+  },
+  "kernelspec": {
+   "display_name": "dspy_test",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.10"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}

From 26700f11145c0d1b04bd2dd1662c21313f29c1c1 Mon Sep 17 00:00:00 2001
From: klopsahlong
Date: Thu, 7 Mar 2024 10:53:32 -0800
Subject: [PATCH 142/243] notebook name change

---
 .../{hotpotqa_optimized.ipynb => hotpotqa_with_MIPRO.ipynb} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename examples/qa/hotpot/{hotpotqa_optimized.ipynb => hotpotqa_with_MIPRO.ipynb} (100%)

diff --git a/examples/qa/hotpot/hotpotqa_optimized.ipynb b/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb
similarity index 100%
rename from examples/qa/hotpot/hotpotqa_optimized.ipynb
rename to examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb

From 76e0cb1bf829bf6bf2eb3be3d79b0a20e8d90e7f Mon Sep 17 00:00:00 2001
From: klopsahlong
Date: Thu, 7 Mar 2024 11:15:30 -0800
Subject: [PATCH 143/243] minor changes to notebook+copro code (better print statements)

---
 dspy/teleprompt/copro_optimizer.py           | 6 +++---
 examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb | 8 +++++---
 2 files changed, 8
insertions(+), 6 deletions(-) diff --git a/dspy/teleprompt/copro_optimizer.py b/dspy/teleprompt/copro_optimizer.py index 4cd7e3ba3a..9d304cf9ed 100644 --- a/dspy/teleprompt/copro_optimizer.py +++ b/dspy/teleprompt/copro_optimizer.py @@ -151,7 +151,7 @@ def compile(self, student, *, trainset, eval_kwargs): # For each iteration in depth... for d in range(self.depth): # TODO: fix this so that we eval the new batch of predictors with the new best followoing predictors - if self.verbose: print(f"Starting iteration {d}/{self.depth}.") + print(f"Iteration Depth: {d+1}/{self.depth}.") latest_scores = [] @@ -176,9 +176,9 @@ def compile(self, student, *, trainset, eval_kwargs): # Score the instruction / prefix if self.verbose: print("----------------") for i,predictor in enumerate(module_clone.predictors()): - if self.verbose: print(f"Predictor {i}") + if self.verbose: print(f"Predictor {i+1}") self._print_signature(predictor) - if self.verbose: print(f"At Depth {d}/{self.depth}, Evaluating Prompt Candidate #{c_i}/{len(candidates_)} for Predictor {p_i} of {len(module.predictors())}.") + print(f"At Depth {d+1}/{self.depth}, Evaluating Prompt Candidate #{c_i+1}/{len(candidates_)} for Predictor {p_i+1} of {len(module.predictors())}.") score = evaluate(module_clone, devset=trainset, **eval_kwargs) if self.verbose and self.prompt_model: print(f"prompt_model.inspect_history(n=1) {self.prompt_model.inspect_history(n=1)}") total_calls += 1 diff --git a/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb b/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb index e97102d971..3c1df4f37d 100644 --- a/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb +++ b/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb @@ -15,7 +15,8 @@ "id": "3wEDck3ZqZH0" }, "source": [ - "# Using __Multi-stage Instruction Proposal & Optimization (MIPRO)__ in DSPy" + "# Using __Multi-stage Instruction Proposal & Optimization (MIPRO)__ in DSPy\n", + "" ] }, { @@ -136,7 +137,8 @@ " !pip install -e $repo_path\n", " !pip install --upgrade cloudpickle==3.0.0\n", "\n", - "import dspy" + "import dspy\n", + "import openai" ] }, { @@ -392,7 +394,7 @@ ], "source": [ "import cloudpickle as pickle\n", - "from dspy.teleprompt import BayesianSignatureOptimizer\n", + "from dspy.teleprompt import MIPRO\n", "\n", "LOAD_PRECOMPILED_PROGRAM = True\n", "\n", From f71416e90330e7139e679f22d63dbbb7378a34b1 Mon Sep 17 00:00:00 2001 From: klopsahlong Date: Thu, 7 Mar 2024 19:22:36 +0000 Subject: [PATCH 144/243] Automatic Style fixes --- dspy/teleprompt/__init__.py | 12 ++++++------ dspy/teleprompt/mipro_optimizer.py | 12 +++++------- dspy/teleprompt/signature_opt.py | 5 +++-- dspy/teleprompt/signature_opt_bayesian.py | 7 ++++--- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/dspy/teleprompt/__init__.py b/dspy/teleprompt/__init__.py index ba14c1b940..b8b9226338 100644 --- a/dspy/teleprompt/__init__.py +++ b/dspy/teleprompt/__init__.py @@ -1,11 +1,11 @@ -from .teleprompt import * from .bootstrap import * -from .vanilla import * -from .random_search import * +from .copro_optimizer import COPRO from .finetune import * -from .teleprompt_optuna import * from .knn_fewshot import * +from .mipro_optimizer import MIPRO +from .random_search import * from .signature_opt import SignatureOptimizer from .signature_opt_bayesian import BayesianSignatureOptimizer -from .mipro_optimizer import MIPRO -from .copro_optimizer import COPRO \ No newline at end of file +from .teleprompt import * +from .teleprompt_optuna import * +from .vanilla import * diff --git 
a/dspy/teleprompt/mipro_optimizer.py b/dspy/teleprompt/mipro_optimizer.py index 2580e09455..eb2de8ed6a 100644 --- a/dspy/teleprompt/mipro_optimizer.py +++ b/dspy/teleprompt/mipro_optimizer.py @@ -1,7 +1,8 @@ import math import random -from collections import defaultdict +import sys import textwrap +from collections import defaultdict import optuna @@ -12,9 +13,6 @@ from dspy.signatures.signature import signature_to_template from dspy.teleprompt import BootstrapFewShot from dspy.teleprompt.teleprompt import Teleprompter -import sys -import warnings - """ USAGE SUGGESTIONS: @@ -344,7 +342,7 @@ def compile(self, student, *, trainset, max_bootstrapped_demos, max_labeled_demo for i in range(self.n): if i == 0: # Story empty set of demos as default for index 0 for module_p in module.predictors(): - if id(module_p) not in demo_candidates.keys(): + if id(module_p) not in demo_candidates: demo_candidates[id(module_p)] = [] demo_candidates[id(module_p)].append([]) else: @@ -359,7 +357,7 @@ def compile(self, student, *, trainset, max_bootstrapped_demos, max_labeled_demo # Store the candidate demos for module_p, candidate_p in zip(module.predictors(), candidate_program.predictors()): - if id(module_p) not in demo_candidates.keys(): + if id(module_p) not in demo_candidates: demo_candidates[id(module_p)] = [] demo_candidates[id(module_p)].append(candidate_p.demos) @@ -442,7 +440,7 @@ def objective(trial): # Handle pruning based on the intermediate value. if trial.should_prune(): - print(f"Trial pruned.") + print("Trial pruned.") trial_logs[trial_num]["score"] = curr_weighted_avg_score trial_logs[trial_num]["pruned"] = True trial_num += 1 diff --git a/dspy/teleprompt/signature_opt.py b/dspy/teleprompt/signature_opt.py index ea4bfa61c1..e3cc931e07 100644 --- a/dspy/teleprompt/signature_opt.py +++ b/dspy/teleprompt/signature_opt.py @@ -1,5 +1,6 @@ + from .copro_optimizer import COPRO -import warnings + """ =============================================================== DEPRECATED!!! @@ -38,7 +39,7 @@ def __init__(self, prompt_model=None, metric=None, breadth=10, depth=3, init_tem # "Use `COPRO` instead.", # DeprecationWarning # ) - print(u"\u001b[31m[WARNING] SignatureOptimizer has been deprecated and replaced with COPRO. SignatureOptimizer will be removed in a future release. \u001b[31m") + print("\u001b[31m[WARNING] SignatureOptimizer has been deprecated and replaced with COPRO. SignatureOptimizer will be removed in a future release. \u001b[31m") super().__init__(prompt_model, metric, breadth, depth, init_temperature, verbose, track_stats) def compile(self, student, *, devset, eval_kwargs): diff --git a/dspy/teleprompt/signature_opt_bayesian.py b/dspy/teleprompt/signature_opt_bayesian.py index e1caef71d1..995378cf16 100644 --- a/dspy/teleprompt/signature_opt_bayesian.py +++ b/dspy/teleprompt/signature_opt_bayesian.py @@ -1,6 +1,7 @@ -from dspy.teleprompt.mipro_optimizer import MIPRO import warnings +from dspy.teleprompt.mipro_optimizer import MIPRO + """ =============================================================== DEPRECATED!!! @@ -42,7 +43,7 @@ def __init__(self, prompt_model=None, task_model=None, teacher_settings={}, n=10 # "Use `MIPRO` instead.", # DeprecationWarning # ) - print(u"\u001b[31m[WARNING] BayesianSignatureOptimizer has been deprecated and replaced with MIPRO. BayesianSignatureOptimizer will be removed in a future release. \u001b[31m") + print("\u001b[31m[WARNING] BayesianSignatureOptimizer has been deprecated and replaced with MIPRO. 
BayesianSignatureOptimizer will be removed in a future release. \u001b[31m") super().__init__(prompt_model, task_model, teacher_settings,n,metric,init_temperature,verbose,track_stats,view_data_batch_size) @@ -63,7 +64,7 @@ def compile(self, student, *, devset, max_bootstrapped_demos, max_labeled_demos, warnings.warn( "`optuna_trials_num` is deprecated and will be removed in a future version. " "Use `trials_num` instead.", - DeprecationWarning + DeprecationWarning, ) # Use trials_num as a fallback if trials_num is not provided if trials_num is None: From e155a45eaf7c845f2e242d184ad1df2f19f00714 Mon Sep 17 00:00:00 2001 From: Herumb Shandilya Date: Fri, 8 Mar 2024 02:01:50 +0530 Subject: [PATCH 145/243] Add local language model folder --- .../local_language_model_clients/HFModel.md | 7 +++ docs/api/local_language_model_clients/MLC.md | 41 +++++++++++++ .../local_language_model_clients/Ollama.md | 45 ++++++++++++++ docs/api/local_language_model_clients/TGI.md | 61 +++++++++++++++++++ .../_category_.json | 8 +++ docs/api/local_language_model_clients/vLLM.md | 31 ++++++++++ 6 files changed, 193 insertions(+) create mode 100644 docs/api/local_language_model_clients/HFModel.md create mode 100644 docs/api/local_language_model_clients/MLC.md create mode 100644 docs/api/local_language_model_clients/Ollama.md create mode 100644 docs/api/local_language_model_clients/TGI.md create mode 100644 docs/api/local_language_model_clients/_category_.json create mode 100644 docs/api/local_language_model_clients/vLLM.md diff --git a/docs/api/local_language_model_clients/HFModel.md b/docs/api/local_language_model_clients/HFModel.md new file mode 100644 index 0000000000..162238ae59 --- /dev/null +++ b/docs/api/local_language_model_clients/HFModel.md @@ -0,0 +1,7 @@ +# dspy.HFModel + +Initialize `HFModel` within your program with the desired model to load in. Here's an example call: + +```python +llama = dspy.HFModel(model = 'meta-llama/Llama-2-7b-hf') +``` \ No newline at end of file diff --git a/docs/api/local_language_model_clients/MLC.md b/docs/api/local_language_model_clients/MLC.md new file mode 100644 index 0000000000..6a36f374bc --- /dev/null +++ b/docs/api/local_language_model_clients/MLC.md @@ -0,0 +1,41 @@ +# dspy.ChatModuleClient + +## Prerequisites + +1. Install the required packages using the following commands: + + ```shell + pip install --no-deps --pre --force-reinstall mlc-ai-nightly-cu118 mlc-chat-nightly-cu118 -f https://mlc.ai/wheels + pip install transformers + git lfs install + ``` + + Adjust the pip wheels according to your OS/platform by referring to the provided commands in [MLC packages](https://mlc.ai/package/). + +## Running MLC Llama-2 models + +1. Create a directory for prebuilt models: + + ```shell + mkdir -p dist/prebuilt + ``` + +2. Clone the necessary libraries from the repository: + + ```shell + git clone https://github.com/mlc-ai/binary-mlc-llm-libs.git dist/prebuilt/lib + cd dist/prebuilt + ``` + +3. Choose a Llama-2 model from [MLC LLMs](https://huggingface.co/mlc-ai) and clone the model repository: + + ```shell + git clone https://huggingface.co/mlc-ai/mlc-chat-Llama-2-7b-chat-hf-q4f16_1 + ``` + +4. Initialize the `ChatModuleClient` within your program with the desired parameters. 
Here's an example call:

   ```python
   llama = dspy.ChatModuleClient(model='dist/prebuilt/mlc-chat-Llama-2-7b-chat-hf-q4f16_1', model_path='dist/prebuilt/lib/Llama-2-7b-chat-hf-q4f16_1-cuda.so')
   ```
+Please refer to the [official MLC repository](https://github.com/mlc-ai/mlc-llm) for more detailed information and [documentation](https://mlc.ai/mlc-llm/docs/get_started/try_out.html).
diff --git a/docs/api/local_language_model_clients/Ollama.md b/docs/api/local_language_model_clients/Ollama.md
new file mode 100644
index 0000000000..9a063e6616
--- /dev/null
+++ b/docs/api/local_language_model_clients/Ollama.md
@@ -0,0 +1,45 @@
+# dspy.OllamaLocal
+
+:::note
+Adapted from documentation provided by https://github.com/insop
+:::
+
+Ollama is a tool that lets you run LLMs locally, such as Mistral, Llama2, and Phi.
+The following are the instructions to install and run Ollama.
+
+### Prerequisites
+
+Install Ollama by following the instructions from this page:
+
+- https://ollama.ai
+
+Download model: `ollama pull`
+
+Download a model by running the `ollama pull` command. You can download Mistral, Llama2, and Phi.
+
+```bash
+# download mistral
+ollama pull mistral
+```
+
+Here is the list of other models you can download:
+- https://ollama.ai/library
+
+### Running an Ollama model
+
+Run model: `ollama run`
+
+You can test a model by running it with the `ollama run` command.
+
+```bash
+# run mistral
+ollama run mistral
+```
+
+### Sending requests to the server
+
+Here is the code to load a model through Ollama:
+
+```python
+lm = dspy.OllamaLocal(model='mistral')
+```
diff --git a/docs/api/local_language_model_clients/TGI.md b/docs/api/local_language_model_clients/TGI.md
new file mode 100644
index 0000000000..0b8311cc8b
--- /dev/null
+++ b/docs/api/local_language_model_clients/TGI.md
@@ -0,0 +1,61 @@
+# dspy.HFClientTGI
+
+## Prerequisites
+
+- Docker must be installed on your system. If you don't have Docker installed, you can get it from [here](https://docs.docker.com/get-docker/).
+
+## Setting up the Text-Generation-Inference Server
+
+1. Clone the Text-Generation-Inference repository from GitHub by executing the following command:
+
+   ```
+   git clone https://github.com/huggingface/text-generation-inference.git
+   ```
+
+2. Change into the cloned repository directory:
+
+   ```
+   cd text-generation-inference
+   ```
+
+3. Execute the Docker command under the "Get Started" section to run the server:
+
+
+   ```
+   model=meta-llama/Llama-2-7b-hf # set to the specific Hugging Face model ID you wish to use.
+   num_shard=2 # set to the number of shards you wish to use.
+   volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
+
+   docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:0.9 --model-id $model --num-shard $num_shard
+   ```
+
+   This command will start the server and make it accessible at `http://localhost:8080`.
+
+If you want to connect to [Meta Llama 2 models](https://huggingface.co/meta-llama), make sure to use version 0.9.3 (or higher) of the docker image (ghcr.io/huggingface/text-generation-inference:0.9.3) and pass in your Hugging Face token as an environment variable. 
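+
+If you prefer not to paste the token inline, one option is to export it in your shell first and let Docker forward it from the host environment — a minimal sketch, where `<your_token>` is a placeholder for your actual token:
+
+```
+# make the token available to `docker run -e HUGGING_FACE_HUB_TOKEN` below
+export HUGGING_FACE_HUB_TOKEN=<your_token>
+```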
+
+```
+  docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data -e HUGGING_FACE_HUB_TOKEN={your_token} ghcr.io/huggingface/text-generation-inference:0.9.3 --model-id $model --num-shard $num_shard
+```
+
+## Sending requests to the server
+
+Once the text-generation-inference server is running and its logs show "Connected", you can interact with it using the `HFClientTGI`.
+
+Initialize the `HFClientTGI` within your program with the desired parameters. Here is an example call:
+
+   ```python
+   lm = dspy.HFClientTGI(model="meta-llama/Llama-2-7b-hf", port=8080, url="http://localhost")
+   ```
+
+   Customize the `model`, `port`, and `url` according to your requirements. The `model` parameter should be set to the specific Hugging Face model ID you wish to use.
+
+
+### FAQs
+
+1. If your model doesn't require any shards, you still need to set a value for `num_shard`, but you don't need to include the parameter `--num-shard` on the command line.
+
+2. If your model runs into any "token exceeded" issues, you can set the following parameters on the command line to adjust the input length and token limit:
+   - `--max-input-length`: Set the maximum allowed input length for the text.
+   - `--max-total-tokens`: Set the maximum total tokens allowed for text generation.
+
+Please refer to the [official Text-Generation-Inference repository](https://github.com/huggingface/text-generation-inference) for more detailed information and documentation.
diff --git a/docs/api/local_language_model_clients/_category_.json b/docs/api/local_language_model_clients/_category_.json
new file mode 100644
index 0000000000..8965dcf411
--- /dev/null
+++ b/docs/api/local_language_model_clients/_category_.json
@@ -0,0 +1,8 @@
+{
+    "label": "Local Language Model Clients",
+    "position": 6,
+    "link": {
+        "type": "generated-index",
+        "description": "DSPy supports various methods including `built-in wrappers`, `server integration`, and `external package integration` for model loading. This documentation provides a concise introduction to loading models within DSPy, extending these capabilities for your specific needs."
+    }
+}
\ No newline at end of file
diff --git a/docs/api/local_language_model_clients/vLLM.md b/docs/api/local_language_model_clients/vLLM.md
new file mode 100644
index 0000000000..6658addd2b
--- /dev/null
+++ b/docs/api/local_language_model_clients/vLLM.md
@@ -0,0 +1,31 @@
+# dspy.HFClientVLLM
+
+### Setting up the vLLM Server
+
+Follow these steps to set up the vLLM Server:
+
+1. Build the server from source by following the instructions provided in the [Build from Source guide](https://vllm.readthedocs.io/en/latest/getting_started/installation.html#build-from-source).
+
+2. Start the server by running the following command, and specify your desired model, host, and port using the appropriate arguments. The default server address is http://localhost:8000.
+
+Example command:
+
+```bash
+ python -m vllm.entrypoints.openai.api_server --model mosaicml/mpt-7b --port 8000
+```
+
+This will launch the vLLM server.
+
+### Sending requests to the server
+
+Once the vLLM server is running and its logs show "Connected", you can interact with it using the `HFClientVLLM`.
+
+Initialize the `HFClientVLLM` within your program with the desired parameters. 
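+
+Before wiring it into a program, you can optionally confirm the server is reachable — a quick check, assuming the default address from the launch command above (`/v1/models` is the OpenAI-compatible model-listing endpoint the server exposes):
+
+```bash
+# should return a JSON listing that includes mosaicml/mpt-7b
+curl http://localhost:8000/v1/models
+```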
Here is an example call: + +```python + lm = dspy.HFClientVLLM(model="mosaicml/mpt-7b", port=8000, url="http://localhost") +``` + +Customize the `model`, `port`, `url`, and `max_tokens` according to your requirements. The `model` parameter should be set to the specific Hugging Face model ID you wish to use. + +Please refer to the [official vLLM repository](https://github.com/vllm-project/vllm) for more detailed information and documentation. From 189f858a0524b6ea2852d268175f8976a7f82fc1 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Thu, 7 Mar 2024 14:24:16 -0800 Subject: [PATCH 146/243] Fixed multi-module typed signature optimizer --- dspy/functional/functional.py | 3 + dspy/primitives/module.py | 74 +- dspy/teleprompt/signature_opt_typed.py | 4 +- examples/functional/signature_opt_typed.ipynb | 861 +++++------------- intro.ipynb | 125 ++- tests/functional/test_functional.py | 4 +- tests/primitives/test_program.py | 87 +- 7 files changed, 401 insertions(+), 757 deletions(-) diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py index 44f9f5a5d5..d24dd4d9d7 100644 --- a/dspy/functional/functional.py +++ b/dspy/functional/functional.py @@ -82,6 +82,9 @@ def __init__(self, signature, max_retries=3): def copy(self) -> "TypedPredictor": return TypedPredictor(self.signature, self.max_retries) + def __repr__(self): + return f"TypedPredictor({self.signature})" + @staticmethod def _make_example(type_) -> str: # Note: DSPy will cache this call so we only pay the first time TypedPredictor is called. diff --git a/dspy/primitives/module.py b/dspy/primitives/module.py index 90908e5c3a..33690c9242 100644 --- a/dspy/primitives/module.py +++ b/dspy/primitives/module.py @@ -1,4 +1,5 @@ import copy +from collections import deque from collections.abc import Generator import ujson @@ -9,45 +10,48 @@ def __init__(self): pass def named_parameters(self): - """ - Unlike PyTorch, handles (non-recursive) lists of parameters too. - """ - + """Unlike PyTorch, handles lists of parameters too.""" from dspy.predict.parameter import Parameter - visited = set() - named_parameters = [] - - def add_parameter(param_name, param_value): - if isinstance(param_value, Parameter) and id(param_value) not in visited: - visited.add(id(param_value)) - named_parameters.append((param_name, param_value)) - - for name, value in self.__dict__.items(): - if isinstance(value, Parameter): - add_parameter(name, value) + # Remove the 'self.' prefix from the names + return [(name[5:], param) for name, param in self.named_sub_modules(Parameter)] - elif isinstance(value, BaseModule): - # When a sub-module is pre-compiled, keep it frozen. - if not getattr(value, "_compiled", False): - for sub_name, param in value.named_parameters(): - add_parameter(f"{name}.{sub_name}", param) + def named_sub_modules(self, type_=None, skip_compiled=False) -> Generator[tuple[str, "BaseModule"], None, None]: + """Find all sub-modules in the module, as well as their names. 
- elif isinstance(value, (list, tuple)): - for idx, item in enumerate(value): - add_parameter(f"{name}[{idx}]", item) - - elif isinstance(value, dict): - for key, item in value.items(): - add_parameter(f"{name}['{key}']", item) - - return named_parameters - - def named_sub_modules(self, root_name="base") -> Generator[tuple[str, "BaseModule"], None, None]: - yield root_name, self - for name, value in self.__dict__.items(): - if isinstance(value, BaseModule): - yield from value.named_sub_modules(root_name=f"{root_name}.{name}") + Say self.children[4]['key'].sub_module is a sub-module. Then the name will be + 'children[4][key].sub_module'. But if the sub-module is accessible at different + paths, only one of the paths will be returned. + """ + if type_ is None: + type_ = BaseModule + + queue = deque([("self", self)]) + seen = {id(self)} + + def add_to_queue(name, item): + if id(item) not in seen: + seen.add(id(item)) + queue.append((name, item)) + + while queue: + name, item = queue.popleft() + if isinstance(item, type_): + yield name, item + + if isinstance(item, BaseModule): + if skip_compiled and getattr(item, "_compiled", False): + continue + for sub_name, sub_item in item.__dict__.items(): + add_to_queue(f"{name}.{sub_name}", sub_item) + + elif isinstance(item, (list, tuple)): + for i, sub_item in enumerate(item): + add_to_queue(f"{name}[{i}]", sub_item) + + elif isinstance(item, dict): + for key, sub_item in item.items(): + add_to_queue(f"{name}[{key}]", sub_item) def parameters(self): return [param for _, param in self.named_parameters()] diff --git a/dspy/teleprompt/signature_opt_typed.py b/dspy/teleprompt/signature_opt_typed.py index c86d297c14..37ae273969 100644 --- a/dspy/teleprompt/signature_opt_typed.py +++ b/dspy/teleprompt/signature_opt_typed.py @@ -272,6 +272,8 @@ def optimize_signature( pass elif strategy == "best": i = scores.index(max(scores)) + if verbose: + print(f"Best signature: {i} with score: {scores[i]}") for name, p in named_predictors: p.signature = candidates[name][i].to_signature() else: @@ -279,6 +281,6 @@ def optimize_signature( return OptimizerResult( program=module, - signatures=[{name: sigs[i].to_signature()} for name, sigs in candidates.items() for i in range(n_iterations)], + signatures=[{name: sigs[i].to_signature() for name, sigs in candidates.items()} for i in range(n_iterations)], scores=scores, ) diff --git a/examples/functional/signature_opt_typed.ipynb b/examples/functional/signature_opt_typed.ipynb index feab8d6356..81bdbe77a4 100644 --- a/examples/functional/signature_opt_typed.ipynb +++ b/examples/functional/signature_opt_typed.ipynb @@ -92,648 +92,23 @@ "execution_count": 5, "metadata": {}, "outputs": [], - "source": [ - "class BasicQA(dspy.Signature):\n", - " \"\"\"Answer questions with short factoid answers.\"\"\"\n", - "\n", - " question = dspy.InputField()\n", - " answer = dspy.OutputField(desc=\"often between 1 and 5 words\")" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Found 1 typed predictors to optimize.\n", - "Generating 6 initial signatures for base...\n", - "\n", - "================================================================================\n", - "Running eval iteration 0...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:00<00:00, 3086.59it/s]\n", - "/Users/ahle/repos/dspy/dspy/evaluate/evaluate.py:145: FutureWarning: 
DataFrame.applymap has been deprecated. Use DataFrame.map instead.\n",
-      "  df = df.applymap(truncate_cell)\n"
-     ]
-    },
-    [... ~60 deleted notebook-output cells elided: one stdout/stderr pair per eval iteration 1-28, each a tqdm progress bar plus an "Average Metric: N / 50" score (ranging from 1/50, 2.0%, to 20/50, 40.0%), with "Generating new signature for base..." / "Tested the signature, and it's not in the list of N to avoid." notices from iteration 6 onward ...]
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Average Metric: 20 / 50 (40.0%)\n",
-      "Generating new signature for base...\n",
-      "Tested the signature, and it's not in the list of 29 to avoid.\n",
-      "\n",
-      "================================================================================\n",
-      "Running eval iteration 
29...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 16 / 50 (32.0): 100%|██████████| 50/50 [00:02<00:00, 18.61it/s]\n", - "/Users/ahle/repos/dspy/dspy/evaluate/evaluate.py:145: FutureWarning: DataFrame.applymap has been deprecated. Use DataFrame.map instead.\n", - " df = df.applymap(truncate_cell)\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 16 / 50 (32.0%)\n", - "Generating new signature for base...\n", - "Tested the signature, and it's not in the list of 30 to avoid.\n", - "\n", - "================================================================================\n", - "Running eval iteration 30...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0): 100%|██████████| 50/50 [00:02<00:00, 18.23it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 50 (34.0%)\n", - "Generating new signature for base...\n", - "Tested the signature, and it's not in the list of 31 to avoid.\n", - "\n", - "================================================================================\n", - "Running eval iteration 31...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 19 / 50 (38.0): 100%|██████████| 50/50 [00:02<00:00, 20.82it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Average Metric: 19 / 50 (38.0%)\n", - "Generating new signature for base...\n", - "Tested the signature, and it's not in the list of 32 to avoid.\n", - "\n", - "================================================================================\n", - "Running eval iteration 32...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 17 / 49 (34.7): 98%|█████████▊| 49/50 [00:14<00:00, 20.66it/s]" - ] - } - ], "source": [ "from dspy.evaluate import Evaluate\n", "from dspy.evaluate.metrics import answer_exact_match\n", - "from dspy.functional import TypedPredictor\n", + "from dspy.functional import TypedPredictor, TypedChainOfThought\n", "from dspy.teleprompt.signature_opt_typed import optimize_signature\n", "\n", - "evaluator = Evaluate(devset=devset, metric=answer_exact_match, num_threads=10, display_progress=True)\n", - "\n", + "evaluator = Evaluate(devset=devset, metric=answer_exact_match, num_threads=10, display_progress=True)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ "result = optimize_signature(\n", - " student=TypedPredictor(BasicQA),\n", + " student=TypedChainOfThought(\"question -> answer\"),\n", " evaluator=evaluator,\n", " initial_prompts=6,\n", " n_iterations=100,\n", @@ -754,22 +129,7 @@ "cell_type": "code", "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "predictor = Predict(BasicQA(question -> answer\n", - " instructions='Answer questions with short factoid answers.'\n", - " question = Field(annotation=str required=True json_schema_extra={'__dspy_field_type': 'input', 'prefix': 'Question:', 'desc': '${question}'})\n", - " answer = Field(annotation=str required=True json_schema_extra={'desc': 'often between 1 and 5 words', '__dspy_field_type': 'output', 'prefix': 'Answer:'})\n", - "))" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "result.program" ] @@ -789,16 +149,16 @@ { "data": { "text/plain": [ - "[]" + "[]" ] }, - 
"execution_count": 10, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAh8AAAGdCAYAAACyzRGfAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABVjElEQVR4nO3deXhc5Xk3/u+ZVetIlmRrQZJ3MMYLYIMRNg6LA5iUQPDbXyBOYygXlNSkgK82iRuykISKpr8fIfR1TJuXmKbBcUtelkAbXDAgbPAqMIsJxjbGkrElW5al0Taj0cz5/THznDmznJlzZs6MlvP9XJcurJmR5vGx8bl13/dzP5IsyzKIiIiI8sQ22gsgIiIia2HwQURERHnF4IOIiIjyisEHERER5RWDDyIiIsorBh9ERESUVww+iIiIKK8YfBAREVFeOUZ7AfFCoRBOnDiB0tJSSJI02sshIiIiHWRZRl9fH+rq6mCzpc5tjLng48SJE2hoaBjtZRAREVEG2tvbUV9fn/I1Yy74KC0tBRBevMfjGeXVEBERkR5erxcNDQ3KfTyVMRd8iFKLx+Nh8EFERDTO6GmZYMMpERER5RWDDyIiIsorBh9ERESUVww+iIiIKK8YfBAREVFeMfggIiKivGLwQURERHnF4IOIiIjyisEHERER5RWDDyIiIsorBh9ERESUVww+iIiIKK/G3MFyRDS+dfX78dRbn2FgeCTm8QX1ZfjKRamP2SYia2DwQUSm2vTWUWx4/UjC45IEXDF7MqpK3KOwKiIaSxh8EJGpjpwaAABced5kXFDnAQA8ueMofIEQegYDDD6IiMEHEZnrWPcgAOAbTVNx9ZxqAMBz73yOE70+DMaVYojImthwSkSmkWUZbWfCmY/GimLl8UKXHQAwOBwclXUR0djC4IOITNM9MIyB4SAkCaifVKg8XuwOJ1mZ+SAigMEHEZlIlFxqPAUocNqVxwudzHwQURSDDyIyTduZcPDRWFEU87iS+fAz+CAiBh9EZKK2SOZjamVs8CF6PuJnfxCRNTH4ICLTHNPKfLDhlIhUGHwQkWnauiM7XSqLYx4vcrHhlIiiGHwQkWlE5mNqXOajiJkPIlJh8EFEphgaDuJUnx9AYs8HG06JSI3BBxGZov1sOOtRWuBAWaEz5jllq22AwQcRZRl8PPLII5AkCffff7/ymM/nw9q1a1FZWYmSkhKsWrUKnZ2d2a6TiMY4peRSWQRJkmKeK3ZHgg8/ez6IKIvgY+/evfiXf/kXLFiwIObxBx54AC+++CKeeeYZtLS04MSJE7jllluyXigRjW3KNtuK4oTnCiMNp9xqS0RAhsFHf38/Vq9ejV/96leYNGmS8nhvby+efPJJPProo7j66quxaNEibNq0CW+//TZ27dpl2qKJaOwRZ7o0xDWbAtGttkNsOCUiZHiq7dq1a/GlL30JK1aswE9/+lPl8dbWVgQCAaxYsUJ5bM6cOWhsbMTOnTtx2WWXJXwvv98Pv9+vfO71ejNZElFK77Sdxf/3PwfhD4RiHr98VhXWffHcvK8nGJLx4PMfYN45ZVi9ZGre3z8XjmkMGAPUQ8YYfBBRBsHHli1b8M4772Dv3r0Jz3V0dMDlcqG8vDzm8erqanR0dCT9fs3NzXjooYeMLoPIkF/vOIq3Dp9JeHzfsbP4fxbXo35S4g0zlw6c6MXv9rSjtOAkvnZpY0KPxHgULbsky3yE/6lh5oOIAINll/b2dtx33314+umnUVBQYMoC1q9fj97eXuWjvb3dlO9LpCaaIe+9ahae+PrFeOLrF2NOTSkAYMehrryv55Q3nO3r842gZzCQ9/c3WzAk43j3EACgMUnmQzScsueDiACDwUdraytOnTqFiy++GA6HAw6HAy0tLXj88cfhcDhQXV2N4eFh9PT0xHxdZ2cnampqkn5Pt9sNj8cT80FktmORfoQbF9bh+nm1uH5eLa69IPx3cvvh/Acfp/ujpUZRrhjPOrw+DAdDcNol1JYVJjxfqEw4ZeaDiAwGH9dccw0++OAD7N+/X/lYvHgxVq9erfza6XRi27ZtytccPHgQbW1taGpqMn3xRHr0DA7D6wv/xK0+c2T57CoAwFuHuxAMyXldU1efKviIBEbjmfg91E8qgt2WWEISDafDIyEEgqGE54nIWgz1fJSWlmLevHkxjxUXF6OyslJ5/M4778S6detQUVEBj8eDb33rW2hqakrabEqUD6IXYXKpW2l8BICFDeUocTvQMxjAgRO9WFBfnrc1dakyH+0TIPMhfg/xB8oJ6us+OBxEWSHnGxJZmen/Avz85z/Hn/3Zn2HVqlVYvnw5ampq8Oyzz5r9NkS6aZ034rTbcNmMSgDA9jz3fXT1Dyu/Fusbz7ROsxVcdhsckYwIm06JKKOttmpvvPFGzOcFBQXYsGEDNmzYkO23JjKFyHwka4Rcfm4VXv1TJ7YfOo21V83K25omWs9Hqm22ACBJEgpddvT5Rth0SkQ824UmvrYz2pM3l80K9320Hjub1+PerVZ2AbjdloiiGHzQhHesO9wM2ViZuAtjelUxzikvRCAoY/fR7rytSd1w2uH1wTfOD1yLnuuSGOAJRWK7Lc93IbI8Bh804bUp/QiJN0ZJknBFZNfL9k/y0/fhHwkqu28cNgmyDBw/O36zH72DAfQOhWeVNFQkBnhCkYsn2xJRGIMPmtD8I0Gc9PoAaPcjXDF7MgBgx+HTeVmTaDZ12iXMrg4POhvPTafq3URFLu02MvHcoJ/BB5HVMfigCe342SHIcvin7spiV9LXXD6zEpIEfNLZj45eX87XJEoulcVuTIsERG3juO9DlLWSjVVXK3JxyikRhTH4oAmtTbUFVOv8lEnFLsw/pwwAsCMP005Fs2lVqUtp0BzPmY9022wFNpwSkcDggyY0MXlTq+QiiL6PHYdyX3pRgo8St7L9dzxnPpQAL801LmTmg4giGHzQhNYWOews1S4MAFg2S/R9dCGU41HrouejqsStbP8d18FHmhkfghixzswHETH4oAmtLdKP0JCmJHDx1HIUuezo6h/Gxx19OV3T6UjPx+RSt1KqaOsezHnQkyttOmZ8ANHD5QbYcEpkeQw+KGt//OAklv3ja3i37ayhr/u7Z97DLb98K6cHjWmNVo/ndtixZHoFAGB7itLLkdP9WPFoC55953jGa1KXXerKC+CwSRgeCaGzL3fNrj97+WPctOEt9A4GdH/N0HAQX3p8O370hwOarxkeCeFEbzi7lGwrs5qS+Qiw7EJkdQw+KGv/9cFJHD87hG1/OqX7a/wjQfz+neN4p60HR7tyc6qrLMu6SwKAesutdtPp5t1tOHyqH/+5rz3jdUWDDxccdhvOmRSejZGrptOh4SD+z/ajeK+9B1s/6tD9dR983osDJ7z4j73tkOXkWZnjZweV3URV
Jcl3EwlFbmY+iCiMwQdlTdxM1SPD0xFbYAEoA6rMdqrPD/9ICHabhLpy7eFXgmg63XO0W3Pi6I7IAXRtWQQKStmlxA0AMaWXXNjzWTeGI9mlHQYO0BN/nkOBYMxZNGrHutPvJhKUIWNsOCWyPAYflDXRQGkk+FDfvI2UAowQmYS68gI47en/qs+aUoJqjxv+kRD2fpY4ar3T68PBznA/yEmvD/6RzH6CVxpOS+OCjxxlPrZ/Ei0jGWmoVf95aq2tTec2W0AdfDDzQWR1DD4oa+ImdVp1THw6YgsskLvMh7LNNk0vghAetR4pvSTJEKgfC49EHzK8puGRkPL7rYpkPkRJKFen26rLSN0Dw/jopFfX16nPn9EqCUXPdNETfETKLgw+iCyPwQdlZXgkhJ5I5kJ9s0pHbIEFgJ4cBR/ipNV0O13UlHNekgQf8Y2omWQqzgyEr5HdJqG80Akg2qiZi7LLKa8PH3f0QZKARVMnAUj+e0tGHUxqrU3Z6ZJmKzOg3mrLsguR1TH4oKyImykAnO73azYmxhNbYIEcZj4MNJsKS2eFg4+PTnqV3gwACIVk7Dh8BkA0Y6HO3ujV1Re+oVcWu2CzhXskomUX8xtvRdZjXl0ZblxQG3lM3yC1mLKLZvAROTFYR4CnDBljwymR5TH4oKyImykQzoL06TwuXZ3G9+as7KJvm61aVYkbc2s9AIC3j0QzBB939KGr349Cpx1/FrmJZ1ImETf0yZF+DyA6GfTsYABen7nXQpSKrphdhSvODZeU9n52Vtegr9MxZZfEwChmN5GOa1wc2e0yxFNtiSyPwQdlJb7JVE/pRX3TAoCeQf29Ika0d+sb+x0vWelFZAsum1GBWVNKYr6/EadVMz6EErdD2aZqZtOpLMvYHsl8LJtdhRlVxagrK8DwSAh7kjTUxovNfCT2t5zu88MXCO8mEtuFU1EOltMZoBLRxMXgg7ISvwWzS0fTqdgCK+Si7NLvH8GZgfBa9JQE1ETT6fZDp5UykghEls2eHG0QzSBQ6EoSfADRvhQz+z4OdvbhdF84W7No6iRIkoRlOs+wkWU5Jvjo6vcnBA0i86N3N5FoOPWPhBAcp9NcicgcDD4oKwmZDx3bbeNv2rloOBVlgopiF0oLnIa+dvG0SXA7bOj0+nH4VD98gSD2HA1nCq6YXZXVSHRRpqoqjR3INTUHp9tu/yQcMC2ZUQG3I5x1iAZWqZtOB4aD8AXCAWKhM/y18YGR3tNsBZH5ADjrg8jqGHxQVtQ9H4De4CMcGLgd4b9+uch8GJk/Ea/AacelkVHrbx7qwr7PzsI/EkK1x43ZU0pQV14Iu02CfySkOXxLi3j95LjMh9gtom7EzZYouYiAAwg31EpSuIflVIpx7qJ8Vui049zqcJkpPjASDbLpxqoLbocNkR5bzvogsjgGH5QVEWyI4ZZ6ej5Er8QFdeHGzlw0nOo97EzLFaryhNhiu2zWZEiSBKfdhnPKMxuJLq5PfNllqslll3C2Jrw7R/xegHAmaF5dGQDgrRRj5NWNsaIkFN/jYmR0PRCeo1IcKb0w+CCyNgYflBVxk5peFf7pV8+gMdErsKC+HADQMxjQvUVXr0y22aqJbMGuT7vx+sHwmTXLz43exBuVMomxTIVWz0djFn0kybQeOwtfIJqtURN9H6Isk3qdLtUQtNjf67EMArxCNp0SERh8UJbEdszzazwxn6cibrDzzwn/BD4Skk3/STibsgsAzKkpRVWJG0OBID7p7AcQnQECRIMFoztekm21BaKZjxM9Qxgeyf6UX6VBNpKtUbsi8vvYfrhLM+gTQWRViVuZEJtYdjF+jbndlogABh+UJXEznVNTGvN5KuKGfV5NKVz23PR9REsC+voR4kmShGWzKpXPz6/1xGQrlAZRA8FHIBjC2UExWj224XRyqRsFThtCcjgAyZYoFalLLsKiaZNQ4LThdJ9fOasmnggiq0rdSQOtmN1EBrJLonmVmQ8ia2PwQRlT30znRAZzpQs+4m9ansiI8R4TD5cLBEP4PHIDzzTzAcQ2ai6Pu4lnst22O/L7ttskTCqKDT4kSYqWctIENL1DqctUZ/r9OHAifH6LOlsjuB12LJkeDqy0TrlVl4fEuo6fHcJI5HRckfWYVOSEx8BuomI3D5cjIgYflAX1zVQM3upKM2Jd9EiIm1ZZYTgNb2bm42SPD8GQDLfDhilx5Q0jlqkCjmVxwUcmczlENqFCNVpdTewa+axLu4/khf2fY+FD/4Mte9s1X/PWkXCj6fm1noTyjqA01Go0nYrG2MklLtR4CuBy2DASknGyN7xDRhmrbjCzVMSGUyICgw/KgvpmKm7yvkAo5amlSp9A5KZVHskAmBl8dHjDN8jasoKkN3m9qj0FuP3yaVhxfrWSKRBEOad7YBh9OkeiazWbCvPOCWePUk0f/X3r8Zj/JrP9E+2Si3BhQzkA4FCknyXVWm02CQ2TYnf3ZDK6HojO+uCcDyJrY/BBGVPfoIrdDuXGkmq7bfxZIGWRskvvkHkj1tPd5I340ZcvwP9ZsxguR+z/KiVuByqLIyPRdWY/lD6KuH4PQQQLbx3uSjoBVD3sbH97T9JzYGRZVrIZqYIP0adxsjd5g6uYVFsVCSob4zI9RrfZCsx8EBHA4IOyIG5QIrUvbvap+j7it2dGgw/zMh9mBh+pKKUXnX0fyvXSWNfC+nKUuh3oGQzgwInehOf3ftatjKUPhmTsjJRX1I6c7sfJXh9cDhsumVahuZbJJW4UuewIycDxs4nr74obhiYyPWK7rQg+GjLNfLDhlMjSGHxQxuJ/khf/TbXdNlp2yWHwoezUSJ5hMIv4qV9v5kNrm63gsNvQNDNc3kk2/jy+OTRZs6j4uiXTK1DgtCc8L6gbXOPXPzg8omQmEjIf2ZZd2HBKRDAYfGzcuBELFiyAx+OBx+NBU1MT/vjHPyrPX3nllZAkKebjnnvuMX3RNDbE/3SsJ/OhVXYxc7dLspNjc8Hodls9GZnoibqJB7+9GQksblxYp/ma6HwP7ZKLoNU0K0bmFzhtKI5kKtSB1ohqN5HRrcxFznDZJVVfEBFNfIaCj/r6ejzyyCNobW3Fvn37cPXVV+Omm27CgQMHlNfcddddOHnypPLxs5/9zPRF09gQfzMVPyVrTTkNJLlp5SLzcVoc3jbmyi7pMzJie2/rsbMxTZmn+/z408nw9tm/vfZc2G0SPjszGDN7Y3gkhF2fnon5PqloHWZ3uj/csFtV4lYGlKkzHyciu4lcGewmim61ZdmFyMoMBR833ngjbrjhBsyePRvnnnsuHn74YZSUlGDXrl3Ka4qKilBTU6N8eDwe0xdNY0P8zTRd5uNEz1DCTau8aPz2fExVDoPTGXzoCIqmVhahflIhAkEZuz+N7noR57BcUOfB1MpiXNxYDiC2PPNO21kMDgdRVeJShr6lXr9G8JFknSLQ6vOPYP/xHgDhgMTobiI2nBIRkEXPRzAYxJYtWzAwMICmpibl8aeffhpVVVWYN28e1q9fj8HB1P8w+/1+eL3emA8aH+JvpqKXQWu
3i/oIdnHTymXDqVZvhVnEzfvzniEEgulHousJiiRJUpVeooGFUk6JPLdsVjizseNwtPQiekCWzqrSFRSI7c7xI+KTrbPAaUeNpyDyPuH3NNrvAXCrLRGFGQ4+PvjgA5SUlMDtduOee+7Bc889h7lz5wIAvva1r+G3v/0tXn/9daxfvx7//u//jq9//espv19zczPKysqUj4aGhsx+J5R38TepyZGGU63MR3y/B2B+8CHLckIvSq5MKXXD7bAhGJLTjkQfCYbQPaivHCRKJqKnI7x9NjK7IxJ0LFO25Z5RtuVuV7bYpi+5ALHbZ9WD4aLBmyvp60UgZGSsuhANPpj5ILIyh9EvOO+887B//3709vbi97//PdasWYOWlhbMnTsXd999t/K6+fPno7a2Ftdccw2OHDmCmTNnJv1+69evx7p165TPvV4vA5BxINnNNFp2Sd7zkWx7ptlll4HhIHyBcBYi17tdxI6RQ6f6cezMYMrmy+6BYcgyYJPCQ9lSuXxmJSQJOHSqHx29Pnh9AXR6/XA7bFg8bRIAYGF9GUoLHOgdCuCDz3sxrbII70fKIXqaTQHgnPJC2KTwIW+n+/yYEslspDp5d89n3cqU00xG1ytlFz+DDyIrM5z5cLlcmDVrFhYtWoTm5mYsXLgQv/jFL5K+dsmSJQCAw4cPa34/t9ut7J4RHzT2dQ8m3kzT9XyI0erqwVQeVeYjlGSwllGi5FPksis3ulzSu91W7MCpKHbBnqYkUl7kwoL6cgDh8eci03Cpavusw27D5TPF+Syn8faRM5Bl4NzqEtSUFehau8thQ115YcL6RTlN6+Rd5fNMMh+i4TTAsguRlWU95yMUCsHvT36z2b9/PwCgtrY227ehMSY6Wt2t3EzFbpfB4WDSU0vbusVOl8SyiyyHmxmzXleemk0FcR5LuuCjq9/YDhzl2PtDp5Xyy/K4cooor7x5qEt5jegF0asxyY6XVJmP2K81fmJwdMgYMx9EVmboR8P169dj5cqVaGxsRF9fHzZv3ow33ngDW7duxZEjR7B582bccMMNqKysxPvvv48HHngAy5cvx4IFC3K1fhol0ZtptIRQ7LKjwGmDLxBCV78fxe7oXy9ZltEWyXyo0/Vuhx2FTjuGAkH0DgaUYCTjdaUZYW62xgpx5on2YXCA6qA2nU2wy2ZX4X+/fhg7DnUp/RHxh9uJxtR3jp1V3j/VSPVkplYW4e0jZ2JmlWgGH6o/N0kC6iPnvRhR7BJzPpj5ILIyQ8HHqVOn8I1vfAMnT55EWVkZFixYgK1bt+KLX/wi2tvb8eqrr+Kxxx7DwMAAGhoasGrVKjz44IO5WjuNomQ3U0mSUFXixvGzQ+jq98f0QJwZGMbAcDBy04r9Cbqs0BkOPkzo+8jXNlshut02dcOp0XVd3DgJRS47zgxEMybx22enVhajsaIIbd2D6PT64bRLWDJDe6R6MkrmRhU8aZ1Bo/7zrPEUpJygqqUwkvnwBUIIhuS0JSgimpgMBR9PPvmk5nMNDQ1oaWnJekE0PmjdTCeXhoMPMStCEGn9ZDetskInOrw+U4KP03EHouWaKEW0nRmALMvKUK540eulLyPjcthw2YxKvPbxKQDhjEay771sdhU2724DACyaOslwn0t8z8rQcFCZPhp/DScVOVHqdqDPP5JRsykQzXwA4UbXEnfu+3KIaOzh//lxPunsgwRgdnX6IU1G7f70jDLhU6gsceMKnXMZjGg7M4h9x2KPZrfbJFx57hSUFekvbfQOBfBJZ1/CIWVaN1OtptP2FAeRifX0mHCybb622Qr1kwohSeFdNmcGhjUzG0Z7PoBwwKEOPpJZrgo+9G6xVYs/30VcP5fDhtK4wECSJDRUFOGjk96Mg48Cpw2SFO7xGRweYfBBZFH8P18lEAxh1S/fhgxg5/qrUVqQXf+B2kcnvPjqv+5K+tymOy7BVedNMe29AGD1k7vQnqQU8GcLavG/v3ax7u/z989+gP/64CT+9S8W4doLapTHtW6mWsHHkdP9AJIPpjJz1kf0ULn8BB9uhx21ngKc6PXh2JlBzeAiWsowFnwIWttnm2ZWwSYBIdl4vwcQzdx09Q+j3z+iNOxOVo1WV5teVYyPTnoxrcp4sykQDmCKnHYMDAfDTafmx/hENA4w+FDxBYLKjotdn3bji3OrTfve4idLT4EDFzaGZzX86aQXp/v8CRMms9U7GFACD5Gu9weC2H20G28cPI1AMASnPf1GJ/9IENs+7gQAbD3QGRd8aJRdNAaNiePfL4r83tVMDT6Um2d+Gk6B8A38RK8P7d2DWDQ18fcHAO2RY+uNNGnOmlKK9SvnoNBlV2ZwxCsrdOIfvjIfp/v8mH9OmeG1ewqcmFTkxNnBANq7B9M27P7VF2ag0GXH/1pUb/i9hCK3Ixx8cNAYkWUx+FAZCUbnTOw4dNrU4KM/EtRc2DgJv/nLSwEAf/fMe3im9Tj6fOZ2/otAp6rEjX+/MzxrJRiSseinr6BnMID32nuweFr6xsTWY2eVgV07Dp+O6Wk4rbF7o0oZsR4toXh9Abzb3gMg+U/n5SL4MOFk23xvtQWAqRXF2PVpd8IZKcJIMITPz4aDQaNTQf/qC8mH86ndemmjoe8Zr7GiCGcHe3HszCC6B5LP+BAW1Jfj//3z8qzejyPWiSjrOR8TyYhqyJX6XA0z9PvCN1Z1Hb2kIPzrfhPmW6gd604c5mW3SViqzI7Q93tTv67T68ehU/3K51qZD/H5aVXmY9eR8AjwaZVFyXs+TC275OdEWzURUIjrHu9Ejw8jkQP1qkv1DQDLp0Zlx85AXnYLFSnbbZn5ILIqBh8qQVXw8WnXQEJzaDbEP7TqBjsRiCQbyJWNZGeoALGDq/QQB5W5IiUaEYwEQ7LyE3L8CPNkPR870pw5UmbSiPUB/wiGAsl3auSSaL7UKp+JoCSTU2DzYapq0Fh+go9w5mOImQ8iy2LwoRJ/MukOnTdpPURpRWQ71L/uN7vscib5zhIxpOq9471pb/TdA8P48EQvAOAbTVMBRK9H98AwQnJ40FRFUWzwkexk2/gTWeOJzEdPlmUXceMscNpQ7DI+gyJTWkfTC1rB4FjRqNpuqzXjw0wi+BjglFMiy2LwoRKMO1vkTRNLL/3+8I1VnfkocYdvumaMFVcTN8H4szfqJxVhRlUxgiFZaQDV8tbhLsgyMKemFLdcHG4u3PVpN/wjQeUmX1HkgiOucVXctAaGgxgaDqK9exBHuwZgt0loipxFEs+sskv0NNbkOzVyRWQ+TvX5MZSklKAVDI4V6u22SuYjh5kjMetjMMDgg8iqGHyojMQFH28d7koISDIlshul+ch8dCcPPoBow6c4ol2LKLksm1WFOTWlqCpxYSgQxDvHelKm5kvcDrgd4b9WXf1+peRyYUM5PBpbl8sj2ZNsg4/To9DvAYTX74n8WYpdLWpaweBYIdb1+dkhdHjDJ9bmo+wyaHLQTUTjB4MPlZFQuOxSXuREiduBnsEADkRKD9kSTaXJej7MbDgdHgnhRG+4VyXZT9rLIn0XO1JkdW
RZVvpCrjh3Mmw2SZkzsf3QadVPx4mpeTFiHQg3naqDGC1mZz7yHXwA0dHjyUovqYLBsaC6tAAuhw0jIVnZop3T4EOcbMuGUyLLYvChIrbaFjjsuGxGuERg1q4X0fOhPmytOAfBx/Gzg5Dl8E+XyaZ8XjajAnabhM/ODGo2SH7aNYATvT647DZcGtmSqwQth7vS7igRKftTXh/eOhK+fsvPTR989PtHMBLXd2NEJoO8zBI9HTZ2x4ssy0rwkelU0Fyz2SQ0xM0fyeWEWLHbhVttiayLwYeKKLHYbZJys0yVITBCyXyoyy6R4MPMOR/HVDe6ZH0PpQVOXNxYDkA7sNr+STjrsXjaJOUgMFGu+eDzXnzS2QdA+wYlBny9cfA0egYDKHU7sLC+XHPNHtU18WZxLUZjwJjQGHdGitA9EJ4cmuxAvbFEfWicy26DpzB3I4CUhlNmPogsi8GHiii7OOzRMsO+Y92m/IQmgg/1nI9SZc5H9vMthFRnqAjLZoksRvK+j2RbY6s9BTi3ugSyDLx8oAOAdlOi2PHyXx+cBABcNrMyoTFVzWGPniPSM5j5+S75aJbUMrUiefAhPs/0FNh8UWdlqkpcOW3YjW61ZfBBZFUMPlRE2cVhkzC9qhjnlBciEJSx+2h3mq9MbyBF5sMXCGVVblBTmhtTBB9XRLI6bx0+k9BQGwiGlJ0w8dNIRTAiMjWaZZfI4+J1y3WcOeIxoe8jk8PbzBI93TZ58DFWd7oI6n6UXAdvypAxNpwSWRaDDxVxI3bYbJAkSbn5bv8k+9KLMucjSc8HYN7MAz07KxacU4bSAgd6hwL44PPYhtp323owMBxERbELc2s9Mc/Fz+nQmgURf/NfpuO01XITBo2NZsOpMmjs7GBMQKcnGBwLYjMfuQ4+IpkPbrUlsiwGHyoBVc8HEP1JP9221HSGR0Lwj4QzG6Xu6HZTl8OmbEvtM6n00haZppnqJ22H3YalM0VPS+zvTXy+dFZVwjTOJdMrlGmnQPrMBxA+SG2ajl0eZux46dI4byYfassK4bRLCARlZbsqMPa32QoxmY8c98ww80FEDD5UgpGeD6c9fNO9fGYlJAn4pLMfnaobilHqf2SL3bF1/1ITz3dR76xQNxAmI7IY8YPUxOdXJNkaW+RyxJzaqnWTV9+8xKm66WQbfAwOjygNjLm+eSZjt0lomJS440X04DSm+fMYbepm2FxnPoq51ZbI8iwTfJzu8+PHL36Ef3z5Y83XiJ4PkfmYVOzCgsgx5dlsuRWBRaHTntB4KcowZgwaO93nhy8Qgk0CzilPfXS7KCm9c+wsfvSHA3joxQP40R8O4P3jPQC0R6GrH68o1ii7qIIS0dyajlJ2STNiXZZl/HbXMXx0whvzuNj+63bYYkpb+SSyTeq+D/W5LmNZgdOOGk/40Lt8lV0YfBBZl2WCD68vgF+/dRRP7zqm+ZoRVc+HcFlkJPj+9rMZv3eyGR+CeMyMEetim21tWSFcjtR/tFMrizG9qhgjIRlPvf0ZNr31GZ56+zOEZODc6hLUaQQvV54XDibqJxXCqbGDpcZTALfDBpfdhqWzko9UjycaTnvSZD62HujEg89/iPu2vBvz+GlVv0c+R6urKWe8RP4cfIEgOr3hdY31ng8AmF1dAiD3zbGc80FEo/Mj4ihwRgKKVOPSleDDHr15TYqM/h4aznw3irLNtiDxcpuZ+Wgz2F/wz7ddhK0HOhCSo9fEJkm4YX6t5tdcUFeGX31jMao92j8dF7sd+PXtl8Buk5TR6enoLbu0fHIKAHDoVD8+7xlSMjyjuc1WaIzbbitKLqUFDiWzM5Y99OUL8PaRM7jqPH3Zqkwx80FElgk+7JGAIpAi+BA9H3ZVo6VosBzOYitsskPlBDN7Po4ZHOM975wyzIuUlYz44tzqtK9ZmmKcejLlheEgJdXJtuGx79Hy145Dp/HVSxoBjO6AMaExruwimk21Br6NNTMml2DG5JKcv0808xFEKCQnNDYT0cRnmbKLM/IPXKp5GgHVnA9BlC+GRzL/Ka0/so02WfAhHjOj87/tTPqdLmOVyHx4U2Q+jp0ZxPGzQ8rn6kAk3cj3fIie7xL+czAaDFqFyHwAgC+L/6+IaPyyTPAhGj1DMhDSyH4ocz5UvQzR4COLzIcvccCYIB4zY8S6crOrGNs7K5LRU3YRh91NipQw3jrcpfxZjuaMD0FkPry+EfQOBpRgsHEc/nnkUqFq0qtZ822IaHyxTPChLqWMaAQf0YbT6GvFHA4zyi6lSTMf0UPVstU+jn/SFj0RPUPa49VFpuP2y6ejxO3A2cEADkR2vShll1Hs+Sh02TEl8v7HugfG/Gm2o8VmkzhincjiLBN8OO3q4CN5ICFKMkl7PnKU+VB6PrLMfPT7R5Tx4o3j8GaXLvMxohr7ftWcydFThyMD4EbzRFu16Om2gzGH/FGs6OFy3PFCZEWWCT70ZD5E2cVpctlFbKNN1fORbeZDNDmWFznhKRj7Oyviia22vkAIviRjt9873oM+/wjKi5y4oK4sYfR9tOwyeg2nQDTw+6xrAMe7w/0pDD4ScbstkbVZJvhwqmZ3iGFi8UbixqsD0eDDb0LmI5dzPpQU/zi90ZW6HRCXPVnT6ZuRIGPprCrYbdFzd1qPncXQcDB6qNwoll2AaL/Nns+6MRwMwWGTNGemWBm32xJZm2WCD5tNUm5u6couDtO32uqZ85Hd2S7iTJexPsZbi80mpTzZdsfh2LHv4tTh4WAILZ+cVq7xqJddKsOBxp7IScj1kwpjglkKU8oubDglsiTLBB9AdHJpusyHesiYKbtdUpRdzJrzEZ0pMX5/ytbq+/D6Atjf3gMgOt5dferw8+9+DiAcKHqSBHj5JHa2iEzZeA0Gc02UXYYCLLsQWZG1gg+7mPWRZqutqkTjNKHhVGyjTT3nI7ufANvG8TZboVyMWI8bNLbzyBkEQzJmVBXHHIAmApHXPg5PPa0qcY36MK/4nS3jtQyWa8x8EFmbtYIPMWhMo+wSCCb2fJix1VYMEEs95yPbsos4PXX83uy0yi47Ilts4w+7WzqzCpIU/bMZ7X4PAKgsdsUM0WKzaXKi14lbbYmsyVDwsXHjRixYsAAejwcejwdNTU344x//qDzv8/mwdu1aVFZWoqSkBKtWrUJnZ6fpi86UGB6mvdsl0vORo7JLqTtxF0qpareLLGuPfk9lJBjC52fH/84KrbKLGC62LG5k+6RiF+arxsNPHuV+DyBcDlL/GYznYDCXCrnVlsjSDAUf9fX1eOSRR9Da2op9+/bh6quvxk033YQDBw4AAB544AG8+OKLeOaZZ9DS0oITJ07glltuycnCMyEyHwGNLEayIWMi+ND6Gj30TDgNycBQki2mepzo8WEkJMPlsCnHoo9H0UFj0eCjvXsQn50ZhN0moWlm4gm56oBktJtNBXXphQPGkivmbhciSzPUnXfjjTfGfP7www9j48aN2LVrF+rr6/Hkk09i8+bNuPrqqwEAmzZtwvnnn49du3bhsssuM2/VGRJBhdbJtiNK2UU158Mug
g85o0OwQiEZ/cPaPR+FTjtsUjj46PeNKI14RoiSS8OkwnF9SFey813EVNOLGspRmmR+yRWzJ+OXbxwBAFSVju6MD2Gqqsl0PGeicqmQcz6ILC3jrQHBYBDPPPMMBgYG0NTUhNbWVgQCAaxYsUJ5zZw5c9DY2IidO3dqBh9+vx9+v1/53Ov1ZrqktByqQCIZkflwJsl8AOHeggKbPeHrUhkMBCGqKcm22kqShGK3A32+EfT5RzBFx/d89H8O4u3ItE8A6B6ITDYd5zc6EXy89P4JfPh5L4BoYBXf7yFcPLUchU47hgLBMZP5EAf7VZW4MwomrUBkPl7+sAMfn+wb5dUQWU9deSEev+2iUXt/w/8yfvDBB2hqaoLP50NJSQmee+45zJ07F/v374fL5UJ5eXnM66urq9HR0aH5/Zqbm/HQQw8ZXngmortdkpdQRM+HPUnPBxAJPpzGgg9RcrHbJKV5NV5pJPjQM2K9dyiAx187nPS5+fXlhtY21syMHOfe1T+sDA0TVpxfnfRr3A47rr2gGi/sP4EL6sqSvibfFkT6UBbUj431jEUiO5Tsz5qIcm/G4Oj+f2c4+DjvvPOwf/9+9Pb24ve//z3WrFmDlpaWjBewfv16rFu3Tvnc6/WioaEh4++Xit6yS7IhY0BmTafiULkSt0NzG2hJgQPo1TfroyfyF8btsOEXt16oPO522nF5kp6I8eTqOVPwn3/VhO4Bf8zjtWWFmHeO9o38H74yH3+1fCbm1nlyvURdFjaU4w/3Lh3X255z7dq51fiPuy/D2VH+B5DIqkY7K2v43V0uF2bNmgUAWLRoEfbu3Ytf/OIX+OpXv4rh4WH09PTEZD86OztRU1Oj+f3cbjfc7vyky8X8jkDaU22jAYckSXDZbRgOhjIKPlLN+BCMnO8iZmBUFLtw/bxaw+sZyyRJwqXTKwx/XbHbMWYCD2HBOM9C5ZrNJmHJjPEdLBNR5rKe8xEKheD3+7Fo0SI4nU5s27ZNee7gwYNoa2tDU1NTtm9jCnGybVBjzkcwyYRTILvttmKIUrJ+D6Ek0kipt+wCRPsjiIiIxhtDmY/169dj5cqVaGxsRF9fHzZv3ow33ngDW7duRVlZGe68806sW7cOFRUV8Hg8+Na3voWmpqYxsdMFiA4P02o4Fdtp48/icDlsgD+zQWPqsouWUgOZDxF8eBh8EBHROGUo+Dh16hS+8Y1v4OTJkygrK8OCBQuwdetWfPGLXwQA/PznP4fNZsOqVavg9/tx3XXX4Ze//GVOFp4JZchYmvHq6hNwAdXhctmUXVJlPoyUXSLBRzmDDyIiGqcMBR9PPvlkyucLCgqwYcMGbNiwIatF5Uq68eqi5yNp5gPRw8KMSHWonBAdsZ4++PCy7EJEROOctc52SZP5GEkyXh3IrudD9HGk6vkoVjIf6c93Yc8HERGNd5YKPpzpMh/BxN0ugKrsklHPRzj4KE6xrUnp+dCR+RBbbcUociIiovHGUsGHXQk+Um+11Sq7ZNTzkeJEW0E8Z6ThlJkPIiIarywVfDjTll0Sh4wB5pRdzJrzoQQfRWPjHBMiIiKjLBV8KOPVNTIfQa2eD6XsYvwEzgF/+p4PI5kPMWSMmQ8iIhqvLBV8KGUXjd4NzZ4PM8oubu1gwUjPB3e7EBHReGep4EPM7zDc85HFnI9+PXM+Muj54JwPIiIarywVfNiVU23TDBnT2mqr8XWp6Jrz4dY35yMQDGFgOFz6YeaDiIjGK0sFH+m22qYcr45MT7XV0fMRCT78I6kPrxNZD4Dj1YmIaPyyVPAhhoxpne0STHKqLWDObpfiFJkP9XMDKUovIvgoLXAkBEhERETjhbWCD1vqU21HtE61zXC3i38kqAwmS1V2cdptKHCG3yNV3wd3uhAR0URgreDDnvpUW7ELJn7OhzvDzId690qq4CP8fDigSBV8iJ0unG5KRETjmbWCj0g5JZinCacikChy2dOWSUp17HjhdFMiIpoILBZ8pG44je52MedsFz07XYQSHbM+xLkuDD6IiGg8s1bwkabhVGzB1cp8+DMsu6Sa8SEo221TZj7Cz5UVcrQ6ERGNX9YKPpSGU62yi8Z49SzLLqV6Mh8F6TMfLLsQEdFEYK3gQ2k4TQwiQiEZIiYxa6ttv44TbQVlxLo/oPmaniGWXYiIaPyzWPChfaqteuS65nh1gz0fYmJpsSt98FGso+eDu12IiGgisFbwYdM+1VZditEcr57DzId4TeqeD5ZdiIho/LNo8JEYRKgfi898ZDvnQ1fPR+Q1qSaccsgYERFNBJYKPpypyi6qx+J7PpzZbrXV0/PBOR9ERGQRlgo+7CkzH+HgQ5LMHzImppemoudk2x4GH0RENAFYKvgQvRzJMh/RQ+USJ5EqDad5mPOhlfnwBYLK+7PhlIiIxjNLBR+inJKs4VRsv002Bl3JfGRYdjFjzocoudhtkq6JqURERGOVpYIPu1277KKMVrclXpJMyy59Bsarl6Y5WE40m3oKHJCk1OfEEBERjWWWCj5EYJFqzofdnnhjd2ea+fCFA4ZiHcFHsdse+ZrUmY/yIo5WJyKi8c1SwYc9xZwPZbR60p6PcGCQ8Xh1A3M++odHIMuJ6xPBh4fNpkRENM5ZKviINpwm2e0SFA2n5pVdlIZTA2UXWQYGh4MJz/NEWyIimigsFXykOtVW9HykajgdCckIaRxKFy8UkjEQCSL07HYpcNqU907W96GUXRh8EBHROGet4CPFqbZaJ9oC0eAD0N/3MTAcDSD0ZD4kSUo568PLGR9ERDRBWCv4SLHbJVp20Z7zAQB+naUXkb1w2iWlYTWdVLM+OGCMiIgmCkPBR3NzMy655BKUlpZiypQpuPnmm3Hw4MGY11x55ZWQJCnm45577jF10ZlKNecjOmQs8ZKoD5rT2/eh7vfQuzW2NMWsj16eaEtERBOEoeCjpaUFa9euxa5du/DKK68gEAjg2muvxcDAQMzr7rrrLpw8eVL5+NnPfmbqojOlHCyXpOcjkKLnQ5Ikw4PG+gyc6yJEMx+BhOe424WIiCYKQ6MyX3755ZjPn3rqKUyZMgWtra1Yvny58nhRURFqamrMWaGJRNklkCSACEZKMc4kPR8A4LbbMDwSyiDzoT9YKE7R8yGGjLHhlIiIxrusej56e3sBABUVFTGPP/3006iqqsK8efOwfv16DA4Oan4Pv98Pr9cb85EroqSStOE0qJ35AIxvt40eKmfXvb6SFCfbsuGUiIgmiowPCQmFQrj//vuxdOlSzJs3T3n8a1/7GqZOnYq6ujq8//77+M53voODBw/i2WefTfp9mpub8dBDD2W6DEOiDacyZFmO6cUYSdHzAWQQfBiY8SGIM2AGUmy1LWPPBxERjXMZBx9r167Fhx9+iB07dsQ8fvfddyu/nj9/Pmpra3HNNdfgyJEjmDlzZsL3Wb9+PdatW6d87vV60dDQkOmyUlKf2zISkmNKLErwoVF2ifZ8JA4ASyba86E/WFC22sYFH7IsK7tdygs5Xp2IiMa3jIKPe++9Fy+99BLefPNN1NfXp3zt
kiVLAACHDx9OGny43W643e5MlmGY+tyWYEiGU1URET0fWmUXZ2S7rd6ttgMGDpUTtE62HRgOKqUill2IiGi8M9TzIcsy7r33Xjz33HN47bXXMH369LRfs3//fgBAbW1tRgs0k3qGR3zTaSDFnA8gOuvDaM+HnnNdBK05H6Lk4rLbUOC01GgWIiKagAxlPtauXYvNmzfjhRdeQGlpKTo6OgAAZWVlKCwsxJEjR7B582bccMMNqKysxPvvv48HHngAy5cvx4IFC3LyGzDCqRoWFt90qsz5sJvT89GXSc+HRuZDOdelyKl7ZggREdFYZSj42LhxI4DwIDG1TZs24fbbb4fL5cKrr76Kxx57DAMDA2hoaMCqVavw4IMPmrbgbKiTGvHnu0QbTlP3fCQ7FyaZ/kzKLpFtufE9H73c6UJERBOIoeAj2VHvag0NDWhpaclqQbkkSRKcdgmBoJwwYl2cdKvV8+E22HDa7wsHDEaGjBVHtuXGZz64zZaIiCYSyzUQ2DWmnIqyi1Or7JJhz0dGZRd/fNmFA8aIiGjisFzw4dQ432UkxXh1IPOej2JDwUc4uDg7OIyQan0suxAR0URiueBDGTQWTF52SdfzoXerrS8QLs8Uu/RPOJ1WWYwilx19vhF83NGnPM4BY0RENJFYLviwp8l8aA4Zsxs7WG4oEnwUOPUHHy6HDZfNqAQAbD90Wnm8h5kPIiKaQCwXfDjtqXs+zBqvPjQcDj4KDWQ+AGDZrCoAwI7DXcpjLLsQEdFEYrngI3q+S/IhY2b1fPgC4dcVGsh8AMDyc8PBx+6j3UrpRux2KWfZhYiIJgDrBR8aZRcxXj3t2S46go+RYEgpzxgNPmZOLkGNpwDDIyHs/awbQHS3CzMfREQ0EVgw+AgHF/Hj1dMNGXMb6PnwqQIUo2UXSZKwbHY4+7H9ULj0wrILERFNJJYLPkRZJX68+ohSdsm+50P0ewDR4WRGXKEZfPBEWyIiGv8sF3yIIWLxDaci8+E0oedD9GoUOu0ZncWyNNJ0+qeTXpzy+uD1MfNBREQTh+WCD9HTEV92ET0f9jRbbf06yi7RbbaZXd6qEjcuqPMAAF4+0AEx1Z7BBxERTQTWCz7SlF20ej6cGWY+MiX6Pl567yQAoMhlV7IvRERE45nl7mZit0tAa8iYVs+HgbNdRM9HgcFmU7UrZk0GAOyJ7Hhh1oOIiCYK6wUfGuPVg+kmnBppODUh87F42qSYZlUGH0RENFFYL/gQp9rGZT5ED4jWkDERCMT3iiRjRtmlwGnHpdMrlM8ZfBAR0URhveBDY7dLUNntkmarrYGGU6MzPuItnz1Z+TWDDyIimiisF3woDafJh4xpjle3hwMJfT0f4dcYOVQuGdF0CnC0OhERTRzWCz7sonwS33Bq3nh1M3o+AGBOTSmqStwAmPkgIqKJw3LBh9OW/GC56Fbb1GUXf5622gLhUetfODdceqn2FGT1vYiIiMYKx2gvIN/sGg2nwbRlFwM9H8Pm9HwAwHeuPw/zzvFg1aL6rL8XERHRWGC54EOr4TSQ5mC5TMou2fZ8AMAUTwHuWDo96+9DREQ0Vliv7KI55yN1z4d7FHo+iIiIJiLLBR9aZRe9PR96yi6+4ezOdiEiIprILHd3VE611Rivnq7nIxiSE86FiecbMa/ng4iIaKKxXPChTDjVGjKWZqstkL70opztwrILERFRAusGHwlDxlKPVzcUfLDng4iISJP1gg+tIWNpej4cNglSJC7xB4Mp32MoEA5OGHwQERElslzwYU8zXl1rt4skSdFZH2kyHz4T53wQERFNNJYLPqJbbZP3fGjN+QCgO/gwc84HERHRRGO54EOUVQJxO1YCwdQ9H4D+7bbs+SAiItJmveDDnrzsEt3ton1J9E45ZdmFiIhIm6Hgo7m5GZdccglKS0sxZcoU3HzzzTh48GDMa3w+H9auXYvKykqUlJRg1apV6OzsNHXR2VAyHwmn2qae8wFEg48AMx9EREQZMxR8tLS0YO3atdi1axdeeeUVBAIBXHvttRgYGFBe88ADD+DFF1/EM888g5aWFpw4cQK33HKL6QvPVDTzEb/bJTJeXUfPR6qTbQPBkBLIMPggIiJKZOhguZdffjnm86eeegpTpkxBa2srli9fjt7eXjz55JPYvHkzrr76agDApk2bcP7552PXrl247LLLzFt5hkRwoc5ehEIyRCziyLLsIrIeAFDgslxVi4iIKK2s7o69vb0AgIqKCgBAa2srAoEAVqxYobxmzpw5aGxsxM6dO5N+D7/fD6/XG/ORS8lOtVWPWtfVcJoi+BD9HpIUzZQQERFRVMZ3x1AohPvvvx9Lly7FvHnzAAAdHR1wuVwoLy+PeW11dTU6OjqSfp/m5maUlZUpHw0NDZkuSReHLbHsov61rq22KXo+fKoBY5Kk/b2IiIisKuPgY+3atfjwww+xZcuWrBawfv169Pb2Kh/t7e1Zfb90lLKLareLetS61pAxwFjZhf0eREREyRnq+RDuvfdevPTSS3jzzTdRX1+vPF5TU4Ph4WH09PTEZD86OztRU1OT9Hu53W643e5MlpERZ7KyS1Cd+dCOx9wGgg8OGCMiIkrOUOZDlmXce++9eO655/Daa69h+vTpMc8vWrQITqcT27ZtUx47ePAg2tra0NTUZM6Ks2RXDpZL3vORouqia8jYEGd8EBERpWQo87F27Vps3rwZL7zwAkpLS5U+jrKyMhQWFqKsrAx33nkn1q1bh4qKCng8HnzrW99CU1PTmNjpAkTLKiOqACI6YExK2aehZ7y6j2UXIiKilAwFHxs3bgQAXHnllTGPb9q0CbfffjsA4Oc//zlsNhtWrVoFv9+P6667Dr/85S9NWawZlLKLKtuhZ7Q6EM18pJrzwZ4PIiKi1AwFH7Isp31NQUEBNmzYgA0bNmS8qFyKll0SMx+p+j0AnQ2nkbJLAcsuRERESVluEIXTpj3nI9VOFwBw2cMBRcqeDyXzYblLS0REpIvl7pDJG07Tj1YHdA4ZY9mFiIgoJcsFH84kDaciC5K25yPytXrKLtztQkRElJzlgo9k49VN7fngnA8iIqKUrBd8pCq7pOv50DPng8EHERFRStYLPuyJu130l13Y80FERJQt6wUfkdJKICgrW4eVIWNpyy7pd7uoD5YjIiKiRBYMPqLZDVF5CYR0Zj4454OIiChr1gs+VH0dYrJp0GjPByecEhERZcx6wYeqtCLKLaLnI+2cD7v+hlMGH0RERMlZL/hQZTdE0DGic6ut28iQMZflLi0REZEulrtDqrMbgUi5ZSQXPR/MfBARESVlueBDkiQlyBBlF8M9Hyy7EBERZcxywQcQzX6IhtOA0Z4PXWUXBh9ERETJWDL4cMaNWA8qZRd949X9es52YeaDiIgoKUsGH/En24r/OnVvtQ0mfV6WZY5XJyIiSsOSwYczbsS6OOFW93h1jZ6P4WBIGVzG4IOIiCg5SwYfSuYjruwiyjFa0m219Q1HH2fZhYiIKDlLBh9inkd82UXvVtuQHM2WqPki5Ri7TUpbwiEiIrI
qSwYfStklGFt2SbfbRZ0ZSVZ6UTebShKDDyIiomQsGXxoNZzqnfMBJC+9sNmUiIgoPUsGH1pbbdONV3fYJIiERqrgg6PViYiItFnyLikyHGK8uhgylq7nQ5KklDtefJzxQURElJYlgw8xTCwYNDZeHUh9vgtHqxMREaVnyeDDaYub8xHSN14dUG23TdZwyp4PIiKitCwZfDjscQ2nQX3j1YHU57sou114rgsREZEmawYfttiGU2W8uo7MR6qyi49lFyIiorSsGXzYY0+1FT0fdpN6Plh2ISIi0mbN4COS4QjGlV309HwoJ9smHTIWfozBBxERkTaLBh/h33YgfshYtj0fLLsQERGlZc3gI268elDnhFNAZ88Hh4wRERFpMnyXfPPNN3HjjTeirq4OkiTh+eefj3n+9ttvhyRJMR/XX3+9Wes1RXzZRfR+pBsyBgAuRzirwYZTIiKizBgOPgYGBrBw4UJs2LBB8zXXX389Tp48qXz87ne/y2qRZnNESieBuPHqTiNlF875ICIiyojD6BesXLkSK1euTPkat9uNmpqajBeVawmn2ob0jVcHVEPGOOeDiIgoIzlpTnjjjTcwZcoUnHfeefjmN7+JM2fOaL7W7/fD6/XGfORa4qm2HK9ORESUL6YHH9dffz1+85vfYNu2bfjHf/xHtLS0YOXKlQgGg0lf39zcjLKyMuWjoaHB7CUlUIaMifHqQf27XUTWJOnBcgw+iIiI0jJcdknn1ltvVX49f/58LFiwADNnzsQbb7yBa665JuH169evx7p165TPvV5vzgMQR1zmI2ig7KLM+Ug1ZIxlFyIiIk053xM6Y8YMVFVV4fDhw0mfd7vd8Hg8MR+5JhpORcYjYOBgOZdde7eL0vPBzAcREZGmnAcfx48fx5kzZ1BbW5vrt9ItvuE0aFLPhy8QfozBBxERkTbDZZf+/v6YLMbRo0exf/9+VFRUoKKiAg899BBWrVqFmpoaHDlyBN/+9rcxa9YsXHfddaYuPBsJDacGej5E8BHgVlsiIqKMGA4+9u3bh6uuukr5XPRrrFmzBhs3bsT777+Pf/u3f0NPTw/q6upw7bXX4ic/+Qncbrd5q86SM67sYvpWWwYfREREmgwHH1deeSVkWdZ8fuvWrVktKB9Eb0cgFDte3amn7KIxZEyWZVXDKcerExERabHkXdIef6ptyMh49eSZD/XuF2Y+iIiItFky+Egou2TQ8xG/1VbM+ADY80FERJSKJYOPaMNp7Hh1XbtdNMououTitEtKcENERESJLHmXjG61jR0ypmvOh1J2iZ3YKppNmfUgIiJKzZLBhyiviOFiYttsNj0fPNeFiIhIH2sGH3bRcBq/20V/z0d82UU514Wj1YmIiFKyZvAhMh+ZzPmwa2Q+hjndlIiISA9rBh9x49XFf431fCQvu7Dng4iIKDVrBh+qOR+hkIxI4kM5cC4V9nwQERFlx5rBhz1adgmqprUaajiN7/lQdrtY8pISERHpZsk7pTrzIbbbqh9PRTSlxg8ZG2LDKRERkS6WDj4CoZAyaAwwOGSMPR9EREQZsWbwoRqvLrbZAvrGq7tVZRf1AXs80ZaIiEgfawYfqrJLQFV20VF1UXo+ZBkxgYtvhMEHERGRHtYMPiLllUAwpBowJkGS9DecArFNp6LhlD0fREREqVky+FBOtQ3JSs+Hnp0uQLTnA4jt+2DPBxERkT6WDD6UU22DIWW3i55+DyDcLyLilNjggxNOiYiI9LBk8OG0qTMfkeBDx04XQZRe1Ntth1h2ISIi0sWSwYddGa8e3e2iZ8aHUOxyAADODAwrj/k44ZSIiEgXSwYfTlF2CYUQCBrr+QCAixrLAQA7j5xRHmPPBxERkT6WDD7EnI+QDCX40NvzAQBXzJ4MANh+6LTyGMsuRERE+lgy+FBnOXyRRlEjPR/LZlcBAPZ9dlYJOkTZpcBhyUtKRESkmyXvlE5VoCGGgxkpu8yoKkZdWQGGgyHs+awbAM92ISIi0suSwYe6xOKPBA1OA2UXSZKipZdPwqWXITacEhER6WLR4COx7GIk8wFESy87DncBiPZ8sOGUiIgoNUsGHzabpAwKE70aRno+AGDprCpIEvBxRx86vT5l5gfLLkRERKlZMvgAoqUXJfgwmPmoKHZhXl0ZAODVP3Uqj7PsQkRElJp1g49IpsM3YnyrrSBKL698FA0+WHYhIiJKzbrBRyTTITIfRns+AOCKSPDx9uHwsDGXw5bR9yEiIrIS6wYf9tjzWYz2fADAoqmTUOi0YzjIQ+WIiIj0sm7wEZf5MNrzAQBuhx1LZlQonzP4ICIiSs9w8PHmm2/ixhtvRF1dHSRJwvPPPx/zvCzL+MEPfoDa2loUFhZixYoVOHTokFnrNY3THttwas+g5wMAls2qUn7NnS5ERETpGb7jDgwMYOHChdiwYUPS53/2s5/h8ccfxxNPPIHdu3ejuLgY1113HXw+X9aLNZPozfBH5nw4Myi7ANFzXgA2mxIREenhMPoFK1euxMqVK5M+J8syHnvsMTz44IO46aabAAC/+c1vUF1djeeffx633nprdqs1UXS3S+YNpwBwbnUJppS6carPjwKnZatYREREupl6tzx69Cg6OjqwYsUK5bGysjIsWbIEO3fuTPo1fr8fXq835iMfxDh1MZk0k54PIDxqXWy5Zc8HERFReqYGHx0dHQCA6urqmMerq6uV5+I1NzejrKxM+WhoaDBzSZqUsouy2yXzS3HdBTUAgPpJhdkvjIiIaIIb9TrB+vXr0dvbq3y0t7fn5X1Fj0c2u12Ea+dW4/9+83L86MsXmLI2IiKiicxwz0cqNTXhDEBnZydqa2uVxzs7O3HhhRcm/Rq32w23223mMnSxK1ttMztYTk2SJCyaOsmUdREREU10pmY+pk+fjpqaGmzbtk15zOv1Yvfu3WhqajLzrbImyiyi4dSZRdmFiIiI9DOc+ejv78fhw4eVz48ePYr9+/ejoqICjY2NuP/++/HTn/4Us2fPxvTp0/H9738fdXV1uPnmm81cd9ZE2cVvQuaDiIiI9DMcfOzbtw9XXXWV8vm6desAAGvWrMFTTz2Fb3/72xgYGMDdd9+Nnp4eLFu2DC+//DIKCgrMW7UJ7FmeaktERESZMRx8XHnllZBlWfN5SZLw4x//GD/+8Y+zWliuOePHq2c4ZIyIiIiMsWyjQ3TImCi7WPZSEBER5ZVl77iOSLARDMmRz5n5ICIiygfrBh9xZRaWXYiIiPLDssFH/O4WZj6IiIjyw7LBhzOux4M9H0RERPlh2TtufJnFybILERFRXlg3+Igrs3DIGBERUX5YN/iIG6fOng8iIqL8sHDwEd9watlLQURElFeWvePGZzq41ZaIiCg/LBx8xO92YfBBRESUDxYOPlh2ISIiGg2WveOy4ZSIiGh0WDb4iJ/rYWfPBxERUV5YNviI7/GIn3hKREREuWHZO2582YUNp0RERPlh2eDDGZ/5YNmFiIgoLywbfMRnOpj5ICIiyg/LBh/OhN0ulr0UREREeWXZO258poMTTomIiPLDss
FHfI8H53wQERHlh2WDD45XJyIiGh2WDT7ih4rF94AQERFRblj2jhs/VIyZDyIiovywbPAR32DKng8iIqL8sG7wkbDbxbKXgoiIKK8se8fleHUiIqLRYd3gIz7zweCDiIgoL6wbfMT3fHDIGBERUV5YN/iwcbw6ERHRaLDsHTe+zMKqCxERUX6YHnz86Ec/giRJMR9z5swx+22ypi6zOO3hdRIREVHuOXLxTS+44AK8+uqr0Tdx5ORtsqKeaMqdLkRERPmTk6jA4XCgpqYmF9/aNOqAg/0eRERE+ZOTu+6hQ4dQV1eHGTNmYPXq1Whra9N8rd/vh9frjfnIB/V4de50ISIiyh/Tg48lS5bgqaeewssvv4yNGzfi6NGjuOKKK9DX15f09c3NzSgrK1M+GhoazF5SUuqAgzM+iIiI8keSZVnO5Rv09PRg6tSpePTRR3HnnXcmPO/3++H3+5XPvV4vGhoa0NvbC4/Hk7N1+QJBzPn+ywCAao8bu/9+Rc7ei4iIaKLzer0oKyvTdf/OeSdoeXk5zj33XBw+fDjp8263G263O9fLSKBuOGXPBxERUf7k/K7b39+PI0eOoLa2NtdvZYi60sKeDyIiovwxPfj427/9W7S0tOCzzz7D22+/ja985Suw2+247bbbzH6rrEiSBGck6OBWWyIiovwxvexy/Phx3HbbbThz5gwmT56MZcuWYdeuXZg8ebLZb5U1h82GQDAYs/OFiIiIcsv04GPLli1mf8ucEbtcmPkgIiLKH0v/yC96PdjzQURElD8WDz7Cv33O+SAiIsofawcfkaCDW22JiIjyx9J3XQd3uxAREeWdtYOPSMaDPR9ERET5Y/HgQ4r5LxEREeWetYOPSMOpnT0fREREeWPpu67IeDhZdiEiIsobawcfbDglIiLKO0sHH2KsOns+iIiI8sfSwYfIeIjeDyIiIso9S991lfHqzHwQERHljbWDDx4sR0RElHfWDj4i5RYnyy5ERER5Y+m7rpO7XYiIiPLO0sGHnbtdiIiI8s7SwYdT2e3C4IOIiChfLB18RIeMWfoyEBER5ZWl77rlRS4AQFmhc5RXQkREZB2O0V7AaLrrihmon1SImy86Z7SXQkREZBmWDj4ml7rxjaZpo70MIiIiS7F02YWIiIjyj8EHERER5RWDDyIiIsorBh9ERESUVww+iIiIKK8YfBAREVFeMfggIiKivGLwQURERHnF4IOIiIjyisEHERER5VXOgo8NGzZg2rRpKCgowJIlS7Bnz55cvRURERGNIzkJPv7jP/4D69atww9/+EO88847WLhwIa677jqcOnUqF29HRERE40hOgo9HH30Ud911F+644w7MnTsXTzzxBIqKivDrX/86F29HRERE44jpp9oODw+jtbUV69evVx6z2WxYsWIFdu7cmfB6v98Pv9+vfN7b2wsA8Hq9Zi+NiIiIckTct2VZTvta04OPrq4uBINBVFdXxzxeXV2Njz/+OOH1zc3NeOihhxIeb2hoMHtpRERElGN9fX0oKytL+RrTgw+j1q9fj3Xr1imfh0IhdHd3o7KyEpIkmfpeXq8XDQ0NaG9vh8fjMfV7Uyxe6/zhtc4fXuv84bXOH7OutSzL6OvrQ11dXdrXmh58VFVVwW63o7OzM+bxzs5O1NTUJLze7XbD7XbHPFZeXm72smJ4PB7+Zc4TXuv84bXOH17r/OG1zh8zrnW6jIdgesOpy+XCokWLsG3bNuWxUCiEbdu2oampyey3IyIionEmJ2WXdevWYc2aNVi8eDEuvfRSPPbYYxgYGMAdd9yRi7cjIiKicSQnwcdXv/pVnD59Gj/4wQ/Q0dGBCy+8EC+//HJCE2q+ud1u/PCHP0wo85D5eK3zh9c6f3it84fXOn9G41pLsp49MUREREQm4dkuRERElFcMPoiIiCivGHwQERFRXjH4ICIioryyTPCxYcMGTJs2DQUFBViyZAn27Nkz2ksa95qbm3HJJZegtLQUU6ZMwc0334yDBw/GvMbn82Ht2rWorKxESUkJVq1alTCAjox75JFHIEkS7r//fuUxXmvzfP755/j617+OyspKFBYWYv78+di3b5/yvCzL+MEPfoDa2loUFhZixYoVOHTo0CiueHwKBoP4/ve/j+nTp6OwsBAzZ87ET37yk5izQXitM/fmm2/ixhtvRF1dHSRJwvPPPx/zvJ5r293djdWrV8Pj8aC8vBx33nkn+vv7s1+cbAFbtmyRXS6X/Otf/1o+cOCAfNddd8nl5eVyZ2fnaC9tXLvuuuvkTZs2yR9++KG8f/9++YYbbpAbGxvl/v5+5TX33HOP3NDQIG/btk3et2+ffNlll8mXX375KK56/NuzZ488bdo0ecGCBfJ9992nPM5rbY7u7m556tSp8u233y7v3r1b/vTTT+WtW7fKhw8fVl7zyCOPyGVlZfLzzz8vv/fee/KXv/xlefr06fLQ0NAornz8efjhh+XKykr5pZdeko8ePSo/88wzcklJifyLX/xCeQ2vdeb++7//W/7e974nP/vsszIA+bnnnot5Xs+1vf766+WFCxfKu3btkrdv3y7PmjVLvu2227JemyWCj0svvVReu3at8nkwGJTr6urk5ubmUVzVxHPq1CkZgNzS0iLLsiz39PTITqdTfuaZZ5TX/OlPf5IByDt37hytZY5rfX198uzZs+VXXnlF/sIXvqAEH7zW5vnOd74jL1u2TPP5UCgk19TUyP/0T/+kPNbT0yO73W75d7/7XT6WOGF86Utfkv/yL/8y5rFbbrlFXr16tSzLvNZmig8+9Fzbjz76SAYg7927V3nNH//4R1mSJPnzzz/Paj0TvuwyPDyM1tZWrFixQnnMZrNhxYoV2Llz5yiubOLp7e0FAFRUVAAAWltbEQgEYq79nDlz0NjYyGufobVr1+JLX/pSzDUFeK3N9Ic//AGLFy/Gn//5n2PKlCm46KKL8Ktf/Up5/ujRo+jo6Ii51mVlZViyZAmvtUGXX345tm3bhk8++QQA8N5772HHjh1YuXIlAF7rXNJzbXfu3Iny8nIsXrxYec2KFStgs9mwe/furN5/1E+1zbWuri4Eg8GE6arV1dX4+OOPR2lVE08oFML999+PpUuXYt68eQCAjo4OuFyuhIMCq6ur0dHRMQqrHN+2bNmCd955B3v37k14jtfaPJ9++ik2btyIdevW4e///u+xd+9e/M3f/A1cLhfWrFmjXM9k/6bwWhvz3e9+F16vF3PmzIHdbkcwGMTDDz+M1atXAwCvdQ7pubYdHR2YMmVKzPMOhwMVFRVZX/8JH3xQfqxduxYffvghduzYMdpLmZDa29tx33334ZVXXkFBQcFoL2dCC4VCWLx4Mf7hH/4BAHDRRRfhww8/xBNPPIE1a9aM8uomlv/8z//E008/jc2bN+OCCy7A/v37cf/996Ouro7XeoKb8GWXqqoq2O32hK7/zs5O1NTUjNKqJpZ7770XL730El5//XXU19crj9fU1GB4eBg9PT0xr+e1N661tRWnTp3CxRdfDIfDAYfDgZaWFjz++ONwOByor
q7mtTZJbW0t5s6dG/PY+eefj7a2NgBQrif/Tcne3/3d3+G73/0ubr31VsyfPx9/8Rd/gQceeADNzc0AeK1zSc+1rampwalTp2KeHxkZQXd3d9bXf8IHHy6XC4sWLcK2bduUx0KhELZt24ampqZRXNn4J8sy7r33Xjz33HN47bXXMH369JjnFy1aBKfTGXPtDx48iLa2Nl57g6655hp88MEH2L9/v/KxePFirF69Wvk1r7U5li5dmrBl/JNPPsHUqVMBANOnT0dNTU3MtfZ6vdi9ezevtUGDg4Ow2WJvQ3a7HaFQCACvdS7pubZNTU3o6elBa2ur8prXXnsNoVAIS5YsyW4BWbWrjhNbtmyR3W63/NRTT8kfffSRfPfdd8vl5eVyR0fHaC9tXPvmN78pl5WVyW+88YZ88uRJ5WNwcFB5zT333CM3NjbKr732mrxv3z65qalJbmpqGsVVTxzq3S6yzGttlj179sgOh0N++OGH5UOHDslPP/20XFRUJP/2t79VXvPII4/I5eXl8gsvvCC///778k033cTtnxlYs2aNfM455yhbbZ999lm5qqpK/va3v628htc6c319ffK7774rv/vuuzIA+dFHH5Xfffdd+dixY7Is67u2119/vXzRRRfJu3fvlnfs2CHPnj2bW22N+Od//me5sbFRdrlc8qWXXirv2rVrtJc07gFI+rFp0yblNUNDQ/Jf//Vfy5MmTZKLiorkr3zlK/LJkydHb9ETSHzwwWttnhdffFGeN2+e7Ha75Tlz5sj/+q//GvN8KBSSv//978vV1dWy2+2Wr7nmGvngwYOjtNrxy+v1yvfdd5/c2NgoFxQUyDNmzJC/973vyX6/X3kNr3XmXn/99aT/Rq9Zs0aWZX3X9syZM/Jtt90ml5SUyB6PR77jjjvkvr6+rNcmybJqlBwRERFRjk34ng8iIiIaWxh8EBERUV4x+CAiIqK8YvBBREREecXgg4iIiPKKwQcRERHlFYMPIiIiyisGH0RERJRXDD6IiIgorxh8EBERUV4x+CAiIqK8YvBBREREefX/A+/8197v1vmnAAAAAElFTkSuQmCC", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAh8AAAGdCAYAAACyzRGfAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAB3QUlEQVR4nO29ebQcZ3Xu/VTPZ55k6Wg4GjzjQQZkWwgTMFhgy/kcBt8scJwguHxwncgJtm4CKAkkJHHkkLUYkmvMTT6wLysYB3KxCQ7Yy8hYjoMkW8LyjLBlGY1HsnR0Tp+p5/r+6H6r3qquqq7qrq6uVj+/tXrp9HC6364+6vepvZ+9t6KqqgpCCCGEkICItHoBhBBCCOksKD4IIYQQEigUH4QQQggJFIoPQgghhAQKxQchhBBCAoXigxBCCCGBQvFBCCGEkECh+CCEEEJIoMRavQAzpVIJR48eRV9fHxRFafVyCCGEEOICVVUxPT2NJUuWIBJxjm2ETnwcPXoUY2NjrV4GIYQQQurg0KFDWLZsmeNjQic++vr6AJQX39/f3+LVEEIIIcQN6XQaY2Nj2j7uROjEh0i19Pf3U3wQQgghbYYbywQNp4QQQggJFIoPQgghhAQKxQchhBBCAoXigxBCCCGBQvFBCCGEkECh+CCEEEJIoFB8EEIIISRQKD4IIYQQEigUH4QQQggJFIoPQgghhAQKxQchhBBCAoXigxBCCCGBQvFBOoLDp+dw9+P7MTWfb/VS2pqjk/P4//7zNWTyxVYvhRDSxoRuqi0hzeCfnngN397xayRiEXziHatavZy25SuP/grf33MYuWIJf3D1ua1eDiGkTWHkg3QEIuJxbHK+xStpb/a/MQMA2LH/VItXQghpZyg+SEeQL5YAABOzuRavpL05UhFve359WjumhBDiFYoP0hHkCuWN8hTFR93kCiWcmM4CAOZyRbxwZKrFKyKEtCsUH6QjyBVVAMDpOYqPejk2NQ9V1a8/dWCidYshhLQ1FB+kI8iLyMcMxUe9HD5t9MvsovgghNSJJ/Fx9913Y/Xq1ejv70d/fz/WrVuHn/zkJ9r9V199NRRFMVxuueUW3xdNiFfo+WicIxXxsaA3CQB4+vUJFEuq068QQoglnsTHsmXLcOedd2LPnj3YvXs33vOe9+D9738/XnzxRe0xn/zkJ3Hs2DHt8qUvfcn3RRPilVxFfMzni5jPsUdFPRyumE3fc+FZ6E3GMJ0p4OVj6RavihDSjngSHzfccAOuv/56nHfeeTj//PNxxx13oLe3Fzt37tQe093djdHRUe3S39/v+6IJ8YownALAqdlsC1fSvojIx4qRHly+cggAfR+EkPqo2/NRLBZx//33Y3Z2FuvWrdNu/853voMFCxbgkksuwZYtWzA3N+f4PNlsFul02nAhxG/kslCmXurj8Ony/+Wlg11Yu2oEALDrAPt9EEK847nD6fPPP49169Yhk8mgt7cXDzzwAC666CIAwO/8zu9gxYoVWLJkCZ577jl89rOfxb59+/CDH/zA9vm2bt2KL37xi/W/A0JckC/q3gSKj/oQPT6WDnVhbLgbQDnyoaoqFEVp5dIIIW2GZ/FxwQUXYO/evZiamsK//du/YePGjdi+fTsuuugifOpTn9Ied+mll2Lx4sW45pprsH//fpxzzjmWz7dlyxZs3rxZu55OpzE2NlbHWyHEHjntQvHhnWJJxfhUBkA58rGgN4mueBSn5/J45cQMzl/U1+IVEkLaCc9pl0QigXPPPRdr1qzB1q1bcdlll+FrX/ua5WPXrl0LAHj11Vdtny+ZTGrVM+JCiN8w7dIYx9MZFEoqYhEFi/pTSMQieOuKQQAsuSWEeKfhPh+lUgnZrLWBb+/evQCAxYsXN/oyhDRErigbTik+vCJSLosHU4hGyimWK1dWfB+v0fdBCPGGp7TLli1bsGHDBixfvhzT09O477778Pjjj+ORRx7B/v37cd999+H666/HyMgInnvuOdx+++145zvfidWrVzdr/YS4whD5YKMxz8hmU8Has4cB0PdBCPGOJ/Fx4sQJfPSjH8WxY8cwMDCA1atX45FHHsF73/teHDp0CD/96U/x1a9+FbOzsxgbG8ONN96IP//zP2/W2glxjbHUluLDK6LMdulgt3bbm8cGkYhGcGI6i1+fmsPKBT2tWh4hpM3wJD6++c1v2t43NjaG7du3N7wgQvymWFIhN+LkfBfviLTLsiE98pGKR/HmsUE89foEdh04RfFBCHENZ7uQMx7z6HcaTr0j5roslcQHAFy5qpx6oemUEOIFig8fOHx6Du//X0/iwWeOtHopbc2hiTls+Np/4so7fmq4/Le7f47ZbKHu582ZxMepmXB2OP3ZL0/ghn98Er86Pt3015qYzeGDX/8vfHvH664eL9IuywaN4kP4Pna91l7i4+nXJ3D91/4TO/Z7M8v+w7ZX8
OH/vQOZvH2L/vt2HcQH7vovnAzp3xkhYYDiwwd+/uopPHt4CvftOtjqpbQ1T7zyBl4+lsaJ6azhsvvXp/Hs4cm6n1f2ewBAOlOoioaEgR/uPYLnj0zhpy8fb/pr/Xz/STxzcBLf3vHrmo9VVdXQYEzmrcuHEIsoODI5r5lS24H/eO4YXjqWxr/srP3+Zb71Xwew68AE9h6atH3Md586iL2HJvHkKycbXCUhZy4UHz4gzqzFFzSpj2y+fBzfdf5Z+I8/egf+44/egfMW9pbvK9QvFoTQiEYUVKpEcTqEqRfxHjMBDL4Tqacjp+ehqs6TaU/O5JAtlKAowOIBo/joScZwydIBAO0150WYjncdOFXz/QtmswVMzuUBOKfuxH00NhNiD8WHD4jNbTydQSGEZ9Ttgth8F/YlcfGSAVy8ZAADXfHyffkGxEehvLkkYxEMdScAhHNjEH9HcwGIj1OVcuP5fBGnKxuqHUJUL+orNxczs3aVXnLbLkxUhguenMnhtZOzrn5HPrlw+vsRgwsnOMCQEFsoPnxAbBrFkopjlRbUxDvZQnnTlTc48bO4rx5EZCoRi2C4pyw+whz5mHfwE/iFfOZeK12i9fgwpVwE7Wg6PSX1enHrV5GPk12vmLlcAZmKUKaxmRB7KD58QB5axtRL/YjNNxmLarclNfFRf+RDeD7i0QiGesIb+RDrnA8i7SKVGwszqR16jw9r8XH5ymEoCnDg5CxOpNtDfMvl1k+5nMwrHye7cm1ZcFB8EGIPxYcPyObFWl/kxB6RWknG9T9LIUT88HwkohGMVMRHGDcGEaEJJPIhnbnXEsx2ZlPBQFccbxotz2Rqh+iHqqqGz39XpUNrLQ67SLtQfBDiDooPHzCID0Y+6iZXLG+6SSntIoSIuWLFC+LziUcVLe0S6shH4GkXd5GPZTbiAzC2Wg8709mCFq2MRhQcm8rUPAaA8cTCzs8h/12F8W+MkLBA8eEDctqlncoNw4YW+bBMu/jj+dAjH+EzAwrxEYjh1Iv4mHROuwDtZToVUZ/uRBSXLStX6ux0MRxPPk6nbDwfckSJkQ9C7KH48AFGPvxBpFYsDacNVLvIno/hNki7ODWw8oNSSTV4Fpz+ZlVV1TZdp8jHFSvL4mPf8elQHlsZIbyGexJYe3Z5Mq8b0SQfJ7v3KN8+OZdn9RshNlB8+AA9H/4gohuGtIsvno9yZEo2nIZxgwwq8jGdKaAoDbs54hCtS88XMFPpLrvEIfIx0pvUerI8/Xq4ox+nJfHhtlInky/ijWk9WnZ6LmfpE5kwGVEn553LmAnpVCg+fED0kQCAo5MZlErumhYRI3q1iyw+Gk+7GA2nSQDhFh/NrnYRfSgS0fKxTWcKSGesN8nDk2VhMtKTQHfCeQ5lu7Ran5DEx+UrhhBRgIMTczg2ZX/iIEroxTHLF1VMW7T8N5fghvHvjJAwQPHhA/mSflaeK5Y406FOxOabjMuej6jhvnrIW/T5COOmIN5js9Mu4r2PDqQw2F1u4mYXsTtiM1DOiitXVVIYr3ublxI0ctqlLxXHxUtqd2gVXq7lI93oTpT/Jq16fZhNpnbeEEI6HYoPH5ANpwBwiKmXurCMfMQb7/ORLejVLiO9lSZjc/nQRaiyAXU4lTdfYSK1Ex+Ha/T4kBGm05eOpm0jKWFAmI2F+dhN6kXudeJUMWU2ModR5BISBig+fCBv2hhpOq0Pa89H4+JDL7XV26sXSyqmQpSPV1XVUGrrdt5IPYgNcUQWHzZ/s24qXQSL+lNYOdKNkgrsef20T6v1H118lVNwbip1xHFYNtTl2CtGTumUrzMKSogVFB8+UKikXZTK0DKaTutDVLRYV7s04PkQkY9YBIlYBH2psnchTH0YzNGzTAPVPbWQN8hlQ90AHMSHi0oXmbWV1MtOl11DW4FuOC2nnESlzqsnZmxTpnL6achBWIi/qXPP6jVcJ4QYofjwgVxl41gyIM4i2eujHqzbq/tX7ZKsmAW1+S42LbJbQc5UktnMRmOy+BBeDlvPh9bdtNvVc1/ZBv0+JkyRj6GeBC4c7QMAPG2z7sOT1WmXiVlj5CxfLGE6UzahnruoLD7COEOIkDBA8eED4sx65YLyF7SbbomkGqe0SyOG05yUdgF08REmM6D5/QUmPirpFLvmeNpQORdpF0AXH88fnsJcrroaJAycMqVGgNq+DzkCZNeoTgiNiAKcvaDH8FqEECMUHz4g0i4rR8pfOGFNuxw+PWfo7xA2xAacivtbaqs1GYuV82Je5rukM3mcmG7+sLQq8eFh457JFjxVWJ0ypF3sPR9zuQJOz5XP7t1UuwDA2HA3lg52oVBS8X/3HMbu1ye0S1BVYMWSioOn5mx9M7LnReAkPgrFEsYrA/OWDXVrEZOqypbK9aHuBBb0hrekm5Aw4Fy4T1wh0i6rKmc7RybnoaoqFGECCQE79p/CTf+8Ex97+0r85W9d3OrlWGKZdon7N1jOHPlwYwa88es/x/hUBk985t1arr8Z5M1pl5z79/uhr/8Xjk1msPNPr0FPsvZ/aa3ao1cXHydncsjki0hJZc6HJsqCpC8Zw0BX3PV6rlw1jAeeOYLP//BFw+19qRh2bnG3xkb48qP7cNfP9uMbv7sG110yargvky9q1UTDvdXi45fjaUzO5TDYrd83ns6gWFKRiEZwVm/SVrzKEaUwl3QTEgYY+fABkXZZPlxOu8zlipicC08lBVAedw4A+9+YafFK7LFsrx71r9pFPK/dmauZXKGEV07MYDpbcDX7oxHM789tykJVVbxaWeOJaXeRBdGfYrgniYGuOHoqfSvM0Y9nDpYrVt60uN/V8wo2vn0lLlrcj1ULerRLNKJgOlMIJCX5zMFJAMDeQ5NV94nPPB5V0CeJoIV9KZy9oAeqCuw2VeqISObiwRQiEcVWWGiRD0l8MO1CiDUUHz4g0i69qRjO6itvbGHzfRRLwQ0tq4dCsaSlhKz7fDSedklokY/yWXwtM6BsSG32qPh6PR/5ogqRSXMrWEQL8OHuBBRF0VIq5r9Z8Z5F51K3vHlsED/+9G/gZ398tXZZOVIW5qcCKD0V78PKx3JaSo2YI5Nah1ZTpY6514ldi/7TUjpHMzXPWrdhJ6TTofjwAXl2iN43IVwVL4XKDtXs1t31Ip/5W021bcxwqn8+gPvIh2xIbbr4qEq7uPucMpIoc9MZdS5X0Mp4RdrBrtGYqFgR5bONEFQaolRStTbpVj4WK7OpwK5SR+7xAdh7hk5ZpF0KJRXpTDiNt4S0EooPH5CnptqdRbYaEVVoZhVFI8jiIuH7YDmj58Ot4VSOfPxyPI2pJqbS6o18yILDTVRLCKpELKKlW7RyW0kwH5qYw5HJecQiCt66YtDVWpyQIwHN5MR0VjsZsDJ+y34XM0JkvXA0rQ3Tk59n6WA5eiNE21yuaDj+cufUVDyqHV/6PgiphuLDB0TaJR5VsKxGx8hW0S6Rj3hUQTSih8O1apcGmm5Vez7ciQ85MqKqzZ3WWq/4kI+Lm89WrvQQaQet0Zi0WYuz/0uXDdQcKOcGt9Gm
RpEF1InpbFW67pTkdzGzZLALy4a6UCyp2PNr3feh9zop/9/uS8YQj5aPnfx+zN1NhUhhl1NCqqH48AE57bKsRtOmVhH2yIfYJIQvQyCX2tabO9c9H+UNQzYDOj3nhKk01OwF8JNc0fi5uE67SJ+nm8/WvEECkHp9VIsPkYpoFC/lzY1gjjgemzSWSVuV2cqI6MdT0mdt7nWiKJLpVErNacKmUmarCa4Q9ZMhJCxQfPhAvg3SLuLsP+yRD3miLaCnXUqqHr3xSlXapXJGmiuUMOtwPMRGJSa/NrNrZ3WfD7fiw1vkw8rzsNSi14cQWm/zwe8hv16zIx/m/3fm6yKVNtRtJz4qptPXyp91qaTi6KTo8aH3OhG/LxtoNWFXuW+48ncTpk66hIQFig8fyEtpF5EXDlvaRUQ+csUSCsX6UxjNQqQP5EoXQK92Aer3fZgNp13xqPY6Th4EsVG+902LAFR7AfykqtTWreej4C3ycdpCfIhU4fF0BvliCcfTGbx+ag4RBVizcsjVOmohBJ/VGHo/Mf+/Mxu/9eiEjfioVLw8e3gSmXwRJ2eyyBVLiCjA6EBKe5w+HVl/P+JnLe0SUKqJkHaE4sMHDNUulbOjqfl80zaqepCjBmFMvYi0g1l8yGmYeite8qb+IYqiaGF3p41BnMlevKTf0gvgJ/VHPjwaTi3Ex4LeJBKxCEoqMD6V0Sp7LlrSj/6U++ZiTohIQbPTLiLdKbrkmtOftdIuy4e7sag/iXxRxS8OntZmuoz2pzTxClSnVEolVesGK4RJUIKLkHaE4qNBSiVViyrEoxH0JmNamD5Mvo9iyMWHHvkwpl0iEUVqNFbfus2zXQB3ZkAtjN6btPQC+Im51NZN2Wz5cSXpZzeeD70iQxCJKJqf4dDpOe09XrnSn5QLIJl8m5yCEJGPNSvKEZvDk9biw6rUFigLU/2zntDSNstMg/XMHpap+bz2f0wILXY5JcQeio8GESkXAJoDvtawrlZQkEa2Zzy07g4Kq+6mgkSDFS96tYteRePGDCifJZu9AH5jjny4bQZnMJx6qHYxV3vIvT7Ee/TLbApIaYomNt1SVVX7PyeEk9nzcapG5AOQ5ry8NqGX2Zpm25iFhXjevlSsqqqKaRdCqqH4aJC8tKmLM+ulISy3LUoiaS4fnnSQwGqirUCveKnT81Gojny4qb6Qz5LNXgC/8aXPh4vfsWuyJf5mXzgyhVdOlFvw+yk+DE235pvz9zcxm9MiQVdUvCpy9DFfLGFqvpwacZrT87bKZ/2Lg6fxemUsgXmqr1lYWEVUhgNKNRHSjlB8NEi+IEc+KuIjhOW2eTntEsKKF73axUl81LduLfIhiY9aHoRyDl8/SzZ7AfzGPNHXfYdTKe3i4nesDKeA/jf7o+eOAQDOX9Rrm5qoh2Qsit7KLJVmtVgXYn9hXxJnn9ULoDwUThisxbwlRbGvdgGAc87qxUhPAtlCCY++fByAfeTjtJP46KX4IMQOio8GERtbRIHWHEtLu4Qp8lFsE/Fh8nwAevltvYZTzfMhRVVGamwMk/N5bWbKUKUhl+wF8BuxRjE91n2TscYNp4BeRiqOhx8t1c0MVWbqNGszllMkC/uSiEcVFEsqjlcG7mml011xQyM7M4qiaFEf8TvLaqRdrIysQfU2IaQd8SQ+7r77bqxevRr9/f3o7+/HunXr8JOf/ES7P5PJYNOmTRgZGUFvby9uvPFGHD9+3PdFhwkRUYhJZ9XCnBamXh9hr3bRxYf/aZd8ofzeE4ZqBeeNYULK4YuI1pVN9H0I8THYpbfudoN8TGp9rrlCCdOVOSNmz4M5reBnykUgfCbN2ozlAXCRiILFA8YIpIi4uInomN+/+fiYq6UmLJ5b/DyfL4ZS8BPSSjyJj2XLluHOO+/Enj17sHv3brznPe/B+9//frz44osAgNtvvx0/+tGP8P3vfx/bt2/H0aNH8aEPfagpCw8LWhmnQXyEL+1i8HyE8ItQnME7Gk4bTLvELcSHnRnQ6kxW9gI0MujOCvF8IvLhvtrFveFUpJGiEUV7HYE5rbC2CeKj2ZEAcxt0s/Fb/0yrW6ubMUd+lth4Pqbm88gXS1JESX/u3mRM+14IYpovIe2EJ/Fxww034Prrr8d5552H888/H3fccQd6e3uxc+dOTE1N4Zvf/Ca+/OUv4z3veQ/WrFmDe+65Bz//+c+xc+fOZq2/5egbmx7GFV96J2eyTTEn1sMZEfmos9olZ5rtAtTeCK3OZGUvwHOHJ+tai+0ahfiolGk3o726qOwZ6o4jYko7jPantFTEqgU9WNifqvr9Rml29Ye5LNZ8ElCrzFbmgtE+9KfKHpUFvUmkTJ13B7sTqIzGwem5nKVYNbRhZ+qFEAN1ez6KxSLuv/9+zM7OYt26ddizZw/y+TzWr1+vPebCCy/E8uXLsWPHDtvnyWazSKfThks7Iapd5LTLYHcc3ZWJlkdD4vuQ+3zYCaJSScW3njyA5w9PBbUsDUfPR4OTbfVqF7nU1nlTsDqTlb0Au3z2fZgjH3M5dxUhhvbqNcSH0+Ybi0YwWhEczYh6ALUF3+7XJ/DtHa/XXYorIh+iY6u5bbx4XadKF0E0on/WZr+HuH+wS/ew2D33kM17/vWpWdz9+H5MZ5o3KZmQMONZfDz//PPo7e1FMpnELbfcggceeAAXXXQRxsfHkUgkMDg4aHj8okWLMD4+bvt8W7duxcDAgHYZGxvz/CZaiVUlhaIoWFxpxTyezlj+XtDIJcF2aZenX5/AXz30Ev7yRy8GtSyNnIvIR90dTi0+IxF6n8kWLMWY6Epp9kaI5lUvHPFXoJkNpxmXUR4vaZeJGnNNzltUrhB5+7kLXL22V2oJvj/5t+fwhR++iGfrFL9HxAA4U9rFLD6cenzIXFU5Ducu7LW8X34/ds9tJ7j+9scv4+8e/iX+/dmjrtZCyJmG51nZF1xwAfbu3YupqSn827/9GzZu3Ijt27fXvYAtW7Zg8+bN2vV0Ot1WAsQq7QLoZ+vypt9KZM+H3SYlzvYnWzAIS+vzYVVqG6/f81EsqVrViuz56O+KoScRxWyuiCOT8zjnLOMGo23Ups1kQWViqTBu+oUQVuJsWszgiUWdzw8yHgynYkrviM1cky/+1sV4+vXTuGH1Ytfr9oJdFAAACsUSDk6UxcPrJ2fx5rFBT8+dzuSRrnwmS02Rj8Oa4dR92gUAfvdtK9AVj+LdFy60vH+kJ4n9b8waxIf5ua0EV6mkYmfFtPzGNL0gpDPxLD4SiQTOPfdcAMCaNWvw9NNP42tf+xo+/OEPI5fLYXJy0hD9OH78OEZHR22fL5lMIpmsbQALK1ZpF0AXI/Km30rceD7ELJp60xuN0Ky0S15qWy57PhRFwdKhLvzq+AyOnLYQHzZnsj2VXhV+z+0xez6A8ufUV0t8GEptnddUy/OwYqQHK0Z6XK23HpzSLuPpjJYarKc5n/B1DHbHtc9omTTksVRS9WiWjfgyE49G8JErl9veLwsLO2Fj5XPZd3xaa3bmt4glpF1ouM9HqVRCNpvFmjVrEI/HsW3
bNu2+ffv24eDBg1i3bl2jLxNarCopAL3nRyE0kY/afT7mKhuq35UcbhBmUstqF222i/d1ZS2awAn0aojqzc5uo+6peHncejLcItIufamYZmR0YwyWxUcmX0KpZP/3ZuVjCRKntItcGVZPifoRzWyq+zNGB1KIKOW/55OzWU+GUzeIJmKHJua0/zNmYaMJLqmNv9wnJj1PzwfpTDxFPrZs2YINGzZg+fLlmJ6exn333YfHH38cjzzyCAYGBvCJT3wCmzdvxvDwMPr7+/GHf/iHWLduHd72trc1a/0tR/cTGNMusUh5oys6bAZB4ibyMVsRJa2JfDi0VxdplzqqdOTIhzk1phsSq2fw2I1eF2fVs1l/K4a02TbRKLrj5XSQm4oXcwVQtlBCV6I6egR49zz4zYg2Yr461SBHO+qKfEzqPT4EiVgEi/pTODaVwZHT8zU9L14R7dNFO/pkLIIuU1XMkEXkwyA+aDglHYon8XHixAl89KMfxbFjxzAwMIDVq1fjkUcewXvf+14AwFe+8hVEIhHceOONyGazuPbaa/H1r3+9KQsPCyLtYhf5yIdEfLiJfMxqaZfgS3Gd0y4V8VGsP+0SjypQFKP4ECWZVv1Y7NMu5fXN+h35kCpyuipeFFeRD9NnNZcr1BQfbqo9moEQcpl8CXO5AroT+tfPYUPkw/tARvE7SweN02eXDnbh2FQGh0/Pa63Q3aZdaiEiKK9WxMdIpROujPj7ET1WVFXFLmkyMtMupFPxJD6++c1vOt6fSqVw11134a677mpoUe2E2Nxi5shH2DwfxdrGxFnJ86GqatUXaTNxrnapeD7q6PNh1d1UYDcAUFVV+7SLFvlojvhIxCJaTwk3zeDMlTpOgqXVkY+eRBSJaAS5YgmnZnLoHta/fmQBeHRy3vPfn7nBmGDpUBd2//o0Xj6W1qJ/fqVdhIgRwskcJZNfSxz7107O4qSUgmHkg3QqnO3SIO3i+ZDTLnZ+BZF2UdXgq3Qcq10aaK+eK5afN24hauwGAM7mipoHw7xRibP1fFH11RsjN0ITPWLcDIozl+Q6pWr89jx4RW66ddpUUSULwEy+5LkRmZXnQ77+fKU0ujcZs4yu1UO1ubTaSyMEyqlKpZFozS+8Q4x8kE6F4qNBhLgwn1mHzfNhSLvYRBDks/lcHSmORtA9D/62V88VrNNigN6MajydMXhDhDkwFY8YUgOAvmkA/kY/5MhPVxMiH+Ypva3CrsupOfrkdTSBleejfL2chnmu0jvET+Flfi6r4yoESTpTQL5YwlOVlMu7LjirfDsNp6RDofhokJxd2kVEPkIiPuR12J1Rz0q312PubASRUknGHTwfDZTaWomaBb1JJKIRlFRgfEpvBicMkVYzQGLRiLYeP30fOclwKjwbXqpdRLd0u8iHeUpvq9CmCUuph1JJ1cTGwr7yMfdS8TKfK2qpDHPkQ0S3RGmrn+LD/Pdh9dwDXXHtszk9m9M6465/0yIAZVFSb0dXQtoZio8GsU27aJ6PcHyxyOuYy9ukXaQz+aArXpyrXcqbcT1pjrzFXBdBJKJUNaICaqcneptQ8SKnXUTkw434EJ/TYKXyYs7md8R76pem9LYCq3LbkzNZ5IolRBTg8pXlDrJWFUh2iKhHTyJaPTDPZiCcHwz1GF/L6rmjEUX7bJ49PIVjUxnEIgredX458lEsqaGctURIs6H4aBD7tEul2iXg9IUdBUOH09ppl6DFh9h8HQfL1eP5sJjrImNlOq3VCbO7CRUv+YLs+SiLm1qltqqqSuKj0pbd5nda7fcQWKVdDleO/Wh/Smty5iXtos10Geq2qGhqnvhIxqKaEHV6bnH7T144BgBYvWwAwz0JzReWnqfvg3QeFB8NYpd2EV8soYl8FOVSWzvDqSw+WpR2cepwWscZYs4mMiXQxIe02Z2usVH3JPyveMlKkY+Uy8iHLMZE7wo7n4jVlN5WIHpjnJbEhzj2S4e6bCuQnJB/30wqHsUCqQrFb7+LfDxriY9HXzoOAFh79ggURdGm5nK4HOlEKD4axC7tEjbPR97UZMwqzzyXlT0fLTKcWnU4bcjzYW84BeT5H3qYv1aUwO9GY6qqSp6PCLoS5bXWMpzKZtOhSuTDTrC0urupQJSjypEP2SxqlQarhUjRmFMsAvl2v8WX/Hx2wkbcLipbxLTcvlT5M2O5LelEKD4apGCzuYlZL6GJfEjrKKnWG7kc+Qi+2sXB8+GH4dTieQE9LO8l7eJ3rw+5rFlOu1hN25URZbbRiKJtZHapGrspvUGjz3fRu5welqbRLrOIRNXisEPkw3y73+JjxEPkAygbgy+vTEbu7yp/zky7kE6E4qNB7Kbahi3yUTCJCfPGViiWDD0jgox8yN4Fpz4fubpKbe2rXQBrz0etZlx+z3eRhV5STru4jHykYpGaFTKaoPKpu2e9iMjLhEXaZdlQtyYUprMFrUKlFlrapQWRjyGP4uPiJQOaUOxn5IN0MBQfDWLnKdA9H+EwnJojMOZNylwlEaTno1BSIbJAlp6PeP1TbXM24lAgNrtjkxltKJvbyMeMT2kXuYonEdWbjNVMu1Q+o1Q8WrNCRvT4GPZprkm9WBlO5bRLdyKmPcZt9EM3nFqLD9FGX359vxACNRpRNDFhRn7NtZWUC1AeIgiUy20J6TQoPhrENu0S4g6nQPXGZk4hBFntIr9Ws9Iudp6P0f4UohEFuWIJb1S6UIqUgN0MEN8jH5X3FYsoiEQUTUi4TbsYxEebVLtMZwrIVdr4mw2jXkynuUIJx9MZw++bkSMfVr1bGkG8n6HuBCIRa4ErH/MrJfGhRT7YaIx0IBQfDWKXdolWOpyGJe0iIh+KTTMqs3kyyMiHXMXi2OG0nqm2otTWxvMRi0Yw2p8CoHsPTs+WNwO76ad65MNf8SHep97h1Pn5hThJxqW0i434sJvSGzSDUtOtybkcJufyWnM7IRL0CqTavT7GpzIoqeVjt8BGWBg8Hz6/fyEsnLw0suC50hD5KIsPtlgnnQjFR4PYpV1iISq1VVVVE0GiL4E5PG+OfPg5t6QWcmt1q7PHxma7VLwkDo215AqLbKGoiQq7s2QhPub8SrtU5s9o4sNlh1Pd86FHPuyajAn/hF/j5OslElG0NZyazWnRjQW9Cc3r4qXi5aVj5bbpq0Z6bCMPqxb0YEFvEmef1WNoj+8Hly4bQEQBLhsbsH3M+aO9SMYiWHf2iNZwDJAMp/R8kA7E01RbUo19tYswnLbe8yELoL5kDNOZQnXkI9f6tItVyqV8e6XDadH7tN1apbZAecbLUyiH+UV6IhZRtM3BjNjAZnxKu2QLRgGr+zecPwM97aL7ROwiHyK0L3pLtJLhngROzeYwMZvTzvqXSr4MqwokO0S7cjmiYCYVj+Jnf/wuxCIR3yc1Xzjaj6f/bL2jqFvYl8LOLddoolXQz8gH6WBa/03U5tSqdglD5ENO/fSmYsCUVeTDlHYJsNol51DpIt8upu0mYu43EK3DqcPvyNNtRXpiqCdhu1F1a5EPn9MuFfGhCwnn58/KhtOEvU+kWF
IxXVlrf5e1KTJIZNPpG9Nlf80yyZfhxfMhpsQ6iQ9AT3E0g5He2j4Sq3k6muGUng/SgTDt0iC2s12E5yMEhlND5MOmH4TZXxCo50Pr8WEdEpcjIl7XVctwChg3u1pltoD/s11ypshPymvaJR7VUhZWPhHZm9IXgsiHPlwua9mdVBaDTkzN5/HyeBqAsYqkXRBCkGkX0olQfDRIrka1S9giH2LzMW9sZvNkK9Iudo3AZBOq13WZzZxWyB4DUZLqFEYXkQm/ZrvkTI3QaqVQBJZpF4uIlTizTsYitgIvSMSxnZjL6w3GpMjHssFyCubUbM7RdLv79QmoatnTsbBiGm4nmHYhnQzFR4MUasx2yYdAfMgCSJy1m0ttzebJQA2neWfPh6IodbdY1zqcOnk+Kn4DOe3iVBXR63OHU7tql1riQ0u7xORS2+o1iTPrMKRcAGOXU6seHf1dMfRVjvFRh9TLU8LvsbL9oh4A0y6ks6H4aBC7zU2IkTA0GRMCKaLoQ9HM3oDWRj7sW6sL9C6nHiMfLgyniwfKZ83z+SL2vzEDwDntItqfz9YQB67XaPJ8yJUrVjN4BCLykZQ8H1apGnFmHYaUC6B7Piakahc57aIoiquKl50V8bH27PYUHwNMu5AOhuKjQew2t2iImoyJtEssoveDMIezW+n50D0P9ikBbbJtEzwfqXgUZ/WVTYPPHS6Xbjo14/I98mFKu4jPSLWZwSPQ+nzEIo7REr3SJRyRj+GKQfPQxDwm58prM7dGr2U6nc0W8MKR8mdVy2waVoQYzORLgUYaCQkDFB8NYpd2iUfCM1hOrCEaUaRmVMYvO9EqXGysQVa7OM11EWi9Pjyuy43nA9A3u19WDIyOkY+k3v685MPna5d2AZy7nModTkU0xinyEba0izjW/alYVTVKLdPpnl+fRrGkYulgl6F9ejvRK5XeTjP6QToMio8GsUu7REM0WE6PfCi2M0BE5EOc8Ycu7RJv1PPhXJ4rPAeiL4jT6Hl507Br6uWFnOlvKBaNaD87zXfRZ7vokY98UdXes0CE9cOSdhGGU3Gsl1qIByEG7dIuwu/RjlUuglg0ov0t0XRKOg2KjwbRmljF7DwfrRcfwncSiyq2PSRECmGoJeKjdnRCbMbNSLsA1XNBhnrsowTJWERrEe5Hrw+r6EyqIracym0NpbYJ/XfNvyNGtocl7WKemWM1EE4zAdukXXYdOAWgff0eAn24HCMfpLOg+GgQ+z4fiuH+VlLQ0i7SuHabJmMjmvgIcraLC89HZd3NMJwCxiZXgPMAMkVRfJ3vYiU+as1qAfTjloqVIyXiby5j+p1prdolXJEPgdnvATinXTL5Ip49JPweI01YYXDow+UY+SCdBcVHgwhxEYuEuMNpsTrtUjXVthIJERtDkJEPbf6Ki2oX730+jHNT7DBHPmpNfxVVQ7XG3rvB3GQMgKOHQyBHPhTF/rPVSm1DEvlIxCKGFJBV5EMIkuPTmSrBuffQJHLFEhb2JbFypD39HgIhCOn5IJ0GxUeDiLSLeXML01TbgmQ47bZpwy3SLsOVdEOgaZe8+1Jb72kXd5GPpYPGTWyo23mjFqZTXyIfFr6hlIteHxmpvTpgP5BOM5yGxPMBGA29VpGPBb0JJGMRqCpwbMoY/ZBbqvs9qyVohNGWaRfSaVB8NIhd2iWsng+71t0i7aJ5PnwwUrpFr3ZxUWrrsdpFM5zWmAcjRz4Gu+OI1RArerO25qRduhPWUQwZucMpAFszsW44DUfkAzBGlqyqVeReH+bUy1OvC79He6dcAF0Q0nBKOg2KjwaplXYJReSjKEU+aqRdxBlpLkCvSrZQfeZvpv60izvDaW8yhsFKtGPYxdh5IQ5mfJjvYmW4FULCudS2EjESkQ+baIlmOA2J5wMwig9zyku7XVS8SKbTXKGEPb8+DaC9K10EWuSDXU5Jh0Hx0SD2aZfwdDgV0Ze41GRMNiWqqqpXuwjPR6B9PprZ4bS2sBGIza6W3wOQIh++pl30yE+Xq8iH3l5d/h2z+JgOmecD0I9xVzxqm+JaZhH5eP7IFDL5Eoa64zj3rN7mL7TJCEGYZuSDdBjhORVqQ0olVd/YqwbLhWeqrez5kFt3C7KFEkSApiV9PrQ24W76fNRZalvDcAqUxceLR9OuxIcwhPpZ7RKXUkN2KRQZu7SLufdIWmuvHibxUa4mWjrUZevbEGLwhSNT2P162efx0HPHAJT9HpFIe/s9AKnahZ4P0mFQfDRAXopq2A2WC0XaRfJ8WJ0dyxvoYHcLSm21ahc37dU9ej4KlciUi8iH8B6Y+1BY0WMzoK8ezLNdADgOihNkTYbTbpuoltZePURplwWVY2xlNhWIz2PbL09g2y9PGO5r9xJbQR9LbUmHEp5vozYkL0U1qgbLhbDUVo58ZAsllEoqIhFFm2jbndAHlLUk8tGEUlu3TcYA4ENvXYoXjkzhxrcuq/nYnspx8mO+i1WprdOgOEFWaq8OwNJMnMmXNAEcpsjH+y4axaMvHcdH162wfczVF5yFtauGcWI6a7h9QW8Cv3XZkmYvMRD0tAsjH6SzoPhogIJkyrSrdimEyPMRiyhaugAob1I9yZgW+ehOxAzeClVVAyllzLroxZHQZrt4izS4ne0CAJcsHcD3blnn6nlF5GPWj2qXooXh1GYGj0zG5JWx6vMhNrXyRGP7yFLQLB/pxr/+D+djPdidqPmYdkekXVjtQjoNGk4bQGwaiqKnWQSxEA2Wkz0f8tm1OEMW5aK9yajh/qAqXrRSW8e0S53VLlrkw18R1ZMUkQ8f0y5yqa3m+bDelIolVYu8mdMucuRD724ab/ueGGciWnt1VruQDoPiowGcGliFyfMhm2IjEUWfG1I5Q5YjH/IGGFTqJWuRdjAjhInXahe7wX+NokU+mlztYtdkTC7BrerzIUVjpuaF2ZRBzjAiJg2zwynpNCg+GqDgsLFpfT5CVu0CVLfuFmH63mTM8F6CKrfVPA+uql3cr6lYUrUqHjeeDy+I9uq+pF0sB8s5l9oaxIe51DZfnXYJU5kt0RGicDpbQCkEJyqEBIWnb+StW7fiiiuuQF9fHxYuXIgPfOAD2Ldvn+ExV199NRRFMVxuueUWXxcdFrQGYxYhfT3y0XrPR8HUCM3sDdAiH8nyjJB6W5nXi97nw03axf2a5CiJG8+HF/TIR5PSLjUMpxmpQiZi+lxln4jeWp3iI4yIz0VVgRkfhCwh7YKnb+Tt27dj06ZN2LlzJx599FHk83m8733vw+zsrOFxn/zkJ3Hs2DHt8qUvfcnXRYeFXME+7RKm9urmyIc5pC8aZYkNtV5/Rb2ICIsrw6mHNeUcDMGN4mu1i0UErVaHU727qZVg0dckvARMu4STVDyq/W3TdEo6CU/fSA8//LDh+r333ouFCxdiz549eOc736nd3t3djdHRUX9WGGJEVMM67RKewXJ6tYvRGyA2sNmKCBEbajIeBTIFz/6KevHi+fCSCsobxIffhtMm9PmwqHaplXZJSfNwrIbRpSXDKQkn/akYTs7kkJ7PO/Y9IeRMo
[… base64-encoded PNG image data for the preceding matplotlib plot omitted for readability …]",
      "text/plain": [
       "<Figure size 640x480 with 1 Axes>"
" ] @@ -1007,6 +367,197 @@ "source": [ "gpt4.inspect_history(n=1)" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Multi-hop\n", + "Let's try a multi-hop example" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')\n", + "dspy.settings.configure(rm=colbertv2_wiki17_abstracts)\n", + "\n", + "class GenerateSearchQuery(dspy.Signature):\n", + " \"\"\"Write a simple search query that will help answer a complex question.\"\"\"\n", + "\n", + " context:list[str] = dspy.InputField(desc=\"may contain relevant facts\")\n", + " question = dspy.InputField()\n", + " query = dspy.OutputField()\n", + "\n", + "class GenerateAnswer(dspy.Signature):\n", + " \"\"\"Answer questions with short factoid answers.\"\"\"\n", + "\n", + " context:list[str] = dspy.InputField(desc=\"may contain relevant facts\")\n", + " question = dspy.InputField()\n", + " answer = dspy.OutputField(desc=\"often between 1 and 5 words\")\n", + "\n", + "from dsp.utils import deduplicate\n", + "\n", + "class SimplifiedBaleen(dspy.Module):\n", + " def __init__(self, passages_per_hop=3, max_hops=2):\n", + " super().__init__()\n", + "\n", + " self.generate_query = [dspy.TypedChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]\n", + " self.retrieve = dspy.Retrieve(k=passages_per_hop)\n", + " self.generate_answer = dspy.TypedChainOfThought(GenerateAnswer)\n", + " self.max_hops = max_hops\n", + " \n", + " def forward(self, question):\n", + " context = []\n", + " \n", + " for hop in range(self.max_hops):\n", + " query = self.generate_query[hop](context=context, question=question).query\n", + " passages = self.retrieve(query).passages\n", + " context = deduplicate(context + passages)\n", + "\n", + " pred = self.generate_answer(context=context, question=question)\n", + " return dspy.Prediction(context=context, answer=pred.answer)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Prediction(\n", + " context=['Paris (disambiguation) | Paris is the largest city and capital of France.', 'Capital (French magazine) | Capital is a monthly French economics and business magazine published in Paris, France.', 'Paris | Paris (] ) is the capital and most populous city of France, with an administrative-limits area of 105 km2 and a 2015 population of 2,229,621. The city is a commune and department, and the capital-heart of the 12,012 km2 Île-de-France \"region\" (colloquially known as the \\'Paris Region\\'), whose 12,142,802 2016 population represents roughly 18 percent of the population of France. By the 17th century, Paris had become one of Europe\\'s major centres of finance, commerce, fashion, science, and the arts, a position that it retains still today. The Paris Region had a GDP of €649.6 billion (US $763.4 billion) in 2014, accounting for 30.4 percent of the GDP of France. According to official estimates, in 2013-14 the Paris Region had the third-highest GDP in the world and the largest regional GDP in the EU.', \"Administration of Paris | As the capital of France, Paris is the seat of France's national government. For the executive, the two chief officers each have their own official residences, which also serve as their offices. 
The President of France resides at the Élysée Palace in the 8th arrondissement, while the Prime Minister's seat is at the Hôtel Matignon in the 7th arrondissement. Government ministries are located in various parts of the city; many are located in the 7th arrondissement, near the Matignon.\"],\n", + " answer='Paris'\n", + ")" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "baleen = SimplifiedBaleen()\n", + "baleen(question=\"What is the capital of France?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 31 / 50 (62.0): 100%|██████████| 50/50 [00:00<00:00, 162.38it/s]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average Metric: 31 / 50 (62.0%)\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "/Users/ahle/repos/dspy/dspy/evaluate/evaluate.py:145: FutureWarning: DataFrame.applymap has been deprecated. Use DataFrame.map instead.\n", + " df = df.applymap(truncate_cell)\n" + ] + }, + { + "data": { + "text/plain": [ + "62.0" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "evaluator(baleen)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "for name, module in baleen.named_sub_modules():\n", + " if getattr(module, \"_compiled\", False):\n", + " print(\"Found compiled module\", name)\n", + "\n", + "result = optimize_signature(\n", + " student=baleen,\n", + " evaluator=evaluator,\n", + " initial_prompts=6,\n", + " n_iterations=60,\n", + " max_examples=30,\n", + " verbose=True,\n", + " prompt_model=gpt4,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[]" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAh8AAAGdCAYAAACyzRGfAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABMYUlEQVR4nO3deZRU9Z0//PeturX0Vr3S1SzdbC4NbjGg0GoSH0WJcRKN/DKJh0xM4smKRuWZSeJkso/iTE5i4jxoluPg5DcSfvE8MVGfSYwBRY2AQNSIYMtiBIGuBpruruql1u/zR9X33lvV99a+NfV+ncNJ6C6qLxfkfvqzfRUhhAARERFRmdgqfQFERERUWxh8EBERUVkx+CAiIqKyYvBBREREZcXgg4iIiMqKwQcRERGVFYMPIiIiKisGH0RERFRWaqUvIFUsFsOxY8fQ1NQERVEqfTlERESUBSEE/H4/Zs2aBZstfW6j6oKPY8eOobu7u9KXQURERHk4cuQI5syZk/Y1VRd8NDU1AYhfvMfjqfDVEBERUTZGR0fR3d2tPcfTqbrgQ5ZaPB4Pgw8iIqJpJpuWCTacEhERUVkx+CAiIqKyYvBBREREZcXgg4iIiMqKwQcRERGVFYMPIiIiKisGH0RERFRWDD6IiIiorBh8EBERUVkx+CAiIqKyYvBBREREZcXgg4iIiMqq6g6WI6pVR4bG8eiOwwhGomlfV++045a+eej0uIv69d8cGMXW/hP47BXz4bDz+xIiKh0GH0RV4v/ZcgD/Z9eRrF4biQrc/aFFRf3633niDWw/NIQ5rfW4/sKZRX1vIiIjBh9EVeLw0DgAYOV5XpzV2Wj6mr++O4IX9p/E8ZHJon/9/gF/4n9HGXwQUUkx+CCqEj5/PKD49GXz0bew3fQ1/+/ud/HC/pM4PR4q6tceGgvh9HgYAHDwxFhR35uIKBULu0RVwpfIZnQ1W/dytDU4AaDowcfBEwHT/09EVAoMPoiqgH8yjLFQvNHU63FZvq5VBh9j4aJ+/UOGgOPQyTFEY6Ko709EZMTgg6gK+EaDAIAmt4p6p3U1tK0+HnwMjRU786GXWkKRGI4NTxT1/YmIjBh8EFUB32ii5JJhfLa1wQEAmAhHMRFKP5Kbi4ODyaWWAyy9EFEJMfggqgIy+PBmCD4aXSocdgVAcfs+ZJ9HR2M8s5IajBARFRODD6IqMJBl8KEoClqLXHoJRqLamO+KRV4AnHghotJi8EFUBeSkS7pmU6nYEy/vnBpHTABNLhXLF8RHfDnxQkSlxOCDqArIhtN0Y7ZSsTMfssSyoLMRC2fEl5sdYvBBRCXE4IOoCmRbdgEMmY9iBR+JQGNhRwMWzGgAAJwMhDBc5F0iREQSgw+iKjCYQ/AhJ16Gxouz6+NQor9jYWcjGlwqZiayL+z7IKJSqbngIxyNVfoSiJLEYgKD/kTZJZvMR32JMh+JrIcsvbDvg4hKpWaCj33HR/Ge7/0RV/7guUpfClGSk2NBRGICNkUfdU1HbjkdKkJZRAihZThk0CGDEAYfRFQqNXOwXHOdA8PjYYwFI4jFBGw2pdKXRAQAGEw0m3Y0uqDaM38/0FrEzMegP4hAMAK7TUFPez2AePkFAA4OsuxCRKVRM5mPGU0uKAoQjoqifMdIVCwDWRwoZ6RlPooQfMhJl562erhUOwA9A3LoJDMfRFQaNRN8OOw2tDfEdyjIbZJE1cDnj/997GzKLvjQej6KEESn9nvE/388+Dh8apw9UkRUEjUTfABAVzODD6o+Pi3zkXnBGKBPu5weC0OIwk6fTe33AOKLzhqcdkRiAu+cGi/o/YmIzNRU8OFNfGc5MBKs8JUQ6QayPFROkns+QtEYxgo8XE7PfOjBh6IoWMCJFyIqodoKPhI1dWY+qJrI7aadWQYfdQ47XGr8P91Cm071HR8NSR/nxAsRlVJtBR9NDD6o+vhyzHwoiqJlPwppOh0PRXB0eAIAsKCjMelz2q4PTrwQUQnUVPAha+oDDD6oivhy2G4qaee7FNB0KrMe7Q1ObYJG0sZtmfkgohKoqeBD/uMu09xElTYZjuJ0Yk16tpkPoDjnu5j1e0jGLaeFNrUSEaWq0eCDmQ+qDnLBmNthg6cu+51/xdj1cdCi3wMA5rbXw6YA/skITgQYrBNRcdVU8CG/sxwaCyEYKWxKgKgY5I4Pr8cNRcl+625bfWLctoCyS7rMh9thR3dbfOMp+z6IqNhyDj6OHj2KT37yk2hvb0ddXR0uuOAC7Nq1S/u8EALf+ta3MHPmTNTV1WHFihXYv39/US86Xy31DjgTUwKDLL1QFZDbTXPp9wCMmY/8T7aV200XzJia+QB4wBwRlU5Owcfp06dx+eWXw+Fw4Pe//z327t2LH/7wh2htbdVe8+///u944IEH8NOf/hQ7duxAQ0MDVq5cicnJypc6FEWB18NFY1Q98mk2BfSej+E8Mx+xmMDbJ6cuGDNa0BEPSmRjKhFRseR0sNy//du/obu7Gxs2bNA+Nn/+fO3/CyHw4x//GP/yL/+CG264AQDwy1/+El6vF7/97W/xiU98okiXnb8ujxtHhiZqrulUCIG/HB5Gb1cTGlw1c55gxfz13WF0t9ZPmSJJpY/ZZrfdVNKmXbLs+Rj0T2L7oSGteXR0IoxgJAan3YY5rfWmv4YTL0RUKjk9hZ544gmsXLkSH/vYx7B161bMnj0bX/7yl/G5z30OAPD2229jYGAAK1as0H5Nc3Mzli1bhm3btpkGH8FgEMGgHgiMjo7m+3vJilzkVGvjts/2D+Kzj+zCje+ZhR9/4uJKX84ZbcehU/j4z7fjsoXt2Pi55WlfO5AIgvPNfGTb8/HF/70bfzk8POXj8zsaYLc44VlmRN4cGIUQIqeeFCKidHIquxw6dAgPPfQQzj77bDz99NP40pe+hK985Sv4r//6LwDAwMAAAMDr9Sb9Oq/Xq30u1bp169Dc3Kz96O7uzuf3kTXZdDpYY8HHrr+dBgD84Y0BTBS4kpvSe+K1YwCAlw6eyvj3LN+yi575yK7n4y1fPHuxZG4rLlvYjssWtuN9Z3fgH1eea/lrLpzTjDqHHb7RIN44VtpvCoiotuSU+YjFYli6dCnuvfdeAMDFF1+MPXv24Kc//SluueWWvC7g7rvvxtq1a7Wfj46OljQAkT0ftZb5kKnzyXAML+w/gWvP66rwFZ2ZYjGBZ/b6tJ8/s8+H1cvmWr5eK7s055/5yJSVCAQjCAQjAIBffvbSrMtubocd7z+nA0+/4cMf9/pw/uzmnK6RiMhKTpmPmTNnYvHixUkfW7RoEQ4fPgwA6OqKP9B8Pl/Sa3w+n/a5VC6XCx6PJ+lHKcnvMOWUQa04aGga/ONeX5pXUiFee3cYg369jPjHN6zvtRBCn3Zpyi34aEmM2kZjAqOTkbSvlQFOk0vNud/n2sXx/27/+IZ55pKIKB85BR+XX345+vv7kz721ltvYe7c+Hd28+fPR1dXFzZv3qx9fnR0FDt27EBfX18RLrdwWtnFXzsNp+FoDO+c0oOPzft8iERjFbyiM5cM7M
6fHQ+iXzp4Ev5J89LI6EQEwUj8z6Ezx4ZTt8OOBqcdQOYtpz4Z4OSYXQGAq3o7YbcpeHPAjyND4zn/eiIiMzkFH3fddRe2b9+Oe++9FwcOHMDGjRvx85//HGvWrAEQH2W988478a//+q944okn8Prrr+NTn/oUZs2ahRtvvLEU158zY+ajVtZGHxkaRzgqUOewo6XegdPjYex653SlL+uMJDMEX3j/QizoaEA4KvBc/wnT18rSX2u9A26HPeevpe36yNB0OqD1leQW4Mivccm8+Cg9M2ZEVCw5BR+XXHIJHn/8cfzqV7/C+eefj+9///v48Y9/jNWrV2uv+epXv4rbb78dn//853HJJZcgEAjgD3/4A9zu3L/rKgUZfEyEo/AH06erzxSy5LJgRgOu7o03A6crB1B+Dp4I4OCJMTjsCq48dwauOS9+r5+xeGjn22wqZXu+iy/PiRqJpRciKracN5z+3d/9HV5//XVMTk5i37592pitpCgKvve972FgYACTk5P405/+hHPOOadoF1yoOqcdHne87u2rkb4P4xrta+UDcd9AzWR+ykUGGX0LO9DkdmgP7WffHEQoMrXMNVBg8JHtro9Cg5xrFsf/zuz821BBZ8kQEUk1dbaLJCcLamXi5ZAh+Hjf2R1wqTYcGZrAmwP+Cl/ZmUVmBq5NPKwv7m5BR6ML/mAE2w+dmvJ6rRcjj3IIkP2uD32RWX7BR3dbPRbP9CAm4v1CRESFqsngQz/dtjaaTo2nl9Y7Vbzv7BkAWHoppsHRSbxyZBiAnimw2RTt//9x79SShTxULt+gINtdH4VmWABoGTP2fRBRMdR48HHmZz6EEDgwmHx6qf4gYQ2/WP60bxBCAO/pbkl6yMssyJ/2DiIWSy5zDYwkejHymEIB4o2qQA7TLnlmWAA9oHph/wkuqSOigtVk8NFVQ8HH0FgIIxNhKEp8lTYAXN3bCZsCvHFsFO+e5vhkMchATgZ2Ut/CdjQ47RgYncTrR0eSPjfoz2/Hh5TNtEssJrSx8lwXmRktnunB7JY6TIZjeH6/+fQOEVG2ajL40Lac1kDDqSy5zG6p08Y52xtdWDqvDYD1JAZlLxCM4KUD8Z4O2WQquR12XHluJ4CpmSb59y/foCCbaZdTYyFEYgKKAnQ05p/5UBRFb1bm3xkiKlCNBh+1k/kwTroYyXIAHySF29p/AqFoDAs6GnBW59Tj6bUyl6HHJhKN4WQgnpHIdcGYpPV8pMl8yL/jHY0uOOyF/ecuAysuqSOiQtXk2eryO81qaziNxQR++Ew/5nc04n8tmVOU9zw4aBV8dOFf/7992PH2EIbHQ2ipT3/0O1mTGY1rUkou0pXndkK1Kdg/GMDHf7YNdpuCSFQgJgDVpqCjocBplzSZj0InXYwumdeqLanb+bfT6FvYXvB7ElFtqunMx4lAENFY9ey6+PPBk1j/7EF854k3iraDQ8t8dDYkfbynvR69XU2IxgS2vDlYlK9Vq15NTLl8IDFFlKq5zoEPnBP/3I63h/DSwVN4+W9DAICzOhthszjSPpPWhnjD6fBE2PLvcSHbTVOpdhuuOKsDgP57JiLKR01mPjoaXbAp8UO5TgaCBY0gFpNMyweCEfiDEXjcjoLfUxuznWFSDljsxZsDfvzxDR9uem9xMi21aGQiPuqarnzyw7+/CH8+cArRlKDy0kTvTT5k2UWI+DXITIhRodtNU8kMSqbdIkRE6dRk8GG3KZjR5IJvNAjf6GRVBB+pR7EPjk4WHHxMhqM4kphmMQ0+zuvCA1sOYOtbJzAZjuZ1vkitE0JgNBF8pPvzaql34voLZxb1azvsNjS5VfgnIxgaC5kHHyPFK7sAhgkbbjologLUZNkF0P8xrpaJl9ePjiRtXJU7IArxt1NjEALwuFV0NE59MJ03y4NZzW5MhKN4cf/Jgr9eLRoLRSErHk1FyFTlKtOWU7nIrFgBdrbnyRARpVOzwUennHjxV0fTaerUSTEmcQ5pm00boShT+wri45Ndpl+fsiOzHg67Arej/P85ZTrfRQbX+S4ys/x6LLsQUQFqNvjQFo1VSeZDTkw0ueKVsGKcO2M16WIkN1f+aZ+vqppvpwv/ZPxkZI/bYRrglVqmTISviA2n2Xw9IqJs1G7wUUWHy719cgxv+QJQbQpuuHgWgOJkPuSky4IZDZavuXR+GzxuFafGQvjL4dMFf81aMzqZ6PeoK3/JBUifiQhGojg9Hr++YvV8tCUmbNjzQUSFqNngo7Mp/p1gNSwaeyaR9Vi+oB3neJsAFCv4sJ50kRx2G65eJJdg8ayXXOnNppXp3ZbBgFkmYjAx6eJSbWguUnAkg53RyQjCXDRGRHmq2eBDXzRW+eBDjthee54XnU0yI1NYL4oQwnK7aaprF+snlhZrv0itqHjmQ2s4nXqyrc9wmm2xSkLNdQ7Itxo2+ZpERNmo2eBDX7Fe2YbTk4EgdifKHSsWebWgaLDAoGhgdBLjoShUm4K57fVpX/v+c2bAqdrwzqlx7E/0iVB2Rif0no9KaKu37sEYKOJ2U0m161kU7vogonzVfPAxMhHGZLhyR4Rv3ueDEMAFs5sxq6VOe1AM+gvbvnpwMF5y6Wmvz3imR4NL1TZXsvSSG63sUleZsku6k23lpEu+Z8dYacswYUNElEnNBh8et4q6xFKtSu760EouidJHR6NT2756KpB/VibbkotkLL1Q9rSyS6UyH2mmTwYTY+TFzHwAhlIPgw8iylPNBh+Komjjh5Xq+xgLRvDCgfhyL7lvQ7XbtKPPCykJ5Rp8XL3IC0UB/vruCI4NT+T9dWuNVnap9LSLWdlFbjct0o6PKV+TZRciylPNBh+AXnqp1Ljt82+dQCgSw9z2epzj1YOEYlyXHnxYj9kazWhyYUlPK4D4zo9qEY2JvI5vD0ViGZtng5FoxomNaEykLcvpmY9KTbtYT5/IoLqzyJmPdBM2RETZYPAB4HiFyi5/2hc/TfaaRd6kaQS9GTa/6xJCYL9PnmabXeYDiE/bANW17fSu//Mq3vO9Z3IqjZ0MBHHFv23B5365y/I1kWgM197/PP7ugRcRS9Nb8/c/24b3//uzmAiZByCVnnYxTp+kZj98JWg4BdJP2BARZaOmg4/FszwAgJcOnqrI15eHvl3U3ZL08a7mwspBb/kCGPQH4VRt6O1qyvrX9S2IN53uOz6a19cttpOBIJ766zEEghHseDv7P6M/7BnAoD+I7YeG0rx3CO+cGke/z285tRGMRLH7ndMY9AfxztCY6WsqPe1ityk4K1Fa235Iv0dCCC1zVqztplK6CRsiomzUdPAhV4tvO3hS+w62nOSkROoCKK/c9ZFnRkZOrLzvrA7UO7MvB8hNqCcDIQxXQT1/y75B7dA2uTAtG7JpNhCMWJZsjH/eVuWtQUPPjdVkh575qNwB0fLvsWxeBuJlmMlw/Pde7FOb003YEBFlo6aDj4UzGrFwRgPCUYHn+k+U/etr54KkBh9yAVqeh97Jh68so2SrwaViZuJr5/KwLxV53g2g97BkMjoZxraD+gm9gWDE/HUTe
vBhlWEyBiWnx8yDU33DaWUyH4DerPxc/6DWnyJ/Ty31DrgTU13FwswHERWqpoMPQP+HuxL7LaxWc3sLOPTu2PAEXj86AkWBtjY9F3I6JtuHfamMBSN4fr8eRBzMcvnZc/0nEI7qPRyyLJLKmPmwmioyBiVm3+ULITBqEUCW04Wzm+H1uDAWimJbooSobTdtKm7WA2Dmg4gKx+AjkbJ+rv8EgpHyLRuLxgT8QfMHl3birj/34EM2iy6d26qN7OZCTsdUOvh4YX98Eqgxccrv2yfHslq6lhpEWpXTjEGJVXnL+HGz7/LHQ1HtmiqZ+bDZFL30kvjzl9fuLfKYLWDcLcKGUyLKT80HHxfNaUFnkwuBYCRtg2KxBSb1h19TSuZDBh/D47lvX5WlimsXd+V1XXI6Rm5IrRT5EP1fS+bAabchGIll3D8SjES18pkzsdXVWF4xMgYlgxZB3qA/fc+HfA+HXYHbUdn/lOSf9zN7fYjFhCHzUdxmU0AvuwSCkbIG7ER05qj54CPpu8Yyll7kg8vtsMGlJtfkPXUqXGr8jyaXiZeR8TB2JAIo+XvKlSy7HKpg5iMSjWFzYgz5QxfMxLyO+Nk0BzJc0/ZDQwgEI+hscuH82fFJJuvMh6HhNJvMh0mJwTjpUqyD2/K1fEE7mlwqTgaCeOXIsFZKKvaCMSAeLNtt8d8vD5cjonzUfPAB6A9q+V1jOYykaVRUFMVw6m72TafP9g8iEhM4x9uIeR3ZLRdLJYOPd4bGEYpU5sj0l/82hJGJMNoanFgyt1XvQ8nQ9yGDx2sWe9Eij3637PkwlF0s7rGx4TRd5qOS/R6SU7Xhyt5OAPHs14DhRNtis9kUtNbHf88834WI8sHgA0DfwnY0ulQM+oN47d3hsnzNTA8ubdw2h8xHoSUXIL4TosFpRzQmcNhit0WpyZHRq3s7YbcpejbmpPX1xGJC63e5ZrFXa+LNJvNhdYKw8ePmmY/KbjdNda0hiB4sYfAB6CvWOfFCRPlg8AHApdpx5bkzAJTvYDU9ZW/+4JKNglYPxlSTYb3fIdcRWyNFUbS+jwMV6PsQQg8i5CTSws5EE2yazMdr7w5j0B9Eo0tF38J2NCUyStn0fJwaC03pXTAu6QLMmyurKfMBAFeeOwMOu4JDJ8aw77gfQPG3m0o834WICsHgI0E+6Mq1WjzTg6srsZUy20VjLx08ifFQFF0eNy6Y3VzQtemZhvL3few9PoqjwxOoc9jxvrM7kq4n3e4R+ed25bkz4FLt2tIvY3nFKLUccyJlp4pxSReQueejGjS5HbhsYfyehaJywVjxG04BoJXnuxBRARh8JMjvGg8MBsoyZioXjDVZPLhyPVzOWHIotPlxQYfMNJQ/8yFLLu8/p0NbjrUgEXycDAQxYtHg+MeUbIknh8wHMLWxV/5cTs2Mh6JTJo+0sksFt5umMjYa220K2vMYt86GHLcd4rgtEeWBwUeCx+3A8gXtAMqT/cjULyCDj8EsGk6jMWOpIv+Si6SN21Zg4uWPWhCl9600ulStfHDQJBtz8EQABwYDcNgVrXwmM0qZej7UxNTGwEjyfZYZp3kd9dprUrMf+om21ZH5AJKDj84mlzaVUmxazwfLLkSUBwYfBuXcdpqx4TRN5uNPe334wdNvaj++9bs9OBkIocmtYtn89oKvzbjlNNOx9EC8L+WRP79tuco8W0eGxrHv+ChsSrzZNOma0vR9yMBr+YJ2LRDQMh8WZReZeZqfyPJYZT66muv0jZ4pJQar9fiV5PW48Z7EQYWdJer3AIyZDwYfRJS76skXV4GrezvxTQCvHBnGZDha9DMxjDL1C2hbTkcnIYTQSimDo5P4wn/vNt32eXVvJ5xq4fHk3PZ62JT4w/VEIIjODCu6/2PLAfzv7e8gGInhCx9YmPfXlVmPS+a1aQ98aeGMRvz5wCnTvg8ZLF5r+K5f6/kwKbvE16LHP36Otwn7BwOWwYe3yQXfiBMn/MEpTad65qO6/jP64PldePXIMOa115fsazDzQUSFqK5/NStMriMXAghGYqUNPjKchtqZaBQMRmIYmQhreyue2edDNCbQ3VaHFYazW1yqHZ/qm1uUa3M77Ohuq8c7p8ZxcHAsY/Dxt1PxgODNAX9BX/eZxKjwyvOmjgpbnTkz6J/EK0eGASSXamRQ5zfJfEyGY9r5L2clSkypGaYBLfPhxpHT4wCmTnbIANKqb6dSPnv5fDjttrwXzWWDmQ8iKgSDDwOHXa+PWx3FXiyZTkN1O+xoqXdgeDyMgdFJLfiQDZk3X9qDL195Vsmub+GMxnjwcSKAvoXpSzmyL6WQHpHTYyG8/Lb1dlar4GPzvkEIAVzU3ZK0zVPr+TDJfMjAz25TsGCGVdkl/nvyetyGs0wsej6qqOEUiC8c++wV80v6NVot7gkRUTbY82GgKIrWXGg8GbUUsjkNVS+9xB+E/skwXkocF1/IIrFs5HLAnMwSHBzMrkfEzOY3BxETwKKZHnS3TS0XyJ6Pw6fGETYEhmYlF0AvhfiDkSklKmOzrzflHks+w5IubadFavCRIYA8k7VxzwcRFYDBRwrVLoOPcmU+rL9r1h6MickLeVz8ghkNWrmgVLLZrQHEl5vJVfFjoWhO6+CNrIIIqcvjRr3TjkhM4PBQvAwSCEbw5wOnTH+dsRQSSCm9GJt9U3trJK3h1Jj5mDLtUn0Np+Ui93xMhmOYCPFwOSLKDYOPFI7EXodIic94yWY7plwQJR+E2i6LEmc9AOPptukzH6nlinxKLxOhKJ7fn347q6IoU8542dp/AqFoDPM7pgZjTtWGukTPTuq4rbHZVwZ446Eo/IlpnUg0pi0d83pcppkPIURNZz4aXapWpmT2g4hyxeAjhQw+Spn5iMWENpaa7sHVZRi3DUVieO7N+EmvxdjlkYl80B8dnkj7nW3qBtZ8go8X9p/AZDiG2S11WDzTY/m6BVopKJ6N0c+yMV+sJnsxRibMp1Sa3CrqnHYt+yQzTCcDIcSEvqTLLPMxEY5qAWq19XyUg6IoPN+FiPLG4COF3vNRuuDDH4xAZvib0pRdOg0lge2HTsEfjGBGkwvvmdNSsmuT2hqc2smlb6c50M2XspY8U6bEjHFBWrrtrMam03A0hi0ZgjF910dq5iM5Y5F6grDM5sglXa0m2zxl9kS1KVqGpdZw4oWI8sXgI4VWdilhw6l8+LlUW9pxXmPDqfwuf8UiL2wl2lqZymrCxEhmC2TQlu7kWTORaAx/2qevhs/2enYcGoJ/MoKORife091q+np94iW150P2asQDv9SFbvJ/ZfDXZvIdvrFsVug6++mKuz6IKF8MPlI4ytBwmu1pqPI78uMjk0Vdn56tBVlMvMgH9UWJrZq5Zj52v3Map8fDaK5z4NJ5bWlfa9xy+vQbejBmtUJcllMyZT68hgwToJ8kLA/3k82VQ+MhrSk1m4bhM53VCDIRUSYMPlKoWs9HKTMfst8j/YNLLho7GQjCNxpEg9OOyzLs3CimbCZe5ANbXtexkUmM5bBmXTbRXr2oU7v3Vua1N0BR4pmL3716
FED6YMxq10dq8NeVEnwMGMZsAf0hG4rEMJ7of8k2gDyT6UEZD5cjotww+EihT7tUPvPR0ZB8MNiVvZ1wqeXrL0idLjEjH9jndjWhPfGQTtcjYiSEMDSNZp7gcTvs6G6N7wAZnYyg3mnXjpA3Y3W+S2rwJ6eKZPOsPGROBh91DjtcibX1sr8h03r8WmBWjiIiygaDjxRlKbtkOaJpsynobNKPRLfagVEqctz20MkAYhajx7JJs8vjzqpHxKjf58eRoQm4VBvef451EJF0TYlSEABcee6MtD0zVue7pAZ/Wtkl0Tw76Nd3fAApkx2J/oZq3W5aTlojLns+iChHDD5SlGPDaS7LqeSDUbUpuPLczgyvLq7u1jo47AomwzEcG5mY8nkhRFKJIt3Js2bkqvj3nd2Bemd2D3EZ4ACZG1SznXZJXeYmMyBew6mwqSfb1vKOD4k9H0SULwYfKco57ZJNs6L87rtvYTuay9xfoNptmNceDygOmAQUw+NhhCLxDFGnx5X1VlQpl5KLJLMxdpuCq87NEHxknHZJHrU9EQgiGhP6dtNmPevUluhv0DMftbvdVLJaO09ElAmDjxTlWDKWS7OiPNRt9bKekl1POjKgOGQSUPgS5YnWegdcqj2nssvR4QnsOToKmwJctSj7jE7fgnY4VRs+fOFMNNenv38ZMx+JkklHows2BYjGBN49Pa4FFp3GzEd98q4PTrvAcu08EVEmtfsvp4VynO2SS7Pip/rm4qPvnV2x9P7CzgbgDfOAIrU8oQUqJ8cQjQnLEVgAeCYxKrt0bhs6Gl2Wr0s1r6MBf/nmNVoDaDpmPR9CCD34S9xTu03BjCYXfKNBvPbuCACg3mlHk0v/zyO1xMBpF+PJtmEIIWp23wkR5Y6ZjxTlONsll2ZFRVEq2leQLpuhlyfiwcfs1jo4VRtCkRiOnp7aI2L0xwL2lsTPFcki+EjcN79h2mUyHNP6eYyBgyxv/fXIsPZz48O0NeUUV0676NMuoWgMYzxcjohywOAjRTVNu1SDtGWXxKSLtyn+4LbbFMxvz7yYbGQ8jB1vDwHI3DRaCLM9HzLwsylAg1OflJEllr8mMh9yx4pknfmo3eRhndMOtyP+TwibTokoFww+Uqi2MiwZm0bNinLL6aA/OKV3Qpt0adZ7I7SJlzTBx5Z+H6IxgXO9TZjb3mD5ukLJfgx/MIJoLGUzacpadJn5eP3oSNLPJU67mGPTKRHlg8FHCn3apRyZj+r/rrnJ7dB2jaRmPwa1MVs9S5DNxIscsS31qvgmQ2AQSAR8qf0ekiwdTYTj5QNvSvDRNmXPx/QJIEsptRxFRJQNBh8pylF28U+zZkWrTacDo8nLuJJea5H5mAxHsfWtEwByG7HNh1O1aSfOyqBD69VIKZcYl7kBU4MPbZV4ornSbxHE1Bru+iCifDD4SKFPu5Sm7BKLCfiD06tZ0aqUkrqGHDD2iJgHH38+cBLjoShmNrtx/mxPKS43SVMiuzSSyDZlynxY/dw4VjoRjhqaVqs/e1VKqeUoIqJsMPhIUeqzXQKhCBIHo2oPxmpnls0IR2M4NTY1+JA9IicDIQybpOK1kstib1lGM7Wm08nU/RzJwUdqpsOb0nAqywvRmNAmeVSbomVWalVbffLyNSKibDD4SOEo8am28uHnUm1pzyWpJmZ9HCf8QQgRfwDLA+UAoMGlYmYia5Da9xGNCfxpn+z3KG3JRZJ9NbLcovdqJAd+U4OP5J+7HXbUJ6Zj3jk1nngPR83vttAzHzzZloiyx+AjhX62S2kyH3rPwfQouQD6SvN3To1pjbhyx0dnkwu2lGViVn0frxw+jVNjIXjcKi6d31bqywaQfebD41aTshidTcnBB6BnP/52akz7NbWOPR9ElI+cgo/vfOc7UBQl6Udvb6/2+cnJSaxZswbt7e1obGzEqlWr4PP5in7RpaSW+GwXvedg+jy4ZnrcqHPYEY4KHEmUHHwmY7aSPHk2NfiQi8Wu6u3MaklYMWgr1lN7PlKCP0VRtFJLe4MTTpMNqvJBa8x81DpOuxBRPnJ+Apx33nk4fvy49uPFF1/UPnfXXXfhySefxGOPPYatW7fi2LFjuOmmm4p6waXmLPG0i3HPxHRhsylaL4eceNFWq5tkCGSm5OCgXnYRQuDpxEr1cpVcAMOKdTlqq20mnRr8yVJLaslFkiUGmfmYLj07pcTMBxHlI+d/PVVVRVfX1IfHyMgIHn74YWzcuBFXXXUVAGDDhg1YtGgRtm/fjuXLlxd+tWWglrrnI/EQbJomky7SwhmNeOPYKA6eCGAFvPD5482mqVMh8rUAsO/4KJ5JZDtOBYJ459Q4nKoN7z9nRtmuW1+xnj7zARiDD/OzZmRzpZb5mGZ/hqUgMx+D/qD2Z01E1a/BacdlZ3VU7OvnHHzs378fs2bNgtvtRl9fH9atW4eenh7s3r0b4XAYK1as0F7b29uLnp4ebNu2zTL4CAaDCAaD2s9HR0fz+G0Uj+z5KNW0y3RaMGaU2sfhSzlUzuy1R4cn8Llf7kr63BVndaDRVb7fu75iPaXh1CRwkI2yXc11pu8lMx/vnmbwIXU0xu/JyER4yp81EVWvBTMasOX/vrJiXz+np8CyZcvwyCOP4Nxzz8Xx48fx3e9+F+973/uwZ88eDAwMwOl0oqWlJenXeL1eDAwMWL7nunXr8N3vfjeviy8FWesvec/HNCq7APoIrZxgGTDZbip1NbvxhQ8swI5DQ0kfd6k23H7VWSW+0mRaz0fivvvTlL0+tnQO3j45htXLekzfS245lWcO1vqODyB+Js7n3jcfO/92utKXQkQ5mN1q/k1WueT0r+d1112n/f8LL7wQy5Ytw9y5c/HrX/8adXX5/UbuvvturF27Vvv56Ogouru783qvYpBnu4RKPe0yzb5rltmMA4MBCCH0E20t+iPuvm5R2a4tHa3nY0rD6dS/+md1NuHnn1pq+V6thpFiYPr9GZbKN65fXOlLIKJppqCRg5aWFpxzzjk4cOAAurq6EAqFMDw8nPQan89n2iMiuVwueDyepB+VJDeclupsl+l6Gur8jgYoSjy9PjQW0k607bQIPqqFnvmIQAhRUPAn+xu0955m2SsiompRUPARCARw8OBBzJw5E0uWLIHD4cDmzZu1z/f39+Pw4cPo6+sr+ELLxaltOC3tkrHp9l1zndOO2S3x7NZfj44gkFgRb9ZwWk30no8wgpGYltHKJ3CQ57vo7z29AkgiomqR07+e//iP/4gPf/jDmDt3Lo4dO4Zvf/vbsNvtuPnmm9Hc3Ixbb70Va9euRVtbGzweD26//Xb09fVNm0kXQM98hCKlznxMr+ADiJde3j09gW0HTwEAGl1qWZtH86FtOJ0Ma4GfTYl3eueqjWUXIqKiyOnJ8e677+Lmm2/GqVOnMGPGDFxxxRXYvn07ZsyIj07ef//9sNlsWLVqFYLBIFauXIkHH3ywJBdeKrLno3SZD+s9E9Vu4YxGbH3rBP584CQAoNNiJLWayCAvEIxg2NBsms9a9DaWXYiIiiKnJ+CmTZvSft7tdmP
9+vVYv359QRdVSU61XD0f0+/BJU+33Xs8Pg5t1WxaTeQiMCHio79A/hmLltTgg5kPIqK88GyXFPq0C3s+UsmJF3kqr9Um0GriUu1wO+J/pu8mVsPn26vhVG1oMpSZ2PNBRJQfBh8pSjntEosJ+IPmp6pOBzL4kKZD8AHogV4xloMZx22nYwBJRFQNGHykKOW0SyAU0bIG0/HB1dHoTOpV6ZoGPR+AXuLSMh9FCD7sNgX1eTStEhERg48p5NkupZh2kSUXp2qD2zH9HlyKomiHxgHTKfMRD5gKLbsA+vkuHreaV9MqEREx+JiilGe7TNftpkYLOgzBR5Xv+JBk5uNoEcsu07FhmIioWjD4SFHKs12m63ZTIznxAkynzEc8UDgZiB/7XkjgIMdtp3MASURUaQw+UsjMRynOdpnOky6Ssem0s2m69HwkB3uF7FjRMx/TN4AkIqo0/guawmEvZeZDTrpM3+DjvFke2BRgbnuDdq+qXWqwV8j9n98Rz/x0t9YXdE1ERLWMwUcKLfgoSc+HzHxM39s+p7Uev/5CH2ZMk6wHMDXYKCTztPK8Lmz4zCV4b3droZdFRFSzpu9TsETkno9wVEAIUdSJBv8ZkPkAgKXz2ip9CTlJDTaaCgj+7DYF/9e5nYVeEhFRTZseefMyctj0W1LsXR9aw+k07vmYjqb0fEzz4I+IaLpj8JHCoeqZjmL3fWhlFzYrllUxez6IiKhwDD5SqIbMR7EnXpj5qIypPR8M/oiIKonBRwqH3Zj5KHLwMXFm9HxMN8YeD5sCNDgZfBARVRKDjxSKosCubTktVc8HH37lZMw0NbkdsNm4Fp2IqJIYfJiQ2Y9in++ibzhl5qOcjJkP9tsQEVUegw8TcuKl6JmPM+Bsl+nI7bDDlVibz3tPRFR5DD5MyF0fxez5iMUE/GfA2S7Tlcw2MfggIqo8Bh8m5JbTYk67jIUikIkUPgDLT/bZMPAjIqo8Bh8mSnG+izzXxana4HbYi/a+lB1mPoiIqgeDDxNa2aWI57ucCSfaTmfyvrPZl4io8hh8mJCZj3AxMx9nwKFy0xkzH0RE1YPBhwnVJg+XK17mYyIcBQDUOVlyqYRrFnvh9bhwxdkdlb4UIqKax2/DTZSi50NmUeR7U3l95KJZ+MhFsyp9GUREBGY+TMklY8XMfMixXSeDDyIiqnF8EppQS9DzIcd2VTtXexMRUW1j8GHCUYJpF5ZdiIiI4vgkNFGKaRdZwmHwQUREtY5PQhOqTQYfxe/5cLDsQkRENY7BhwlHCc52CbHsQkREBIDBhymWXYiIiEqHT0ITailHbVWWXYiIqLYx+DDhSPR8RGLFHLWNv5fsJyEiIqpVfBKacKjFz3yw7EJERBTHJ6GJkk67sOxCREQ1jsGHCX3apQRnu7DsQkRENY5PQhOlmHYJsexCREQEgMGHKf1slyL2fERYdiEiIgIYfJhy2Ip/toucnGHZhYiIah2fhCZKeaot16sTEVGtY/BhwlGCJWN62YW3nIiIahufhCZkU2gxp120sgsbTomIqMbxSWiiFOvVwyy7EBERAWDwYcpRgmmXUISjtkRERACDD1PakrEinu3CsgsREVEcn4QmSrFenWUXIiKiOAYfJkrRcMqyCxERURyfhCZKMmrL9epEREQAGHyYKsWSMb3ng2UXIiKqbQw+TOgNpyVYMsbMBxER1Tg+CU3keqrt8ZEJ/P1Pt+F/Xj9u+ZpQlNMuREREAKBW+gKqkWrLrefjuf4TePlvQ6h32fGhC2aavkZmURh8EBFRreOT0ESu0y6ByQgAIBi2Dlb0sgt7PoiIqLYx+DCR64bTQDCS8fVhll2IiIgAMPgwlevZLjL4CFm8XgiBMMsuREREABh8mHLKskuW69XHZPARMQ8+ojEBkXgrll2IiKjWMfgwUezMh3FqhpkPIiKqdXwSmtDPdhEQInP2I5Ah82EMShh8EBFRreOT0ITTECBEsyi9ZCq7RJKCD5ZdiIiotjH4MKEaAoRsFo0FgtHEa9OXXVSbAkVh8EFERLWNwYeJpOAjixXrgWAYgHXmg4fKERER6fg0NOGw6bclm0VjY4nMh3XDKReMERERSQUFH/fddx8URcGdd96pfWxychJr1qxBe3s7GhsbsWrVKvh8vkKvs6xsNgX2HFas60vGzBtUuWCMiIhIl/fTcOfOnfjZz36GCy+8MOnjd911F5588kk89thj2Lp1K44dO4abbrqp4Astt2zPdwlFYknlFrPsB8suREREuryehoFAAKtXr8YvfvELtLa2ah8fGRnBww8/jB/96Ee46qqrsGTJEmzYsAEvvfQStm/fXrSLLodsz3eRky6SWd+HFnyoLLsQERHlFXysWbMG119/PVasWJH08d27dyMcDid9vLe3Fz09Pdi2bZvpewWDQYyOjib9qAaOLBeNBVKCD7PpGK3sYmPmg4iISM31F2zatAl/+ctfsHPnzimfGxgYgNPpREtLS9LHvV4vBgYGTN9v3bp1+O53v5vrZZScatcXjaUzFsoh88GyCxERUW6ZjyNHjuCOO+7Ao48+CrfbXZQLuPvuuzEyMqL9OHLkSFHet1CORM9HJMOobWAyc/ARYtmFiIhIk1PwsXv3bgwODuK9730vVFWFqqrYunUrHnjgAaiqCq/Xi1AohOHh4aRf5/P50NXVZfqeLpcLHo8n6Uc1cKgy85Fb2cWs4TSiLRlj5oOIiCinssvVV1+N119/Peljn/nMZ9Db24uvfe1r6O7uhsPhwObNm7Fq1SoAQH9/Pw4fPoy+vr7iXXUZ6NMumRpOo0k/T1d2cbLsQkRElFvw0dTUhPPPPz/pYw0NDWhvb9c+fuutt2Lt2rVoa2uDx+PB7bffjr6+Pixfvrx4V10G2U67yO2mUtpRW5ZdiIiIcm84zeT++++HzWbDqlWrEAwGsXLlSjz44IPF/jIl57BnW3ZJznyYvZ5LxoiIiHQFBx/PPfdc0s/dbjfWr1+P9evXF/rWFaVmOWqby54P9nwQERHxbBdLcidHJFa8JWNOll2IiIgYfFiR/RmZMh/+LKZdWHYhIiLS8WloQZZIMk+7sOxCRESUCz4NLcj16pEcez5MG04jLLsQERFJDD4saNMuGXo+/FlsOOV6dSIiIh2fhha0s11MggkjebZLncMOwKLnI8YNp0RERBKfhhayPdtFbjhta3ACsMh8RLhkjIiISGLwYcGR5am28myX1gYHgPQbTrlenYiIiMGHpWyXjMlTbVvr02Q+Yhy1JSIikvg0tJDN2S7RmMBEOLnskm7aRQY0REREtYzBhwU5ahtO0/Mhm02BDJkPll2IiIg0fBpa0KddrDMfsuTisCtocsePyWHZhYiIKD0+DS1kM+0iF4w1uFQtqxEyKdOw7EJERKRj8GEhm2kXOenS4FThUBPBB5eMERERpcWnoQWt7JJm2kUGH01uY+bD+mA59nwQEREx+LCUzdkuxrKLzHyYbURl5oOIiEjHp6EF1SanXdKVXeJjtg0uFa60mQ/2fBAREUkMPiyky2RIMvPR5FLhTNvzwbILERGRxK
ehBUfiELhI2syHLLvYtZJKuswHyy5EREQMPixls149YBy1zWLahWUXIiIiBh+WHFlMu+RadmHmg4iIiMGHJX3aJfOG0waXqq9jNwlWIlyvTkREpOHT0IKa6PlIP+2iBx8u1brnQ249ZdmFiIiIwYelrKZdEgfLNbpUOO12ANxwSkRElAmfhhayOdtF7vlodKlwqNZlF55qS0REpOPT0IJcr56+5yMMIPlguaBJ5kO+hwxQiIiIahmDDwuygdSsh0MaM2Q+5LRLauZDCKG9h+wjISIiqmV8GlpwZJH5kKO2jcaD5VIyH8YlZSy7EBERMfiwJCdTrHo+hBAIhPQNpzLzERPJh9EZgxeWXYiIiAC10hdQrfQlY+aZj/FQFCLxqUZX8m0MRwXU+PBLUtmGZRciIiJmPizJs12sNpzKkotNAeoc9qQxWmPpxfjrHdzzQURExODDipphw6nfsGBMURSoNgVKIrYIRqPa67RJF7sCRWHwQURExODDglZ2icUgxNQARGs2TZRcFEXRGkqNpZowJ12IiIiS8IloQZZIhACiJivWAynBBwDTiZeQtt2UWQ8iIiKAwYcl1dDDETEJPuSOjwZj8GFysq223VTlrSYiIgIYfFgyZirMmk4Dwfh2U2PmQ5+QmTpqy3NdiIiI4vhEtOAw9GiYjdsaz3WRZHYjaFJ24Ym2REREcQw+LNhsChJnyyUtDZPGDNMukmnZJcITbYmIiIz4RExDn3gxyXxMyoZT+9TXG8suiV/L1epERERxfCKmoQUTJifVBgznukhmmQ+WXYiIiJIx+Egj3fkuZmUXlxy1jbLsQkREZIVPxDRU29SlYdJYaOqeD3lwnFnZhcEHERFRHJ+IaTjtU4MJyZ/o+WhwTl0yFjTZ88ElY0RERHEMPtJQ05xsO5ZtzwfLLkREREn4RExDP1zOrOdj6p4Ps2mXMJeMERERJeETMQ2zg+KkQJZ7PmSzKkdtiYiI4vhETENmPsIm0y5mB8u50pRdOGpLREQUx+AjDTntEknJfAgh9J4Pll2IiIhywidiGk6TYAKIT7PIEdoGw4ZTbdol6WA5NpwSEREZ8YmYhmoxaitLLkDKqK0qN6LqmRKO2hIRESVj8JGGHLVNLbto202ddthselDh0DacRrWPhVh2ISIiSsInYhpWS8bMJl0Ai1NtWXYhIiJKwidiGtp69ZRTbbUTbd3JwYecdjGO5kZYdiEiIkrC4CMNqyVjZue6AIayS9Kptiy7EBERGfGJmIbVtEsgsd3U2GwK6GUX87NdeKuJiIgABh9p6dMu2ZVdzPZ8sOxCRESUjMFHGpmmXVLLLuYNpyy7EBERGfGJmIYsu0RiVtMudtPXh6LGng+WXYiIiIz4RExDTezwCFmM2ja6HEkfd6pTR3NZdiEiIkrG4CONzGWX1MxH/OcsuxAREVnjEzENp8WobS5Lxlh2ISIiSsYnYhqq1sNhsV59yp6PqWUanu1CRESULKfg46GHHsKFF14Ij8cDj8eDvr4+/P73v9c+Pzk5iTVr1qC9vR2NjY1YtWoVfD5f0S+6XKyWjMnMR1MWmY8Iyy5ERERJcnoizpkzB/fddx92796NXbt24aqrrsINN9yAN954AwBw11134cknn8Rjjz2GrVu34tixY7jppptKcuHloE+7pOz5kEvGXObr1c0zHww+iIiIAEDN/BLdhz/84aSf33PPPXjooYewfft2zJkzBw8//DA2btyIq666CgCwYcMGLFq0CNu3b8fy5cuLd9VlYjXtYl12SSwZM+35YNmFiIgIKKDnIxqNYtOmTRgbG0NfXx92796NcDiMFStWaK/p7e1FT08Ptm3bZvk+wWAQo6OjST+qhT7tYjVqa1F2iZqUXVRmPoiIiIA8go/XX38djY2NcLlc+OIXv4jHH38cixcvxsDAAJxOJ1paWpJe7/V6MTAwYPl+69atQ3Nzs/aju7s7599EqTi0ng+97BKOxnB6PAQAaG90Jr1ePwtGIJYo1WhlFxuDDyIiIiCP4OPcc8/Fq6++ih07duBLX/oSbrnlFuzduzfvC7j77rsxMjKi/Thy5Eje71VsDpONpSf8QQgRD0za6pODD2N2I5zYiqoFHyrLLkRERECOPR8A4HQ6cdZZZwEAlixZgp07d+InP/kJPv7xjyMUCmF4eDgp++Hz+dDV1WX5fi6XCy6XK/crLwOzJWO+0UkAQGeTGzZbckDhNDSVhiIxuFQ7l4wRERGlKPiJGIvFEAwGsWTJEjgcDmzevFn7XH9/Pw4fPoy+vr5Cv0xFOBLBhfFsFxl8eD1TA6bU4ANg2YWIiChVTpmPu+++G9dddx16enrg9/uxceNGPPfcc3j66afR3NyMW2+9FWvXrkVbWxs8Hg9uv/129PX1TctJF8BYdjFmPoIAAK/HPeX1NpsC1aYgEhNaxoNlFyIiomQ5BR+Dg4P41Kc+hePHj6O5uRkXXnghnn76aVxzzTUAgPvvvx82mw2rVq1CMBjEypUr8eCDD5bkwsvBbMnYgJb5mBp8APGJl0goilAkBiEEyy5EREQpcgo+Hn744bSfd7vdWL9+PdavX1/QRVULh1nPx0g8+Ohqtg4+xkNRhKKxpOVkDD6IiIji+ERMQ1saZsh8+PzWPR/GXxOKxJJ+HZeMERERxTH4SEOWXcKGhtOBkQxlF8N4bjjCzAcREVEqPhHTkBMqkSwbTgF9y2k4GksKWlQbMx9EREQAg4+05ISKLJ8EghFttXrGzIeh7OKwK1AUBh9EREQAg4+0VJu+Lh3Qd3w0utQp57pI2vkuEb3swpILERGRjk/FNBwpo7bpFoyl/pqQoezC4IOIiEjHp2IaDrt55sNqzBZIyXwYyi5EREQUx+AjjdRpl4GRRLNpU7rgww6AZRciIiIrfCqmIaddhACiMaGXXdJlPux6k2ooyrILERFRKj4V03Co+u0JR2N68NFk3fOhlV2iMa1XhGUXIiIiHYOPNIy7OcLRmHauS9qej6RRW5ZdiIiIUvGpmIYxaIhEBQYzLBgz/ppQ1NhwyttMREQk8amYht2mQCY/QsayS5rgg9MuRERE6TH4yEBNZC18o5OIxAQUBZiRTc+HoeyiMvNBRESk4VMxA0ci9XH09AQAoL3BlbaM4jSchCszH04GH0RERBo+FTOQEy9HTo8DALqarbMeAMsuREREmTD4yECe7/JuIvPRlabfAzBMu0RZdiEiIjLDp2IGMmshyy6dGYIPh5b5ECy7EBERmeBTMQO5Yl0ru+SU+WDZhYiIKBWDjwxkc6ksu6Q70RbQez7CXDJGRERkik/FDOT5LuOhKID0Oz4A88wHez6IiIh0fCpmoKaUTNKtVgfMp12cLLsQERFpGHxkkFoy8TZlaDg1mXZh2YWIiEjHp2IGxmZRp2pDS70j7evNMh8suxAREen4VMxA7vkA4s2mipK+hMKyCxERUXoMPjKQezuAzGO2gJ4pCbPsQkREZIpPxQzk2S5A5gVjAOBSOe1CRESUDp+KGRinXbLJfDjtdgA824WIiMgKg48MjCWTrMouqrHskuj5UHmbi
YiIJD4VMzAGH50ZtpsC+pKxIDecEhERmeJTMQPVlmPZxWzU1sayCxERkcTgIwPjtEum1eqAnvlg2YWIiMgcn4oZGKddMq1WB/RAIyaAybBsOOVtJiIikvhUzECOyTbXOeB22DO+3pjlGAtG4u/BsgsREZGGwUcGMmvhzaLZ1Ph6ABgLxYMPB8suREREGj4VM5A7OrLp9wDiWQ65gX08GAWg94EQERERg4+M6pzxUsus5rqsXq8oihZsBFh2ISIimkKt9AVUu49ePBvvnp7Apy+bl/WvcdptCEZiCEYSDacsuxAREWkYfGQws7kO9370gpx+jVO1AUHDz1l2ISIi0vCpWAKpez04aktERKTjU7EEUoMNlQfLERERaRh8lEBq5oNlFyIiIh2fiiWQmvlg2YWIiEjHp2IJpGY+WHYhIiLSMfgoARczH0RERJb4VCwBh5qc6WDPBxERkY5PxRJIDTZYdiEiItIx+CiBKT0fXK9ORESkYfBRAsYeD6fdBkVh8EFERCQx+CgBY+bDwZILERFREgYfJeAyBB8qm02JiIiS8MlYAsayC8dsiYiIkvHJWALOpJ4Pll2IiIiMGHyUgJNlFyIiIkt8MpZActmFmQ8iIiIjBh8lkDztwltMRERkxCdjCRinXVIXjhEREdU6PhlLwJjt4HZTIiKiZAw+SoBlFyIiImt8MpZA0qgtyy5ERERJ+GQsAYfKsgsREZEVBh8l4OSGUyIiIks5PRnXrVuHSy65BE1NTejs7MSNN96I/v7+pNdMTk5izZo1aG9vR2NjI1atWgWfz1fUi652TlXPdjhYdiEiIkqS05Nx69atWLNmDbZv345nnnkG4XAY1157LcbGxrTX3HXXXXjyySfx2GOPYevWrTh27Bhuuummol94NXPa7dr/d7DsQkRElETN5cV/+MMfkn7+yCOPoLOzE7t378b73/9+jIyM4OGHH8bGjRtx1VVXAQA2bNiARYsWYfv27Vi+fHnxrryKcdqFiIjIWkFPxpGREQBAW1sbAGD37t0Ih8NYsWKF9pre3l709PRg27Ztpu8RDAYxOjqa9GO6M65UZ9mFiIgoWd5PxlgshjvvvBOXX345zj//fADAwMAAnE4nWlpakl7r9XoxMDBg+j7r1q1Dc3Oz9qO7uzvfS6oaxsyHk5kPIiKiJHk/GdesWYM9e/Zg06ZNBV3A3XffjZGREe3HkSNHCnq/auDiqC0REZGlnHo+pNtuuw1PPfUUnn/+ecyZM0f7eFdXF0KhEIaHh5OyHz6fD11dXabv5XK54HK58rmMqpV0qi3LLkRERElyejIKIXDbbbfh8ccfx5YtWzB//vykzy9ZsgQOhwObN2/WPtbf34/Dhw+jr6+vOFc8DbDhlIiIyFpOmY81a9Zg48aN+N3vfoempiatj6O5uRl1dXVobm7GrbfeirVr16KtrQ0ejwe33347+vr6ambSBUhZMsayCxERUZKcgo+HHnoIAHDllVcmfXzDhg349Kc/DQC4//77YbPZsGrVKgSDQaxcuRIPPvhgUS52ujCWWlh2ISIiSpZT8CGEyPgat9uN9evXY/369Xlf1HTH9epERETW+GQsgeTgg2UXIiIiIwYfJWCzKdqILTMfREREyfhkLBE58cLgg4iIKBmfjCWiBx8suxARERkx+CgRmfFg5oOIiCgZn4wl4mTwQUREZIpPxhJh2YWIiMgcg48Saa13AABa6p0VvhIiIqLqktfBcpTZvTddgNeODOOiOc2VvhQiIqKqwuCjRHq7POjt8lT6MoiIiKoOyy5ERERUVgw+iIiIqKwYfBAREVFZMfggIiKismLwQURERGXF4IOIiIjKisEHERERlRWDDyIiIiorBh9ERERUVgw+iIiIqKwYfBAREVFZMfggIiKismLwQURERGVVdafaCiEAAKOjoxW+EiIiIsqWfG7L53g6VRd8+P1+AEB3d3eFr4SIiIhy5ff70dzcnPY1isgmRCmjWCyGY8eOoampCYqiFPW9R0dH0d3djSNHjsDj8RT1vSkZ73X58F6XD+91+fBel0+x7rUQAn6/H7NmzYLNlr6ro+oyHzabDXPmzCnp1/B4PPzLXCa81+XDe10+vNflw3tdPsW415kyHhIbTomIiKisGHwQERFRWdVU8OFyufDtb38bLper0pdyxuO9Lh/e6/LhvS4f3uvyqcS9rrqGUyIiIjqz1VTmg4iIiCqPwQcRERGVFYMPIiIiKisGH0RERFRWNRN8rF+/HvPmzYPb7cayZcvw8ssvV/qSpr1169bhkksuQVNTEzo7O3HjjTeiv78/6TWTk5NYs2YN2tvb0djYiFWrVsHn81Xois8c9913HxRFwZ133ql9jPe6eI4ePYpPfvKTaG9vR11dHS644ALs2rVL+7wQAt/61rcwc+ZM1NXVYcWKFdi/f38Fr3h6ikaj+OY3v4n58+ejrq4OCxcuxPe///2ks0F4r/P3/PPP48Mf/jBmzZoFRVHw29/+Nunz2dzboaEhrF69Gh6PBy0tLbj11lsRCAQKvzhRAzZt2iScTqf4z//8T/HGG2+Iz33uc6KlpUX4fL5KX9q0tnLlSrFhwwaxZ88e8eqrr4oPfehDoqenRwQCAe01X/ziF0V3d7fYvHmz2LVrl1i+fLm47LLLKnjV09/LL78s5s2bJy688EJxxx13aB/nvS6OoaEhMXfuXPHpT39a7NixQxw6dEg8/fTT4sCBA9pr7rvvPtHc3Cx++9vfitdee0185CMfEfPnzxcTExMVvPLp55577hHt7e3iqaeeEm+//bZ47LHHRGNjo/jJT36ivYb3On//8z//I77xjW+I3/zmNwKAePzxx5M+n829/eAHPyguuugisX37dvHCCy+Is846S9x8880FX1tNBB+XXnqpWLNmjfbzaDQqZs2aJdatW1fBqzrzDA4OCgBi69atQgghhoeHhcPhEI899pj2mn379gkAYtu2bZW6zGnN7/eLs88+WzzzzDPiAx/4gBZ88F4Xz9e+9jVxxRVXWH4+FouJrq4u8YMf/ED72PDwsHC5XOJXv/pVOS7xjHH99deLz372s0kfu+mmm8Tq1auFELzXxZQafGRzb/fu3SsAiJ07d2qv+f3vfy8URRFHjx4t6HrO+LJLKBTC7t27sWLFCu1jNpsNK1aswLZt2yp4ZWeekZERAEBbWxsAYPfu3QiHw0n3vre3Fz09Pbz3eVqzZg2uv/76pHsK8F4X0xNPPIGlS5fiYx/7GDo7O3HxxRfjF7/4hfb5t99+GwMDA0n3urm5GcuWLeO9ztFll12GzZs346233gIAvPbaa3jxxRdx3XXXAeC9LqVs7u22bdvQ0tKCpUuXaq9ZsWIFbDYbduzYUdDXr7qD5Yrt5MmTiEaj8Hq9SR/3er148803K3RVZ55YLIY777wTl19+Oc4//3wAwMDAAJxOJ1paWpJe6/V6MTAwUIGrnN42bdqEv/zlL9i5c+eUz/FeF8+hQ4fw0EMPYe3atfjnf/5n7Ny5E1/5ylfgdDpxyy23aPfT7N8U3uvcfP3rX8fo6Ch6e3th
t9sRjUZxzz33YPXq1QDAe11C2dzbgYEBdHZ2Jn1eVVW0tbUVfP/P+OCDymPNmjXYs2cPXnzxxUpfyhnpyJEjuOOOO/DMM8/A7XZX+nLOaLFYDEuXLsW9994LALj44ouxZ88e/PSnP8Utt9xS4as7s/z617/Go48+io0bN+K8887Dq6++ijvvvBOzZs3ivT7DnfFll46ODtjt9ild/z6fD11dXRW6qjPLbbfdhqeeegrPPvss5syZo328q6sLoVAIw8PDSa/nvc/d7t27MTg4iPe+971QVRWqqmLr1q144IEHoKoqvF4v73WRzJw5E4sXL0762KJFi3D48GEA0O4n/00p3D/90z/h61//Oj7xiU/gggsuwD/8wz/grrvuwrp16wDwXpdSNve2q6sLg4ODSZ+PRCIYGhoq+P6f8cGH0+nEkiVLsHnzZu1jsVgMmzdvRl9fXwWvbPoTQuC2227D448/ji1btmD+/PlJn1+yZAkcDkfSve/v78fhw4d573N09dVX4/XXX8err76q/Vi6dClWr16t/X/e6+K4/PLLp4yMv/XWW5g7dy4AYP78+ejq6kq616Ojo9ixYwfvdY7Gx8dhsyU/hux2O2KxGADe61LK5t729fVheHgYu3fv1l6zZcsWxGIxLFu2rLALKKhddZrYtGmTcLlc4pFHHhF79+4Vn//850VLS4sYGBio9KVNa1/60pdEc3OzeO6558Tx48e1H+Pj49prvvjFL4qenh6xZcsWsWvXLtHX1yf6+voqeNVnDuO0ixC818Xy8ssvC1VVxT333CP2798vHn30UVFfXy/++7//W3vNfffdJ1paWsTvfvc78de//lXccMMNHP/Mwy233CJmz56tjdr+5je/ER0dHeKrX/2q9hre6/z5/X7xyiuviFdeeUUAED/60Y/EK6+8It555x0hRHb39oMf/KC4+OKLxY4dO8SLL74ozj77bI7a5uI//uM/RE9Pj3A6neLSSy8V27dvr/QlTXsATH9s2LBBe83ExIT48pe/LFpbW0V9fb346Ec/Ko4fP165iz6DpAYfvNfF8+STT4rzzz9fuFwu0dvbK37+858nfT4Wi4lvfvObwuv1CpfLJa6++mrR399foaudvkZHR8Udd9whenp6hNvtFgsWLBDf+MY3RDAY1F7De52/Z5991vTf6FtuuUUIkd29PXXqlLj55ptFY2Oj8Hg84jOf+Yzw+/0FX5sihGGVHBEREVGJnfE9H0RERFRdGHwQERFRWTH4ICIiorJi8EFERERlxeCDiIiIyorBBxEREZUVgw8iIiIqKwYfREREVFYMPoiIiKisGHwQERFRWTH4ICIiorJi8EFERERl9f8DsVC7vKwTdVoAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "plt.plot(result.scores)" + ] + }, + { + "cell_type": "code", + "execution_count": 59, + "metadata": {}, + "outputs": [], + "source": [ + "for name, module in baleen.named_sub_modules(TypedPredictor):\n", + " print(name, module)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/intro.ipynb b/intro.ipynb index 1dc2cf6d93..c6db5871ae 100644 --- a/intro.ipynb +++ b/intro.ipynb @@ -35,9 +35,20 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 2, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/var/folders/qz/yy2p38hj2m9c7bfp30yq99340000gn/T/ipykernel_40349/1846046422.py:20: DeprecationWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html\n", + " import pkg_resources # Install the package if it's not installed\n", + "/opt/homebrew/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + } + ], "source": [ "%load_ext autoreload\n", "%autoreload 2\n", @@ -83,7 +94,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -137,7 +148,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "metadata": {}, "outputs": [ { @@ -146,7 +157,7 @@ "(20, 50)" ] }, - "execution_count": 3, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } @@ -177,7 +188,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "metadata": {}, "outputs": [ { @@ -204,7 +215,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -233,7 +244,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 7, "metadata": {}, "outputs": [ { @@ -295,7 +306,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ @@ -319,15 +330,39 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 9, "metadata": {}, "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "Question: What is the nationality of the chef and restaurateur featured in Restaurant: Impossible?\n", - "Predicted Answer: American\n" + "ename": "OpenAIError", + "evalue": "The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mOpenAIError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[9], line 5\u001b[0m\n\u001b[1;32m 2\u001b[0m generate_answer \u001b[38;5;241m=\u001b[39m dspy\u001b[38;5;241m.\u001b[39mPredict(BasicQA)\n\u001b[1;32m 4\u001b[0m \u001b[38;5;66;03m# Call the predictor on a particular input.\u001b[39;00m\n\u001b[0;32m----> 5\u001b[0m pred \u001b[38;5;241m=\u001b[39m 
\u001b[43mgenerate_answer\u001b[49m\u001b[43m(\u001b[49m\u001b[43mquestion\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdev_example\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mquestion\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 7\u001b[0m \u001b[38;5;66;03m# Print the input and the prediction.\u001b[39;00m\n\u001b[1;32m 8\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mQuestion: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mdev_example\u001b[38;5;241m.\u001b[39mquestion\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n", + "File \u001b[0;32m~/repos/dspy/dspy/predict/predict.py:49\u001b[0m, in \u001b[0;36mPredict.__call__\u001b[0;34m(self, **kwargs)\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__call__\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[0;32m---> 49\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mforward\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/repos/dspy/dspy/predict/predict.py:91\u001b[0m, in \u001b[0;36mPredict.forward\u001b[0;34m(self, **kwargs)\u001b[0m\n\u001b[1;32m 88\u001b[0m template \u001b[38;5;241m=\u001b[39m signature_to_template(signature)\n\u001b[1;32m 90\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlm \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m---> 91\u001b[0m x, C \u001b[38;5;241m=\u001b[39m \u001b[43mdsp\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgenerate\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtemplate\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[43m)\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstage\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mstage\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 92\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 93\u001b[0m \u001b[38;5;66;03m# Note: query_only=True means the instructions and examples are not included.\u001b[39;00m\n\u001b[1;32m 94\u001b[0m \u001b[38;5;66;03m# I'm not really sure why we'd want to do that, but it's there.\u001b[39;00m\n\u001b[1;32m 95\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m dsp\u001b[38;5;241m.\u001b[39msettings\u001b[38;5;241m.\u001b[39mcontext(lm\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlm, query_only\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m):\n", + "File \u001b[0;32m~/repos/dspy/dsp/primitives/predict.py:77\u001b[0m, in \u001b[0;36m_generate..do_generate\u001b[0;34m(example, stage, max_depth, original_example)\u001b[0m\n\u001b[1;32m 75\u001b[0m \u001b[38;5;66;03m# Generate and extract the fields.\u001b[39;00m\n\u001b[1;32m 76\u001b[0m prompt \u001b[38;5;241m=\u001b[39m template(example)\n\u001b[0;32m---> 77\u001b[0m completions: \u001b[38;5;28mlist\u001b[39m[\u001b[38;5;28mdict\u001b[39m[\u001b[38;5;28mstr\u001b[39m, Any]] \u001b[38;5;241m=\u001b[39m \u001b[43mgenerator\u001b[49m\u001b[43m(\u001b[49m\u001b[43mprompt\u001b[49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 78\u001b[0m completions: \u001b[38;5;28mlist\u001b[39m[Example] \u001b[38;5;241m=\u001b[39m [template\u001b[38;5;241m.\u001b[39mextract(example, p) \u001b[38;5;28;01mfor\u001b[39;00m p \u001b[38;5;129;01min\u001b[39;00m completions]\n\u001b[1;32m 80\u001b[0m \u001b[38;5;66;03m# Find the completions that are most complete.\u001b[39;00m\n", + "File \u001b[0;32m~/repos/dspy/dsp/modules/gpt3.py:186\u001b[0m, in \u001b[0;36mGPT3.__call__\u001b[0;34m(self, prompt, only_completed, return_sorted, **kwargs)\u001b[0m\n\u001b[1;32m 178\u001b[0m \u001b[38;5;28;01massert\u001b[39;00m return_sorted \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mFalse\u001b[39;00m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mfor now\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 180\u001b[0m \u001b[38;5;66;03m# if kwargs.get(\"n\", 1) > 1:\u001b[39;00m\n\u001b[1;32m 181\u001b[0m \u001b[38;5;66;03m# if self.model_type == \"chat\":\u001b[39;00m\n\u001b[1;32m 182\u001b[0m \u001b[38;5;66;03m# kwargs = {**kwargs}\u001b[39;00m\n\u001b[1;32m 183\u001b[0m \u001b[38;5;66;03m# else:\u001b[39;00m\n\u001b[1;32m 184\u001b[0m \u001b[38;5;66;03m# kwargs = {**kwargs, \"logprobs\": 5}\u001b[39;00m\n\u001b[0;32m--> 186\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrequest\u001b[49m\u001b[43m(\u001b[49m\u001b[43mprompt\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 188\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m dsp\u001b[38;5;241m.\u001b[39msettings\u001b[38;5;241m.\u001b[39mlog_openai_usage:\n\u001b[1;32m 189\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlog_usage(response)\n", + "File \u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/backoff/_sync.py:105\u001b[0m, in \u001b[0;36mretry_exception..retry\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 96\u001b[0m details \u001b[38;5;241m=\u001b[39m {\n\u001b[1;32m 97\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtarget\u001b[39m\u001b[38;5;124m\"\u001b[39m: target,\n\u001b[1;32m 98\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124margs\u001b[39m\u001b[38;5;124m\"\u001b[39m: args,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 101\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124melapsed\u001b[39m\u001b[38;5;124m\"\u001b[39m: elapsed,\n\u001b[1;32m 102\u001b[0m }\n\u001b[1;32m 104\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 105\u001b[0m ret \u001b[38;5;241m=\u001b[39m \u001b[43mtarget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 106\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m exception \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 107\u001b[0m max_tries_exceeded \u001b[38;5;241m=\u001b[39m (tries \u001b[38;5;241m==\u001b[39m max_tries_value)\n", + "File \u001b[0;32m~/repos/dspy/dsp/modules/gpt3.py:152\u001b[0m, in \u001b[0;36mGPT3.request\u001b[0;34m(self, prompt, **kwargs)\u001b[0m\n\u001b[1;32m 149\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmodel_type\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01min\u001b[39;00m 
kwargs:\n\u001b[1;32m 150\u001b[0m \u001b[38;5;28;01mdel\u001b[39;00m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmodel_type\u001b[39m\u001b[38;5;124m\"\u001b[39m]\n\u001b[0;32m--> 152\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbasic_request\u001b[49m\u001b[43m(\u001b[49m\u001b[43mprompt\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/repos/dspy/dsp/modules/gpt3.py:125\u001b[0m, in \u001b[0;36mGPT3.basic_request\u001b[0;34m(self, prompt, **kwargs)\u001b[0m\n\u001b[1;32m 123\u001b[0m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmessages\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m [{\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrole\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124muser\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcontent\u001b[39m\u001b[38;5;124m\"\u001b[39m: prompt}]\n\u001b[1;32m 124\u001b[0m kwargs \u001b[38;5;241m=\u001b[39m {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mstringify_request\u001b[39m\u001b[38;5;124m\"\u001b[39m: json\u001b[38;5;241m.\u001b[39mdumps(kwargs)}\n\u001b[0;32m--> 125\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[43mchat_request\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 127\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 128\u001b[0m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mprompt\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m prompt\n", + "File \u001b[0;32m~/repos/dspy/dsp/modules/gpt3.py:273\u001b[0m, in \u001b[0;36mchat_request\u001b[0;34m(**kwargs)\u001b[0m\n\u001b[1;32m 270\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m OPENAI_LEGACY:\n\u001b[1;32m 271\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m _cached_gpt3_turbo_request_v2_wrapped(\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m--> 273\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mv1_cached_gpt3_turbo_request_v2_wrapped\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241m.\u001b[39mmodel_dump()\n", + "File \u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/joblib/memory.py:655\u001b[0m, in \u001b[0;36mMemorizedFunc.__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 654\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__call__\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[0;32m--> 655\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_cached_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m[\u001b[38;5;241m0\u001b[39m]\n", + "File \u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/joblib/memory.py:598\u001b[0m, in \u001b[0;36mMemorizedFunc._cached_call\u001b[0;34m(self, args, kwargs, shelving)\u001b[0m\n\u001b[1;32m 595\u001b[0m must_call \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[1;32m 597\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m 
must_call:\n\u001b[0;32m--> 598\u001b[0m out, metadata \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcall\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 599\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmmap_mode \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 600\u001b[0m \u001b[38;5;66;03m# Memmap the output at the first call to be consistent with\u001b[39;00m\n\u001b[1;32m 601\u001b[0m \u001b[38;5;66;03m# later calls\u001b[39;00m\n\u001b[1;32m 602\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_verbose:\n", + "File \u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/joblib/memory.py:856\u001b[0m, in \u001b[0;36mMemorizedFunc.call\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 854\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_verbose \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[1;32m 855\u001b[0m \u001b[38;5;28mprint\u001b[39m(format_call(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfunc, args, kwargs))\n\u001b[0;32m--> 856\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 857\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstore_backend\u001b[38;5;241m.\u001b[39mdump_item(\n\u001b[1;32m 858\u001b[0m [func_id, args_id], output, verbose\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_verbose)\n\u001b[1;32m 860\u001b[0m duration \u001b[38;5;241m=\u001b[39m time\u001b[38;5;241m.\u001b[39mtime() \u001b[38;5;241m-\u001b[39m start_time\n", + "File \u001b[0;32m~/repos/dspy/dsp/modules/gpt3.py:266\u001b[0m, in \u001b[0;36mv1_cached_gpt3_turbo_request_v2_wrapped\u001b[0;34m(**kwargs)\u001b[0m\n\u001b[1;32m 263\u001b[0m \u001b[38;5;129m@functools\u001b[39m\u001b[38;5;241m.\u001b[39mlru_cache(maxsize\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01mif\u001b[39;00m cache_turn_on \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;241m0\u001b[39m)\n\u001b[1;32m 264\u001b[0m \u001b[38;5;129m@NotebookCacheMemory\u001b[39m\u001b[38;5;241m.\u001b[39mcache\n\u001b[1;32m 265\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mv1_cached_gpt3_turbo_request_v2_wrapped\u001b[39m(\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[0;32m--> 266\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mv1_cached_gpt3_turbo_request_v2\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/joblib/memory.py:655\u001b[0m, in \u001b[0;36mMemorizedFunc.__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 654\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m 
\u001b[38;5;21m__call__\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[0;32m--> 655\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_cached_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m[\u001b[38;5;241m0\u001b[39m]\n", + "File \u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/joblib/memory.py:598\u001b[0m, in \u001b[0;36mMemorizedFunc._cached_call\u001b[0;34m(self, args, kwargs, shelving)\u001b[0m\n\u001b[1;32m 595\u001b[0m must_call \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[1;32m 597\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m must_call:\n\u001b[0;32m--> 598\u001b[0m out, metadata \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcall\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 599\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmmap_mode \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 600\u001b[0m \u001b[38;5;66;03m# Memmap the output at the first call to be consistent with\u001b[39;00m\n\u001b[1;32m 601\u001b[0m \u001b[38;5;66;03m# later calls\u001b[39;00m\n\u001b[1;32m 602\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_verbose:\n", + "File \u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/joblib/memory.py:856\u001b[0m, in \u001b[0;36mMemorizedFunc.call\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 854\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_verbose \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[1;32m 855\u001b[0m \u001b[38;5;28mprint\u001b[39m(format_call(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfunc, args, kwargs))\n\u001b[0;32m--> 856\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 857\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstore_backend\u001b[38;5;241m.\u001b[39mdump_item(\n\u001b[1;32m 858\u001b[0m [func_id, args_id], output, verbose\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_verbose)\n\u001b[1;32m 860\u001b[0m duration \u001b[38;5;241m=\u001b[39m time\u001b[38;5;241m.\u001b[39mtime() \u001b[38;5;241m-\u001b[39m start_time\n", + "File \u001b[0;32m~/repos/dspy/dsp/modules/gpt3.py:260\u001b[0m, in \u001b[0;36mv1_cached_gpt3_turbo_request_v2\u001b[0;34m(**kwargs)\u001b[0m\n\u001b[1;32m 258\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mstringify_request\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01min\u001b[39;00m kwargs:\n\u001b[1;32m 259\u001b[0m kwargs \u001b[38;5;241m=\u001b[39m 
json\u001b[38;5;241m.\u001b[39mloads(kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mstringify_request\u001b[39m\u001b[38;5;124m\"\u001b[39m])\n\u001b[0;32m--> 260\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mopenai\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mchat\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcompletions\u001b[49m\u001b[38;5;241m.\u001b[39mcreate(\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n", + "File \u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/openai/_utils/_proxy.py:20\u001b[0m, in \u001b[0;36mLazyProxy.__getattr__\u001b[0;34m(self, attr)\u001b[0m\n\u001b[1;32m 19\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__getattr__\u001b[39m(\u001b[38;5;28mself\u001b[39m, attr: \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m \u001b[38;5;28mobject\u001b[39m:\n\u001b[0;32m---> 20\u001b[0m proxied \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m__get_proxied__\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 21\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(proxied, LazyProxy):\n\u001b[1;32m 22\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m proxied \u001b[38;5;66;03m# pyright: ignore\u001b[39;00m\n", + "File \u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/openai/_utils/_proxy.py:55\u001b[0m, in \u001b[0;36mLazyProxy.__get_proxied__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 54\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__get_proxied__\u001b[39m(\u001b[38;5;28mself\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m T:\n\u001b[0;32m---> 55\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m__load__\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/openai/_module_client.py:12\u001b[0m, in \u001b[0;36mChatProxy.__load__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[38;5;129m@override\u001b[39m\n\u001b[1;32m 11\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__load__\u001b[39m(\u001b[38;5;28mself\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m resources\u001b[38;5;241m.\u001b[39mChat:\n\u001b[0;32m---> 12\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43m_load_client\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241m.\u001b[39mchat\n", + "File \u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/openai/__init__.py:297\u001b[0m, in \u001b[0;36m_load_client\u001b[0;34m()\u001b[0m\n\u001b[1;32m 281\u001b[0m _client \u001b[38;5;241m=\u001b[39m _AzureModuleClient( \u001b[38;5;66;03m# type: ignore\u001b[39;00m\n\u001b[1;32m 282\u001b[0m api_version\u001b[38;5;241m=\u001b[39mapi_version,\n\u001b[1;32m 283\u001b[0m azure_endpoint\u001b[38;5;241m=\u001b[39mazure_endpoint,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 293\u001b[0m http_client\u001b[38;5;241m=\u001b[39mhttp_client,\n\u001b[1;32m 294\u001b[0m )\n\u001b[1;32m 295\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m _client\n\u001b[0;32m--> 297\u001b[0m _client \u001b[38;5;241m=\u001b[39m \u001b[43m_ModuleClient\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 298\u001b[0m \u001b[43m \u001b[49m\u001b[43mapi_key\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mapi_key\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 299\u001b[0m \u001b[43m 
\u001b[49m\u001b[43morganization\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43morganization\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 300\u001b[0m \u001b[43m \u001b[49m\u001b[43mbase_url\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mbase_url\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 301\u001b[0m \u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtimeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 302\u001b[0m \u001b[43m \u001b[49m\u001b[43mmax_retries\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmax_retries\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 303\u001b[0m \u001b[43m \u001b[49m\u001b[43mdefault_headers\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdefault_headers\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 304\u001b[0m \u001b[43m \u001b[49m\u001b[43mdefault_query\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdefault_query\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 305\u001b[0m \u001b[43m \u001b[49m\u001b[43mhttp_client\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mhttp_client\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 306\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 307\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m _client\n\u001b[1;32m 309\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m _client\n", + "File \u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/openai/_client.py:98\u001b[0m, in \u001b[0;36mOpenAI.__init__\u001b[0;34m(self, api_key, organization, base_url, timeout, max_retries, default_headers, default_query, http_client, _strict_response_validation)\u001b[0m\n\u001b[1;32m 96\u001b[0m api_key \u001b[38;5;241m=\u001b[39m os\u001b[38;5;241m.\u001b[39menviron\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mOPENAI_API_KEY\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 97\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m api_key \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m---> 98\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m OpenAIError(\n\u001b[1;32m 99\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mThe api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 100\u001b[0m )\n\u001b[1;32m 101\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mapi_key \u001b[38;5;241m=\u001b[39m api_key\n\u001b[1;32m 103\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m organization \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n", + "\u001b[0;31mOpenAIError\u001b[0m: The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable" ] } ], @@ -354,7 +389,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -399,7 +434,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -447,7 +482,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -485,7 +520,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -518,7 +553,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -544,7 +579,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "metadata": 
{}, "outputs": [], "source": [ @@ -590,7 +625,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -641,7 +676,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -676,7 +711,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -826,7 +861,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -859,7 +894,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -1017,7 +1052,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -1183,7 +1218,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -1206,7 +1241,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -1270,7 +1305,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -1306,7 +1341,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -1437,7 +1472,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -1462,7 +1497,7 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -1493,16 +1528,28 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": null, "metadata": {}, - "outputs": [], + "outputs": [ + { + "ename": "NameError", + "evalue": "name 'evaluate_on_hotpotqa' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[1], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m uncompiled_baleen_retrieval_score \u001b[38;5;241m=\u001b[39m \u001b[43mevaluate_on_hotpotqa\u001b[49m(uncompiled_baleen, metric\u001b[38;5;241m=\u001b[39mgold_passages_retrieved, display\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m)\n", + "\u001b[0;31mNameError\u001b[0m: name 'evaluate_on_hotpotqa' is not defined" + ] + } + ], "source": [ "uncompiled_baleen_retrieval_score = evaluate_on_hotpotqa(uncompiled_baleen, metric=gold_passages_retrieved, display=False)" ] }, { "cell_type": "code", - "execution_count": 28, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -1632,7 +1679,7 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -1671,7 +1718,7 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -1951,7 +1998,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.17" + "version": "3.11.8" }, "orig_nbformat": 4 }, diff --git a/tests/functional/test_functional.py b/tests/functional/test_functional.py index 2d680d3b99..7f8f9237eb 100644 --- a/tests/functional/test_functional.py +++ b/tests/functional/test_functional.py @@ -9,6 +9,7 @@ import dspy from dspy.functional import predictor, cot, FunctionalModule, TypedPredictor, TypedChainOfThought +from dspy.predict.predict import 
Predict
 from dspy.primitives.example import Example
 from dspy.teleprompt.bootstrap import BootstrapFewShot
 from dspy.teleprompt.vanilla import LabeledFewShot
@@ -232,7 +233,8 @@ def simple_metric(example, prediction, trace=None):
     lm.inspect_history(n=2)
 
     # Check that the compiled student has the correct demos
-    demos = compiled_student.predictors()[0].demos
+    _, predict = next(compiled_student.named_sub_modules(Predict, skip_compiled=False))
+    demos = predict.demos
     assert len(demos) == 1
     assert demos[0].input == trainset[0].input
     assert demos[0].output == trainset[0].output
diff --git a/tests/primitives/test_program.py b/tests/primitives/test_program.py
index 87ce09395f..3f86005154 100644
--- a/tests/primitives/test_program.py
+++ b/tests/primitives/test_program.py
@@ -1,5 +1,4 @@
 import dspy
-from dspy.primitives.module import BaseModule
 from dspy.primitives.program import (
     Module,
     set_attribute_by_name,
@@ -59,45 +58,81 @@ def __init__(self):
     assert "hop.predict2" in names
 
 
-class SubModule(BaseModule):
-    pass
-
-
-class AnotherSubModule(BaseModule):
-    pass
-
-
 def test_empty_module():
-    module = BaseModule()
-    assert list(module.named_sub_modules()) == [("base", module)]
+    module = Module()
+    assert list(module.named_sub_modules()) == [("self", module)]
 
 
 def test_single_level():
-    module = BaseModule()
-    module.sub = SubModule()
-    expected = [("base", module), ("base.sub", module.sub)]
+    module = Module()
+    module.sub = Module()
+    expected = [("self", module), ("self.sub", module.sub)]
     assert list(module.named_sub_modules()) == expected
 
 
 def test_multiple_levels():
-    module = BaseModule()
-    module.sub = SubModule()
-    module.sub.subsub = SubModule()
-    expected = [("base", module), ("base.sub", module.sub), ("base.sub.subsub", module.sub.subsub)]
+    module = Module()
+    module.sub = Module()
+    module.sub.subsub = Module()
+    expected = [("self", module), ("self.sub", module.sub), ("self.sub.subsub", module.sub.subsub)]
     assert list(module.named_sub_modules()) == expected
 
 
 def test_multiple_sub_modules():
-    module = BaseModule()
-    module.sub1 = SubModule()
-    module.sub2 = SubModule()
-    expected = [("base", module), ("base.sub1", module.sub1), ("base.sub2", module.sub2)]
+    module = Module()
+    module.sub1 = Module()
+    module.sub2 = Module()
+    expected = [("self", module), ("self.sub1", module.sub1), ("self.sub2", module.sub2)]
     assert sorted(list(module.named_sub_modules())) == sorted(expected)
 
 
 def test_non_base_module_attributes():
-    module = BaseModule()
-    module.sub = SubModule()
-    module.not_a_sub = "Not a BaseModule"
-    expected = [("base", module), ("base.sub", module.sub)]
+    module = Module()
+    module.sub = Module()
+    module.not_a_sub = "Not a Module"
+    expected = [("self", module), ("self.sub", module.sub)]
     assert list(module.named_sub_modules()) == expected
+
+
+def test_complex_module_traversal():
+    root = Module()
+    root.sub_module = Module()
+    root.sub_module.nested_list = [Module(), {"key": Module()}]
+    # every Module() below is a distinct instance, so each one should be named
+    root.sub_module.nested_tuple = (Module(), [Module(), Module()])
+    expected_names = {
+        "self",
+        "self.sub_module",
+        "self.sub_module.nested_list[0]",
+        "self.sub_module.nested_list[1][key]",
+        "self.sub_module.nested_tuple[0]",
+        "self.sub_module.nested_tuple[1][0]",
+        "self.sub_module.nested_tuple[1][1]",
+    }
+    found_names = {name for name, _ in root.named_sub_modules()}
+
+    assert (
+        found_names == expected_names
+    ), f"Missing or extra modules found. Missing: {expected_names-found_names}, Extra: {found_names-expected_names}"
+
+
+def test_complex_module_traversal_with_duplicates():
+    root = Module()
+    root.sub_module = Module()
+    root.sub_module.nested_list = [Module(), {"key": Module()}]
+    same_module = Module()
+    root.sub_module.nested_tuple = (Module(), [same_module, same_module])
+    expected_names = {
+        "self",
+        "self.sub_module",
+        "self.sub_module.nested_list[0]",
+        "self.sub_module.nested_list[1][key]",
+        "self.sub_module.nested_tuple[0]",
+        "self.sub_module.nested_tuple[1][0]",
+        # "self.sub_module.nested_tuple[1][1]" should not be included, as it is the same module as the previous one
+    }
+    found_names = {name for name, _ in root.named_sub_modules()}
+
+    assert (
+        found_names == expected_names
+    ), f"Missing or extra modules found. Missing: {expected_names-found_names}, Extra: {found_names-expected_names}"

From fc664d5e339d2c16b3b537bdbb13d9707e6fd9c4 Mon Sep 17 00:00:00 2001
From: Isaac Miller <17116851+isaacbmiller@users.noreply.github.com>
Date: Thu, 7 Mar 2024 16:24:56 -0600
Subject: [PATCH 147/243] ci(dspy): Add main push test run back (#600)

* Add main push action back and fix comment bot

* Try to fix comment bot

* Remove comment bot workflow - to be added later
---
 .github/workflows/pr_comment.yml | 37 --------------------------------
 .github/workflows/run_tests.yml  | 21 +++----------------
 2 files changed, 3 insertions(+), 55 deletions(-)
 delete mode 100644 .github/workflows/pr_comment.yml

diff --git a/.github/workflows/pr_comment.yml b/.github/workflows/pr_comment.yml
deleted file mode 100644
index 87afb50947..0000000000
--- a/.github/workflows/pr_comment.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-name: Comment for PR
-
-on:
-  workflow_run:
-    workflows: ["Check for Ruff Fix, Test, and Build"]
-    types:
-      - completed
-
-jobs:
-  comment:
-    runs-on: ubuntu-latest
-    steps:
-      - name: "Download Ruff Fix Outcome Artifact"
-        uses: actions/download-artifact@v2
-        with:
-          name: ruff-fix-outcome
-          path: artifacts
-
-      - name: "Read Ruff Fix Outcome"
-        id: ruff_outcome
-        run: |
-          outcome=$(cat artifacts/ruff_fix_outcome.txt)
-          echo "RUFF_FIX_OUTCOME=$outcome" >> $GITHUB_ENV
-
-      - name: "Comment on PR if Ruff Fix Failed"
-        if: env.RUFF_FIX_OUTCOME == 'true'
-        uses: actions/github-script@v5
-        with:
-          script: |
-            const pr_number = ${{ github.event.workflow_run.pull_requests[0].number }};
-            const message = 'It seems like there are issues with the formatting. Please run `ruff check . 
--fix-only` and commit to address these issues.'; - github.rest.issues.createComment({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: pr_number, - body: message - }); diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index e57c6ff0f1..96e81e381e 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -1,6 +1,9 @@ name: Lint, Test, and Build on: + push: + branches: + - main pull_request: types: [opened, synchronize, reopened] @@ -24,21 +27,6 @@ jobs: args: --fix-only --exit-non-zero-on-fix continue-on-error: true - - name: Determine Ruff Fix Outcome - run: | - if [ ${{ steps.ruff_fix.outcome }} == 'failure' ]; then - echo "RUFF_FAILED=true" >> $GITHUB_ENV - echo ${{ steps.ruff_fix.outcome }} > ruff_fix_outcome.txt - else - echo "RUFF_FAILED=false" >> $GITHUB_ENV - echo ${{ steps.ruff_fix.outcome }} > ruff_fix_outcome.txt - fi - - - uses: actions/upload-artifact@v2 - with: - name: ruff-fix-outcome - path: ruff_fix_outcome.txt - - name: Fail Workflow if Ruff Fix Failed if: steps.ruff_fix.outcome == 'failure' run: | @@ -52,7 +40,6 @@ jobs: strategy: matrix: python-version: ["3.9"] - if: github.event_name == 'pull_request' steps: - uses: actions/checkout@v4 - name: Load cached Poetry installation @@ -84,7 +71,6 @@ jobs: strategy: matrix: python-version: ["3.9"] - if: github.event_name == 'pull_request' steps: - uses: actions/checkout@v4 - name: Load cached Poetry installation @@ -116,7 +102,6 @@ jobs: strategy: matrix: python-version: ["3.9"] - if: github.event_name == 'pull_request' steps: - uses: actions/checkout@v4 - name: Load cached Poetry installation From a4f7ca623037e22117966e2d88fc700cb6354371 Mon Sep 17 00:00:00 2001 From: swairshah Date: Thu, 7 Mar 2024 14:29:02 -0800 Subject: [PATCH 148/243] Fix code issues in signature-optimizer.mdx 1. use the HotPotQA dataset instead of gsm8k 2. result.rationale instead of result.reason 3. process devset to include input_keys --- .../deep-dive/teleprompter/signature-optimizer.mdx | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/docs/docs/deep-dive/teleprompter/signature-optimizer.mdx b/docs/docs/deep-dive/teleprompter/signature-optimizer.mdx index 25624e60a7..740aa2a4ac 100644 --- a/docs/docs/deep-dive/teleprompter/signature-optimizer.mdx +++ b/docs/docs/deep-dive/teleprompter/signature-optimizer.mdx @@ -26,7 +26,7 @@ from dspy.datasets import HotPotQA dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0) -trainset, devset = gms8k.train, gms8k.dev +trainset, devset = dataset.train, dataset.dev ``` We'll now define a class based signature for QA task similar to `question->answer` and pass it to `ChainOfThought` module, that will give us the result via Chain Of Thought from the LM client for this signature. 
@@ -49,7 +49,7 @@ class CoTPipeline(dspy.Module):
         result = self.predictor(question=question)
         return dspy.Prediction(
             answer=result.answer,
-            reasoning=result.reasoning,
+            reasoning=result.rationale,
         )
 ```
 
@@ -62,6 +62,7 @@ def validate_context_and_answer(example, pred, trace=None):
     answer_EM = dspy.evaluate.answer_exact_match(example, pred)
     return answer_EM
 
+NUM_THREADS = 5
 evaluate = Evaluate(devset=devset, metric=validate_context_and_answer, num_threads=NUM_THREADS, display_progress=True, display_table=False)
 ```
 
@@ -70,7 +71,8 @@ To evaluate the `CoTPipeline` we'll need to create an object of it and pass it a
 
 ```python
 cot_baseline = CoTPipeline()
 
-evaluate(cot_baseline, devset=devset[:])
+devset_with_input = [dspy.Example({"question": r["question"], "answer": r["answer"]}).with_inputs("question") for r in devset]
+evaluate(cot_baseline, devset=devset_with_input)
 ```
 
 Now we have the baseline pipeline ready to use, so let's try using the `SignatureOptimizer` teleprompter and optimizing our pipeline to make it even better!
@@ -162,4 +164,4 @@ This iterative approach allows for continuous refinement of instructions and pre
 
 ***
 
- 
\ No newline at end of file
+

From f44e50e4e4b729efbef2e4ce678b9220321356a0 Mon Sep 17 00:00:00 2001
From: klopsahlong
Date: Thu, 7 Mar 2024 14:50:37 -0800
Subject: [PATCH 149/243] a few optimizer updates

---
 dspy/teleprompt/mipro_optimizer.py           |   18 +-
 dspy/teleprompt/signature_opt.py             |    5 -
 dspy/teleprompt/signature_opt_bayesian.py    |   30 +-
 examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb | 3950 +++++++++++++++++-
 4 files changed, 3935 insertions(+), 68 deletions(-)

diff --git a/dspy/teleprompt/mipro_optimizer.py b/dspy/teleprompt/mipro_optimizer.py
index 2580e09455..18449de6cc 100644
--- a/dspy/teleprompt/mipro_optimizer.py
+++ b/dspy/teleprompt/mipro_optimizer.py
@@ -19,13 +19,13 @@
 """
 USAGE SUGGESTIONS:
 
-The following code can be used to compile a optimized signature teleprompter using the MIPROOptimizer, and evaluate it on an end task:
+The following code can be used to compile an optimized signature teleprompter using MIPRO, and evaluate it on an end task:
 
-from dspy.teleprompt import MIPROOptimizer
+from dspy.teleprompt import MIPRO
 
-teleprompter = MIPROOptimizer(prompt_model=prompt_model, task_model=task_model, metric=metric, num_candidates=10, init_temperature=1.0)
+teleprompter = MIPRO(prompt_model=prompt_model, task_model=task_model, metric=metric, num_candidates=10, init_temperature=1.0)
 kwargs = dict(num_threads=NUM_THREADS, display_progress=True, display_table=0)
-compiled_prompt_opt = teleprompter.compile(program, trainset=trainset[:TRAIN_NUM], trials_num=100, max_bootstrapped_demos=3, max_labeled_demos=5, eval_kwargs=kwargs)
+compiled_prompt_opt = teleprompter.compile(program, trainset=trainset[:TRAIN_NUM], num_trials=100, max_bootstrapped_demos=3, max_labeled_demos=5, eval_kwargs=kwargs)
 eval_score = evaluate(compiled_prompt_opt, devset=evalset[:EVAL_NUM], **kwargs)
 
 Note that this teleprompter takes in the following parameters:
@@ -279,7 +279,7 @@ def _generate_first_N_candidates(self, module, N, view_data, view_examples, demo
 
         return candidates, evaluated_candidates
 
-    def compile(self, student, *, trainset, max_bootstrapped_demos, max_labeled_demos, eval_kwargs, seed=42, view_data=True, view_examples=True, requires_permission_to_run=True, trials_num=None):
+    def compile(self, student, *, trainset, max_bootstrapped_demos, max_labeled_demos, eval_kwargs, seed=42, view_data=True, view_examples=True, requires_permission_to_run=True, num_trials=None):
         # 
Define ANSI escape codes for colors YELLOW = '\033[93m' BLUE = '\033[94m' @@ -288,7 +288,7 @@ def compile(self, student, *, trainset, max_bootstrapped_demos, max_labeled_demo random.seed(seed) - estimated_task_model_calls_wo_module_calls = len(trainset) * trials_num # M * T * P + estimated_task_model_calls_wo_module_calls = len(trainset) * num_trials # M * T * P estimated_prompt_model_calls = 10 + self.n * len(student.predictors()) # num data summary calls + N * P user_message = textwrap.dedent(f"""\ @@ -296,7 +296,7 @@ def compile(self, student, *, trainset, max_bootstrapped_demos, max_labeled_demo Please be advised that based on the parameters you have set, the maximum number of LM calls is projected as follows: - {YELLOW}- Task Model: {BLUE}{BOLD}{len(trainset)}{ENDC}{YELLOW} examples in dev set * {BLUE}{BOLD}{trials_num}{ENDC}{YELLOW} trials * {BLUE}{BOLD}# of LM calls in your program{ENDC}{YELLOW} = ({BLUE}{BOLD}{estimated_task_model_calls_wo_module_calls} * # of LM calls in your program{ENDC}{YELLOW}) task model calls{ENDC} + {YELLOW}- Task Model: {BLUE}{BOLD}{len(trainset)}{ENDC}{YELLOW} examples in dev set * {BLUE}{BOLD}{num_trials}{ENDC}{YELLOW} trials * {BLUE}{BOLD}# of LM calls in your program{ENDC}{YELLOW} = ({BLUE}{BOLD}{estimated_task_model_calls_wo_module_calls} * # of LM calls in your program{ENDC}{YELLOW}) task model calls{ENDC} {YELLOW}- Prompt Model: # data summarizer calls (max {BLUE}{BOLD}10{ENDC}{YELLOW}) + {BLUE}{BOLD}{self.n}{ENDC}{YELLOW} * {BLUE}{BOLD}{len(student.predictors())}{ENDC}{YELLOW} lm calls in program = {BLUE}{BOLD}{estimated_prompt_model_calls}{ENDC}{YELLOW} prompt model calls{ENDC} {YELLOW}{BOLD}Estimated Cost Calculation:{ENDC} @@ -307,7 +307,7 @@ def compile(self, student, *, trainset, max_bootstrapped_demos, max_labeled_demo For a preliminary estimate of potential costs, we recommend you perform your own calculations based on the task and prompt models you intend to use. If the projected costs exceed your budget or expectations, you may consider: - {YELLOW}- Reducing the number of trials (`trials_num`), the size of the trainset, or the number of LM calls in your program.{ENDC} + {YELLOW}- Reducing the number of trials (`num_trials`), the size of the trainset, or the number of LM calls in your program.{ENDC} {YELLOW}- Using a cheaper task model to optimize the prompt.{ENDC} To proceed with the execution of this program, please confirm by typing {BLUE}'y'{ENDC} for yes or {BLUE}'n'{ENDC} for no. @@ -471,7 +471,7 @@ def objective(trial): objective_function = create_objective(module, instruction_candidates, demo_candidates, evaluate, trainset) sampler = optuna.samplers.TPESampler(seed=seed) study = optuna.create_study(direction="maximize", sampler=sampler) - score = study.optimize(objective_function, n_trials=trials_num) + score = study.optimize(objective_function, n_trials=num_trials) if best_program is not None and self.track_stats: best_program.trial_logs = trial_logs diff --git a/dspy/teleprompt/signature_opt.py b/dspy/teleprompt/signature_opt.py index ea4bfa61c1..a051eb4967 100644 --- a/dspy/teleprompt/signature_opt.py +++ b/dspy/teleprompt/signature_opt.py @@ -33,11 +33,6 @@ class SignatureOptimizer(COPRO): def __init__(self, prompt_model=None, metric=None, breadth=10, depth=3, init_temperature=1.4, verbose=False, track_stats=False): - # warnings.warn( - # "`SignatureOptimizer` is deprecated and will be removed in a future version. 
" - # "Use `COPRO` instead.", - # DeprecationWarning - # ) print(u"\u001b[31m[WARNING] SignatureOptimizer has been deprecated and replaced with COPRO. SignatureOptimizer will be removed in a future release. \u001b[31m") super().__init__(prompt_model, metric, breadth, depth, init_temperature, verbose, track_stats) diff --git a/dspy/teleprompt/signature_opt_bayesian.py b/dspy/teleprompt/signature_opt_bayesian.py index e1caef71d1..48b0076716 100644 --- a/dspy/teleprompt/signature_opt_bayesian.py +++ b/dspy/teleprompt/signature_opt_bayesian.py @@ -37,35 +37,9 @@ class BayesianSignatureOptimizer(MIPRO): def __init__(self, prompt_model=None, task_model=None, teacher_settings={}, n=10, metric=None, init_temperature=1.0, verbose=False, track_stats=True, view_data_batch_size=10): - # warnings.warn( - # "`BayesianSignatureOptimizer` is deprecated and will be removed in a future version. " - # "Use `MIPRO` instead.", - # DeprecationWarning - # ) print(u"\u001b[31m[WARNING] BayesianSignatureOptimizer has been deprecated and replaced with MIPRO. BayesianSignatureOptimizer will be removed in a future release. \u001b[31m") super().__init__(prompt_model, task_model, teacher_settings,n,metric,init_temperature,verbose,track_stats,view_data_batch_size) - def compile(self, student, *, devset, max_bootstrapped_demos, max_labeled_demos, eval_kwargs, seed=42, view_data=True, view_examples=True, requires_permission_to_run=True, trials_num=None, optuna_trials_num=None): - # Define ANSI escape codes for colors - YELLOW = '\033[93m' - BLUE = '\033[94m' - BOLD = '\033[1m' - ENDC = '\033[0m' # Resets the color to default - - # Check if both trials_num and optuna_trials_num are None - if trials_num is None and optuna_trials_num is None: - raise ValueError(f"{YELLOW}{BOLD}You must specify the number of trials using the 'trials_num' parameter.{ENDC}") - - # Check if the deprecated parameter is used - if optuna_trials_num is not None: - # Issue a deprecation warning - warnings.warn( - "`optuna_trials_num` is deprecated and will be removed in a future version. 
" - "Use `trials_num` instead.", - DeprecationWarning - ) - # Use trials_num as a fallback if trials_num is not provided - if trials_num is None: - trials_num = optuna_trials_num - return super().compile(student, trainset=devset, max_bootstrapped_demos=max_bootstrapped_demos, max_labeled_demos=max_labeled_demos, eval_kwargs=eval_kwargs, seed=seed, view_data=view_data, view_examples=view_examples, requires_permission_to_run=requires_permission_to_run, num_trials=trials_num) \ No newline at end of file + def compile(self, student, *, devset, max_bootstrapped_demos, max_labeled_demos, eval_kwargs, seed=42, optuna_trials_num, view_data=True, view_examples=True, requires_permission_to_run=True, num_trials=None): + return super().compile(student, trainset=devset, max_bootstrapped_demos=max_bootstrapped_demos, max_labeled_demos=max_labeled_demos, eval_kwargs=eval_kwargs, seed=seed, view_data=view_data, view_examples=view_examples, requires_permission_to_run=requires_permission_to_run, num_trials=optuna_trials_num) \ No newline at end of file diff --git a/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb b/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb index 3c1df4f37d..cb450d763d 100644 --- a/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb +++ b/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb @@ -15,8 +15,7 @@ "id": "3wEDck3ZqZH0" }, "source": [ - "# Using __Multi-stage Instruction Proposal & Optimization (MIPRO)__ in DSPy\n", - "" + "# Using __Multi-stage Instruction Proposal & Optimization (MIPRO)__ in DSPy" ] }, { @@ -70,10 +69,35 @@ "execution_count": 1, "metadata": { "colab": { - "base_uri": "https://localhost:8080/" + "base_uri": "https://localhost:8080/", + "height": 205, + "referenced_widgets": [ + "827d6f08a1894525937562b64df50dc6", + "c977f3fcb63349b294414c28e6bb17d3", + "9f2a85011d284134bced1a72e0f13d9c", + "435b0b18054e454eac41b3722cbe0400", + "b563cc49cf3341f8a54ab0183e8a9794", + "b2c1beb7364a43288209f5e5a6ad2514", + "e338d68fb7694b85bc573c0df83fa23e", + "6fcb23a0eb444b88a1cef94740551fe8", + "35a4261bb9494f068b4e5f2cbe9a927a", + "0cb7129e44b04ef88154a8a32b025391", + "63b53c83345841b69b06b71b830e9adf", + "aa4f1d66d377449c8cb4a73056138d50", + "54f904d1274c499ca7e714fa8cdf6d61", + "2efe34e3d66b4b64b4552d278d4a5962", + "a9c39bc2e1804ad399f1dc66d192df46", + "d04e9ed9ccb64b17903e10e749d097a7", + "abec858558814b2b809cd318aaac1545", + "ce54414ed74441478f593ae366ff16b4", + "fcd8aef3637c4449bef71809764642c2", + "e716458e84c14cb7948ab6dc61f337b9", + "4c081aaabbdb448b91fead4fa73f6c0b", + "da0f46c68b0141a48c887faa0843d5ab" + ] }, "id": "l4Fsh7EhqZH1", - "outputId": "bc43f9ad-e090-4e5d-bd54-1e998225b44d" + "outputId": "5fea0db6-8cb1-4485-91f7-bc1e7dfe5d4b" }, "outputs": [], "source": [ @@ -106,7 +130,7 @@ "base_uri": "https://localhost:8080/" }, "id": "JpijP_d7qZH2", - "outputId": "daf24b9e-7030-4bf1-a08f-ff8b4ad42e22" + "outputId": "422dc4d0-4574-4e4b-935a-c7b4c875472f" }, "outputs": [], "source": [ @@ -137,8 +161,7 @@ " !pip install -e $repo_path\n", " !pip install --upgrade cloudpickle==3.0.0\n", "\n", - "import dspy\n", - "import openai" + "import dspy" ] }, { @@ -198,10 +221,112 @@ "execution_count": 4, "metadata": { "colab": { - "base_uri": "https://localhost:8080/" + "base_uri": "https://localhost:8080/", + "height": 340, + "referenced_widgets": [ + "fb0bb0cd51d24f7a9ea4bc5acdab0b6f", + "4cd600dc92fa4dcbb1f6878dfc951a84", + "2d9675dfff7244e3a2c4273e6beac742", + "ebb8d03b2088443988427c674f2ebd08", + "bb649fd497164542865182285391c1cb", + "0ce5aafe3feb4e30b154e8feee246680", + 
"292a903dd61540beb28f0dcc33e60173", + "4c10718ba99243a795348fffc0317d89", + "47ba7929b5a3483bad6dba45b3839d28", + "d50c0b7e0f35439c938bee5d2159ff82", + "c8da2fa4b14e43948824f527e9665354", + "02b66439b0d3463c835f54ee43a81b29", + "757ec3ba99bc49b78386be420b315738", + "14f1e99c941349ceabc1d360deb1a14d", + "d601031bf4744c849885f7becf1e5f76", + "2ac62fd2e5d64e44a69551d27913caf8", + "a6a9ecf3b2d74a1ab78a904f76b462c7", + "0fcf2dc801ac414ca7ff9db0790e322c", + "b2b11f62717f482a8c7d28dfef7282e0", + "ffda68bae3e34d39abce4d2064b88617", + "0083f3cd53b742a6916f9131bffc92ab", + "85c4f1c241054d989d893130ca7ec595", + "ef2f0db8dc4148daae63001d4c6a555b", + "2f24b6572cb44cbaac147b28a4490582", + "83926bd6a50441959fdc945808cbea61", + "a93f7fced2444026bed86ff3ac73ab46", + "b7d6568fa80446e1a9ca2f9102b47f7f", + "f66e4271b9544b83b598700cc97593e9", + "530d6c38c848425082f506709fecb5f7", + "e035507b6408448f87ca0fee35bc1bb1", + "42d3de61644547ad80e585629fd97c3d", + "978d5cab723f4f02bcadb71b099840a8", + "05f74ff1c66d41858e4436707089ce10", + "67dd858331754b7a944bd9cb725c5409", + "9b318e36701f43a38bf49db28f1f82d6", + "5859289715b94e09b45bb2215fd8f663", + "4ffa164e576e43788118a2574ae6089b", + "c066cdb020ff4d0f925def7eb9373228", + "48b4d8350a074defb044eeaa9586f9c0", + "b8924c8accbc415e8e28bc232d063003", + "a2b761c3ce72458a8ef3d47903d42828", + "6bd216ee356e4853ac52c81f5632834d", + "053a5c1ad4cd47548a8a358dc8834c50", + "338ffb9834b44e0b9a9a353ede75f9d6", + "3ec77a173a5345fcb06827a87b4ddf38", + "c47e844caea446e1a452426ed899fe36", + "060cbe0e37dc422591916d3768066224", + "e68714c12f8543c9b14360f8f1d37604", + "bfdbc53eb3d34077bbfa0f8806733704", + "7038244ae993489eb76422d4f6135e58", + "76a9b3ae6fdb4ee2a228966556287752", + "13ee127ea31847b9b3d5f6a8a0b10cfa", + "f99a2f9616c744578b88fb1322674c32", + "43d07caba731495f9fe22a7ef81ef19a", + "2532bb223c2d4dbeb2cfeffbfdeac65b", + "cd15509869ed4a518b6d4d48b932e314", + "2ab35b29510c422ca4f07bfad4339419", + "bba71ee322f548439853954f1c055977", + "cb6ef0a279774d1f81cf2f4efb3b3c04", + "eca4539630b04bb2b927d780ef4ca849", + "bdd29de7feba4156823b10124217ef54", + "63742b8642ae4a94999bd5cdb3fa7da2", + "6aec8a01051743acab8de920672cf38f", + "0576fd9cffb4489fa07c4af347ada10e", + "e8e84a2a7343409d8f81439a42e53601", + "f13ff2cfe1a6443296cd1a9fe956312e", + "371665875b63442586198c64f3f441dc", + "4dc0cc26e04741c1b9a28deb1c0f533d", + "c95c3c713b9c4e7b95ab273b17c20788", + "176abb1c80904828965774b9c5ed2558", + "10cf746afb1b4c748ce5fda0dd026ab9", + "67d3a00c8e9e4e9a81a039e8b05c1200", + "b27566e9133a493bbba2af2f1cd1118c", + "ebb50f6701c146efafef65ab75cb5901", + "8dbde39ea3c144268b2547c55863c929", + "70c258c2634b431cb283420f86675818", + "88f9b45c9508415fb29fe786768be085", + "1f5f6e14543a4fe48f90459afe074739", + "823a30d9f3f34793b8eb8b79899eaf19", + "bb56dc209d8646ec84b3d194761c1e57", + "30d9db28931442a0b15ef29792e0a6a9", + "cbe4ee8e8d5646b98f4ff470952384da", + "52747a49054d454e8f899fc832985877", + "1af61e78281347429396163cdae419ed", + "f30e71e4d77c43d7b8b45ff9b3905d51", + "90a8e892d747436aa02bce59454b117d", + "ad62117365344dfbbed1fd46ae8238e6", + "a2454d2f78b34a279e8cad724200d5eb", + "ae83b2e7628e4345896eba886b563bc3", + "560375a7cc364b65be6f72db30139ce3", + "8c0bf6da280e4db88b24b2e1028f9841", + "44ed3f5af4234059b806dcc26abb99c7", + "dd09c11a454b4edb81f18e018dc917ca", + "56ac79bb987146c7ba76f1bd0315360f", + "e2e51fbcedad401ba7d79ac45258a3df", + "97e73f6600c641fd9e724d992a8b8d6d", + "68d13e7ab60245f9910806508ae08cc0", + "571bcef58c3440ce9aa9cb4f441ec97a", + "d2f1105f250542649c0b6cb4c27d75df" + ] }, "id": 
"hiVgd3N7qZH3", - "outputId": "09e1ea66-7c8d-438a-8c37-1ab96fe8cdf0" + "outputId": "8b0ef2cf-1836-4aab-8990-62aa6240ab1f" }, "outputs": [ { @@ -300,14 +425,21 @@ "base_uri": "https://localhost:8080/" }, "id": "MU2aHQBTqZH3", - "outputId": "fd60fbb3-ca89-4ecb-911b-24751f220cc6" + "outputId": "32f26cb9-2e6d-48b7-9732-9f3c5b60ef7a" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 108 / 500 (21.6): 100%|██████████| 500/500 [00:33<00:00, 14.90it/s]\n", + " 0%| | 0/500 [00:00 Date: Thu, 7 Mar 2024 14:56:33 -0800 Subject: [PATCH 150/243] adding in an import that was accidentally removed --- dspy/teleprompt/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/dspy/teleprompt/__init__.py b/dspy/teleprompt/__init__.py index b8b9226338..39462634dc 100644 --- a/dspy/teleprompt/__init__.py +++ b/dspy/teleprompt/__init__.py @@ -1,5 +1,6 @@ from .bootstrap import * from .copro_optimizer import COPRO +from .ensemble import * from .finetune import * from .knn_fewshot import * from .mipro_optimizer import MIPRO From d7bf3bcff5102f328dc4105d15dbedabf2c974dd Mon Sep 17 00:00:00 2001 From: klopsahlong Date: Thu, 7 Mar 2024 15:00:19 -0800 Subject: [PATCH 151/243] updating mipro tests --- .../teleprompt/test_signature_opt_bayesian.py | 35 ++++++++++--------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/tests/teleprompt/test_signature_opt_bayesian.py b/tests/teleprompt/test_signature_opt_bayesian.py index 0cf655784f..38f560da45 100644 --- a/tests/teleprompt/test_signature_opt_bayesian.py +++ b/tests/teleprompt/test_signature_opt_bayesian.py @@ -3,7 +3,7 @@ import re import dspy from dsp.modules import LM -from dspy.teleprompt.signature_opt_bayesian import BayesianSignatureOptimizer +from dspy.teleprompt.signature_opt_bayesian import MIPRO from dspy.utils.dummies import DummyLM from dspy import Example @@ -41,7 +41,7 @@ class ConditionalLM(LM): def __init__(self): super().__init__("conditional-lm") - def basic_request(self, prompt, n=1, **kwargs): + def basic_request(self, prompt, num_candidates=1, **kwargs): # If we are in the "optimization" stage, we don't say much. 
if prompt.endswith("Observations:"): answer = " (*silence*)" @@ -113,8 +113,8 @@ def get_convo(self, index): def test_bayesian_signature_optimizer_initialization(): - optimizer = BayesianSignatureOptimizer( - metric=simple_metric, n=10, init_temperature=1.4, verbose=True, track_stats=True + optimizer = MIPRO( + metric=simple_metric, num_candidates=10, init_temperature=1.4, verbose=True, track_stats=True ) assert optimizer.metric == simple_metric, "Metric not correctly initialized" assert optimizer.n == 10, "Incorrect 'n' parameter initialization" @@ -141,9 +141,9 @@ def test_signature_optimizer_optimization_process(): student = SimpleModule(signature="input -> output") - optimizer = BayesianSignatureOptimizer( + optimizer = MIPRO( metric=simple_metric, - n=10, + num_candidates=10, init_temperature=1.4, verbose=False, track_stats=False, @@ -152,11 +152,12 @@ def test_signature_optimizer_optimization_process(): # Adjustments: Include required parameters for the compile method optimized_student = optimizer.compile( student=student, - devset=trainset, - optuna_trials_num=10, + trainset=trainset, + num_trials=10, max_bootstrapped_demos=3, max_labeled_demos=5, eval_kwargs={"num_threads": 1, "display_progress": False}, + requires_permission_to_run=False, ) assert len(optimized_student.predictor.demos) == 5 @@ -167,9 +168,9 @@ def test_signature_optimizer_bad_lm(): lm=DummyLM([f"Optimized instruction {i}" for i in range(30)]) ) student = SimpleModule(signature="input -> output") - optimizer = BayesianSignatureOptimizer( + optimizer = MIPRO( metric=simple_metric, - n=10, + num_candidates=10, init_temperature=1.4, verbose=False, track_stats=False, @@ -181,11 +182,12 @@ def test_signature_optimizer_bad_lm(): with pytest.raises(ValueError): _optimized_student = optimizer.compile( student=student, - devset=trainset, - optuna_trials_num=10, + trainset=trainset, + num_trials=10, max_bootstrapped_demos=3, max_labeled_demos=5, eval_kwargs={"num_threads": 1, "display_progress": False}, + requires_permission_to_run=False, ) @@ -195,9 +197,9 @@ def test_optimization_and_output_verification(): lm = ConditionalLM() dspy.settings.configure(lm=lm) - optimizer = BayesianSignatureOptimizer( + optimizer = MIPRO( metric=simple_metric, - n=10, + num_candidates=10, init_temperature=1.4, verbose=False, track_stats=True, @@ -208,11 +210,12 @@ def test_optimization_and_output_verification(): # Compile the student with the optimizer optimized_student = optimizer.compile( student=student, - devset=trainset, - optuna_trials_num=4, + trainset=trainset, + num_trials=4, max_bootstrapped_demos=2, max_labeled_demos=3, eval_kwargs={"num_threads": 1, "display_progress": False}, + requires_permission_to_run=False, ) # Simulate calling the optimized student with a new input From 2704f5af1a42f75270d6a9a7692d0f1f0857c770 Mon Sep 17 00:00:00 2001 From: klopsahlong Date: Thu, 7 Mar 2024 15:03:40 -0800 Subject: [PATCH 152/243] fixing warning statement --- dspy/teleprompt/mipro_optimizer.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/dspy/teleprompt/mipro_optimizer.py b/dspy/teleprompt/mipro_optimizer.py index f54c2822e0..fce5377fb6 100644 --- a/dspy/teleprompt/mipro_optimizer.py +++ b/dspy/teleprompt/mipro_optimizer.py @@ -306,8 +306,9 @@ def compile(self, student, *, trainset, max_bootstrapped_demos, max_labeled_demo and prompt models you intend to use. 
If the projected costs exceed your budget or expectations, you may consider: {YELLOW}- Reducing the number of trials (`num_trials`), the size of the trainset, or the number of LM calls in your program.{ENDC} - {YELLOW}- Using a cheaper task model to optimize the prompt.{ENDC} - + {YELLOW}- Using a cheaper task model to optimize the prompt.{ENDC}""") + + user_confirmation_message = textwrap.dedent(f"""\ To proceed with the execution of this program, please confirm by typing {BLUE}'y'{ENDC} for yes or {BLUE}'n'{ENDC} for no. If you would like to bypass this confirmation step in future executions, set the {YELLOW}`requires_permission_to_run`{ENDC} flag to {YELLOW}`False`.{ENDC} @@ -321,6 +322,7 @@ def compile(self, student, *, trainset, max_bootstrapped_demos, max_labeled_demo if requires_permission_to_run: + print(user_confirmation_message) user_input = input("Do you wish to continue? (y/n): ").strip().lower() if user_input != 'y': print("Compilation aborted by the user.") From 484da6aab6da9940fe8c5d0cac0870735eb66565 Mon Sep 17 00:00:00 2001 From: klopsahlong Date: Thu, 7 Mar 2024 15:07:55 -0800 Subject: [PATCH 153/243] fixing bug when requires_permission_to_run=False, and doing ruff cleanups --- dspy/teleprompt/mipro_optimizer.py | 265 +++++++++++----------- dspy/teleprompt/signature_opt.py | 2 +- dspy/teleprompt/signature_opt_bayesian.py | 3 +- 3 files changed, 135 insertions(+), 135 deletions(-) diff --git a/dspy/teleprompt/mipro_optimizer.py b/dspy/teleprompt/mipro_optimizer.py index fce5377fb6..3612b0962f 100644 --- a/dspy/teleprompt/mipro_optimizer.py +++ b/dspy/teleprompt/mipro_optimizer.py @@ -117,8 +117,6 @@ def __init__(self, prompt_model=None, task_model=None, teacher_settings={}, num_ def _print_full_program(self, program): for i,predictor in enumerate(program.predictors()): if self.verbose: print(f"Predictor {i}") - # if self.verbose: print(f"i: {self._get_signature(predictor).instructions}") - # if self.verbose: print(f"p: {self._get_signature(predictor).fields[-1].name}") if self.verbose: print(f"i: {self._get_signature(predictor).instructions}") *_, last_field = self._get_signature(predictor).fields.values() if self.verbose: print(f"p: {last_field.json_schema_extra['prefix']}") @@ -321,160 +319,163 @@ def compile(self, student, *, trainset, max_bootstrapped_demos, max_labeled_demo sys.stdout.flush() # Flush the output buffer to force the message to print + run=True if requires_permission_to_run: print(user_confirmation_message) user_input = input("Do you wish to continue? 
(y/n): ").strip().lower() if user_input != 'y': print("Compilation aborted by the user.") + run=False + + if run: + # Set up program and evaluation function + module = student.deepcopy() + evaluate = Evaluate(devset=trainset, metric=self.metric, **eval_kwargs) + + # In the case where the bootstrapped and labeled demos are set to 0, we'll stil bootstrap examples to use in our meta prompt + if max_bootstrapped_demos==0 and max_labeled_demos==0: #TODO: address case when max_bootstrapped alone is 0 + max_bootstrapped_demos_for_candidate_gen = 1 + max_labeled_demos_for_candidate_gen = 1 #TODO: this might only need to be 0 else: - # Set up program and evaluation function - module = student.deepcopy() - evaluate = Evaluate(devset=trainset, metric=self.metric, **eval_kwargs) - - # In the case where the bootstrapped and labeled demos are set to 0, we'll stil bootstrap examples to use in our meta prompt - if max_bootstrapped_demos==0 and max_labeled_demos==0: #TODO: address case when max_bootstrapped alone is 0 - max_bootstrapped_demos_for_candidate_gen = 1 - max_labeled_demos_for_candidate_gen = 1 #TODO: this might only need to be 0 + max_bootstrapped_demos_for_candidate_gen = max_bootstrapped_demos + max_labeled_demos_for_candidate_gen = max_labeled_demos + + # Generate N few shot example sets + demo_candidates = {} + for i in range(self.n): + if i == 0: # Story empty set of demos as default for index 0 + for module_p in module.predictors(): + if id(module_p) not in demo_candidates: + demo_candidates[id(module_p)] = [] + demo_candidates[id(module_p)].append([]) else: - max_bootstrapped_demos_for_candidate_gen = max_bootstrapped_demos - max_labeled_demos_for_candidate_gen = max_labeled_demos - - # Generate N few shot example sets - demo_candidates = {} - for i in range(self.n): - if i == 0: # Story empty set of demos as default for index 0 - for module_p in module.predictors(): - if id(module_p) not in demo_candidates: - demo_candidates[id(module_p)] = [] - demo_candidates[id(module_p)].append([]) - else: - if self.verbose: print(f"Creating basic bootstrap: {i}/{self.n-1}") - - # Create a new basic bootstrap few - shot program . - rng = random.Random(i) - shuffled_trainset = trainset[:] # Create a copy of devset - rng.shuffle(shuffled_trainset) # Shuffle the copy - tp = BootstrapFewShot(metric = self.metric, max_bootstrapped_demos=max_bootstrapped_demos_for_candidate_gen, max_labeled_demos=max_labeled_demos_for_candidate_gen, teacher_settings=self.teacher_settings) - candidate_program = tp.compile(student=module.deepcopy(), trainset=shuffled_trainset) - - # Store the candidate demos - for module_p, candidate_p in zip(module.predictors(), candidate_program.predictors()): - if id(module_p) not in demo_candidates: - demo_candidates[id(module_p)] = [] - demo_candidates[id(module_p)].append(candidate_p.demos) - - # Generate N candidate prompts - instruction_candidates, _ = self._generate_first_N_candidates(module, self.n, view_data, view_examples, demo_candidates, trainset) + if self.verbose: print(f"Creating basic bootstrap: {i}/{self.n-1}") + + # Create a new basic bootstrap few - shot program . 
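+                    # Note: each candidate demo set is bootstrapped from a differently
+                    # seeded shuffle of the trainset, so the N sets are not duplicates.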
+ rng = random.Random(i) + shuffled_trainset = trainset[:] # Create a copy of devset + rng.shuffle(shuffled_trainset) # Shuffle the copy + tp = BootstrapFewShot(metric = self.metric, max_bootstrapped_demos=max_bootstrapped_demos_for_candidate_gen, max_labeled_demos=max_labeled_demos_for_candidate_gen, teacher_settings=self.teacher_settings) + candidate_program = tp.compile(student=module.deepcopy(), trainset=shuffled_trainset) + + # Store the candidate demos + for module_p, candidate_p in zip(module.predictors(), candidate_program.predictors()): + if id(module_p) not in demo_candidates: + demo_candidates[id(module_p)] = [] + demo_candidates[id(module_p)].append(candidate_p.demos) + + # Generate N candidate prompts + instruction_candidates, _ = self._generate_first_N_candidates(module, self.n, view_data, view_examples, demo_candidates, trainset) - # Reset demo_candidates to None for our optimization if the user asked for no fewshot examples - if max_bootstrapped_demos==0 and max_labeled_demos==0: - demo_candidates = None + # Reset demo_candidates to None for our optimization if the user asked for no fewshot examples + if max_bootstrapped_demos==0 and max_labeled_demos==0: + demo_candidates = None - # Initialize variables to store the best program and its score - best_score = float('-inf') - best_program = None - trial_num = 0 + # Initialize variables to store the best program and its score + best_score = float('-inf') + best_program = None + trial_num = 0 - trial_logs = {} + trial_logs = {} - # Define our trial objective - def create_objective(baseline_program, instruction_candidates, demo_candidates, evaluate, trainset): - def objective(trial): - nonlocal best_program, best_score, trial_num, trial_logs # Allow access to the outer variables - candidate_program = baseline_program.deepcopy() + # Define our trial objective + def create_objective(baseline_program, instruction_candidates, demo_candidates, evaluate, trainset): + def objective(trial): + nonlocal best_program, best_score, trial_num, trial_logs # Allow access to the outer variables + candidate_program = baseline_program.deepcopy() - # Suggest the instruction to use for our predictor - print(f"Starting trial #{trial_num}") - trial_logs[trial_num] = {} + # Suggest the instruction to use for our predictor + print(f"Starting trial #{trial_num}") + trial_logs[trial_num] = {} - for p_old, p_new in zip(baseline_program.predictors(), candidate_program.predictors()): + for p_old, p_new in zip(baseline_program.predictors(), candidate_program.predictors()): - # Get instruction candidates for our given predictor - p_instruction_candidates = instruction_candidates[id(p_old)] - if demo_candidates: p_demo_candidates = demo_candidates[id(p_old)] + # Get instruction candidates for our given predictor + p_instruction_candidates = instruction_candidates[id(p_old)] + if demo_candidates: p_demo_candidates = demo_candidates[id(p_old)] - # Suggest the index of the instruction candidate to use in our trial - instruction_idx = trial.suggest_categorical(f"{id(p_old)}_predictor_instruction",range(len(p_instruction_candidates))) - if demo_candidates: demos_idx = trial.suggest_categorical(f"{id(p_old)}_predictor_demos",range(len(p_demo_candidates))) - trial_logs[trial_num][f"{id(p_old)}_predictor_instruction"] = instruction_idx - if demo_candidates: trial_logs[trial_num][f"{id(p_old)}_predictor_demos"] = demos_idx + # Suggest the index of the instruction candidate to use in our trial + instruction_idx = 
trial.suggest_categorical(f"{id(p_old)}_predictor_instruction",range(len(p_instruction_candidates))) + if demo_candidates: demos_idx = trial.suggest_categorical(f"{id(p_old)}_predictor_demos",range(len(p_demo_candidates))) + trial_logs[trial_num][f"{id(p_old)}_predictor_instruction"] = instruction_idx + if demo_candidates: trial_logs[trial_num][f"{id(p_old)}_predictor_demos"] = demos_idx - # Get the selected instruction candidate - selected_candidate = p_instruction_candidates[instruction_idx] - selected_instruction = selected_candidate.proposed_instruction.strip('"').strip() - selected_prefix = selected_candidate.proposed_prefix_for_output_field.strip('"').strip() + # Get the selected instruction candidate + selected_candidate = p_instruction_candidates[instruction_idx] + selected_instruction = selected_candidate.proposed_instruction.strip('"').strip() + selected_prefix = selected_candidate.proposed_prefix_for_output_field.strip('"').strip() - # Use this candidates in our program - *_, last_field = self._get_signature(p_new).fields.keys() - updated_signature = self._get_signature(p_new).with_instructions(selected_instruction).with_updated_fields(last_field, prefix=selected_prefix) - self._set_signature(p_new, updated_signature) + # Use this candidates in our program + *_, last_field = self._get_signature(p_new).fields.keys() + updated_signature = self._get_signature(p_new).with_instructions(selected_instruction).with_updated_fields(last_field, prefix=selected_prefix) + self._set_signature(p_new, updated_signature) - # Get the selected demos - if demo_candidates: selected_demos = p_demo_candidates[demos_idx] + # Get the selected demos + if demo_candidates: selected_demos = p_demo_candidates[demos_idx] - # Use these demos in our program - if demo_candidates: p_new.demos = selected_demos + # Use these demos in our program + if demo_candidates: p_new.demos = selected_demos - # breakpoint() - - if self.verbose: print("Evaling the following program:") - # breakpoint() - if self.verbose: self._print_full_program(candidate_program) - trial_logs[trial_num]["program"] = candidate_program - - # Evaluate with the new prompts - total_score = 0 - batch_size = 100 - num_batches = math.ceil(len(trainset) / batch_size) - - for i in range(num_batches): - start_index = i * batch_size - end_index = min((i + 1) * batch_size, len(trainset)) - split_trainset = trainset[start_index:end_index] - split_score = evaluate(candidate_program, devset=split_trainset, display_table=0) - if self.verbose: print(f"{i}st split score: {split_score}") - - total_score += split_score * len(split_trainset) - curr_weighted_avg_score = total_score / min((i+1)*100,len(trainset)) - if self.verbose: print(f"curr average score: {curr_weighted_avg_score}") - - trial.report(curr_weighted_avg_score, i) - - # Handle pruning based on the intermediate value. 
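+                        # `trial.report` above streams partial scores to Optuna so that
+                        # `trial.should_prune()` can stop unpromising trials early.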
- if trial.should_prune(): - print("Trial pruned.") - trial_logs[trial_num]["score"] = curr_weighted_avg_score - trial_logs[trial_num]["pruned"] = True - trial_num += 1 - raise optuna.TrialPruned() - - if self.verbose: print(f"Fully evaled score: {curr_weighted_avg_score}") - if self.verbose: self._print_model_history(self.task_model, n=1) # breakpoint() - score = curr_weighted_avg_score - - trial_logs[trial_num]["score"] = curr_weighted_avg_score - trial_logs[trial_num]["pruned"] = False - # Update the best program if the current score is better - if score > best_score: - best_score = score - best_program = candidate_program.deepcopy() - - trial_num += 1 + if self.verbose: print("Evaling the following program:") + # breakpoint() + if self.verbose: self._print_full_program(candidate_program) + trial_logs[trial_num]["program"] = candidate_program + + # Evaluate with the new prompts + total_score = 0 + batch_size = 100 + num_batches = math.ceil(len(trainset) / batch_size) + + for i in range(num_batches): + start_index = i * batch_size + end_index = min((i + 1) * batch_size, len(trainset)) + split_trainset = trainset[start_index:end_index] + split_score = evaluate(candidate_program, devset=split_trainset, display_table=0) + if self.verbose: print(f"{i}st split score: {split_score}") + + total_score += split_score * len(split_trainset) + curr_weighted_avg_score = total_score / min((i+1)*100,len(trainset)) + if self.verbose: print(f"curr average score: {curr_weighted_avg_score}") + + trial.report(curr_weighted_avg_score, i) + + # Handle pruning based on the intermediate value. + if trial.should_prune(): + print("Trial pruned.") + trial_logs[trial_num]["score"] = curr_weighted_avg_score + trial_logs[trial_num]["pruned"] = True + trial_num += 1 + raise optuna.TrialPruned() + + if self.verbose: print(f"Fully evaled score: {curr_weighted_avg_score}") + if self.verbose: self._print_model_history(self.task_model, n=1) + # breakpoint() + score = curr_weighted_avg_score + + trial_logs[trial_num]["score"] = curr_weighted_avg_score + trial_logs[trial_num]["pruned"] = False + + # Update the best program if the current score is better + if score > best_score: + best_score = score + best_program = candidate_program.deepcopy() + + trial_num += 1 - return score + return score - return objective + return objective - # Run the trial - objective_function = create_objective(module, instruction_candidates, demo_candidates, evaluate, trainset) - sampler = optuna.samplers.TPESampler(seed=seed) - study = optuna.create_study(direction="maximize", sampler=sampler) - score = study.optimize(objective_function, n_trials=num_trials) + # Run the trial + objective_function = create_objective(module, instruction_candidates, demo_candidates, evaluate, trainset) + sampler = optuna.samplers.TPESampler(seed=seed) + study = optuna.create_study(direction="maximize", sampler=sampler) + score = study.optimize(objective_function, n_trials=num_trials) - if best_program is not None and self.track_stats: - best_program.trial_logs = trial_logs + if best_program is not None and self.track_stats: + best_program.trial_logs = trial_logs - print(f"Returning {best_program} from continue_program") - return best_program \ No newline at end of file + print(f"Returning {best_program} from continue_program") + return best_program \ No newline at end of file diff --git a/dspy/teleprompt/signature_opt.py b/dspy/teleprompt/signature_opt.py index e7cfac21af..9ba1da92c9 100644 --- a/dspy/teleprompt/signature_opt.py +++ b/dspy/teleprompt/signature_opt.py 
@@ -34,7 +34,7 @@ class SignatureOptimizer(COPRO): def __init__(self, prompt_model=None, metric=None, breadth=10, depth=3, init_temperature=1.4, verbose=False, track_stats=False): - print(u"\u001b[31m[WARNING] SignatureOptimizer has been deprecated and replaced with COPRO. SignatureOptimizer will be removed in a future release. \u001b[31m") + print("\u001b[31m[WARNING] SignatureOptimizer has been deprecated and replaced with COPRO. SignatureOptimizer will be removed in a future release. \u001b[31m") super().__init__(prompt_model, metric, breadth, depth, init_temperature, verbose, track_stats) def compile(self, student, *, devset, eval_kwargs): diff --git a/dspy/teleprompt/signature_opt_bayesian.py b/dspy/teleprompt/signature_opt_bayesian.py index 82d92fc9ed..1e08042f7d 100644 --- a/dspy/teleprompt/signature_opt_bayesian.py +++ b/dspy/teleprompt/signature_opt_bayesian.py @@ -1,4 +1,3 @@ -import warnings from dspy.teleprompt.mipro_optimizer import MIPRO @@ -38,7 +37,7 @@ class BayesianSignatureOptimizer(MIPRO): def __init__(self, prompt_model=None, task_model=None, teacher_settings={}, n=10, metric=None, init_temperature=1.0, verbose=False, track_stats=True, view_data_batch_size=10): - print(u"\u001b[31m[WARNING] BayesianSignatureOptimizer has been deprecated and replaced with MIPRO. BayesianSignatureOptimizer will be removed in a future release. \u001b[31m") + print("\u001b[31m[WARNING] BayesianSignatureOptimizer has been deprecated and replaced with MIPRO. BayesianSignatureOptimizer will be removed in a future release. \u001b[31m") super().__init__(prompt_model, task_model, teacher_settings,n,metric,init_temperature,verbose,track_stats,view_data_batch_size) From f9f35b68d05565634e72f173f61561cee37bbc6c Mon Sep 17 00:00:00 2001 From: klopsahlong Date: Thu, 7 Mar 2024 15:12:17 -0800 Subject: [PATCH 154/243] fixing some variable names in test --- tests/teleprompt/test_signature_opt_bayesian.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/teleprompt/test_signature_opt_bayesian.py b/tests/teleprompt/test_signature_opt_bayesian.py index 38f560da45..63baf7a5bd 100644 --- a/tests/teleprompt/test_signature_opt_bayesian.py +++ b/tests/teleprompt/test_signature_opt_bayesian.py @@ -82,7 +82,7 @@ def basic_request(self, prompt, num_candidates=1, **kwargs): print("===") dummy_response = {"choices": []} - for _ in range(n): + for _ in range(num_candidates): dummy_response["choices"].append( { "text": answer, @@ -117,7 +117,7 @@ def test_bayesian_signature_optimizer_initialization(): metric=simple_metric, num_candidates=10, init_temperature=1.4, verbose=True, track_stats=True ) assert optimizer.metric == simple_metric, "Metric not correctly initialized" - assert optimizer.n == 10, "Incorrect 'n' parameter initialization" + assert optimizer.num_candidates == 10, "Incorrect 'num_candidates' parameter initialization" assert ( optimizer.init_temperature == 1.4 ), "Initial temperature not correctly initialized" From bbaa0ac890507765e309920a365bbaf15a906046 Mon Sep 17 00:00:00 2001 From: klopsahlong Date: Thu, 7 Mar 2024 15:14:58 -0800 Subject: [PATCH 155/243] fixing variable name --- dspy/teleprompt/mipro_optimizer.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/dspy/teleprompt/mipro_optimizer.py b/dspy/teleprompt/mipro_optimizer.py index 3612b0962f..1ff76f8167 100644 --- a/dspy/teleprompt/mipro_optimizer.py +++ b/dspy/teleprompt/mipro_optimizer.py @@ -104,7 +104,7 @@ class DatasetDescriptorWithPriorObservations(dspy.Signature): class 
MIPRO(Teleprompter): def __init__(self, prompt_model=None, task_model=None, teacher_settings={}, num_candidates=10, metric=None, init_temperature=1.0, verbose=False, track_stats=True, view_data_batch_size=10): - self.n = num_candidates + self.num_candidates = num_candidates self.metric = metric self.init_temperature = init_temperature self.prompt_model = prompt_model if prompt_model is not None else dspy.settings.lm @@ -226,7 +226,7 @@ def _generate_first_N_candidates(self, module, N, view_data, view_examples, demo if 1 not in example_sets[id(predictor)].keys(): raise ValueError("No examples found for the given predictor") instruct = None - for i in range(1, self.n): + for i in range(1, self.num_candidates): new_instruct = dspy.Predict( BasicGenerateInstructionWithExamplesAndDataObservations, n=1, @@ -247,7 +247,7 @@ def _generate_first_N_candidates(self, module, N, view_data, view_examples, demo # Just examples elif view_examples: instruct = None - for i in range(1,self.n): # Note: skip over the first example set which is empty + for i in range(1,self.num_candidates): # Note: skip over the first example set which is empty new_instruct = dspy.Predict( BasicGenerateInstructionWithExamples, n=1, @@ -285,7 +285,7 @@ def compile(self, student, *, trainset, max_bootstrapped_demos, max_labeled_demo random.seed(seed) estimated_task_model_calls_wo_module_calls = len(trainset) * num_trials # M * T * P - estimated_prompt_model_calls = 10 + self.n * len(student.predictors()) # num data summary calls + N * P + estimated_prompt_model_calls = 10 + self.num_candidates * len(student.predictors()) # num data summary calls + N * P user_message = textwrap.dedent(f"""\ {YELLOW}{BOLD}WARNING: Projected Language Model (LM) Calls{ENDC} @@ -293,7 +293,7 @@ def compile(self, student, *, trainset, max_bootstrapped_demos, max_labeled_demo Please be advised that based on the parameters you have set, the maximum number of LM calls is projected as follows: {YELLOW}- Task Model: {BLUE}{BOLD}{len(trainset)}{ENDC}{YELLOW} examples in dev set * {BLUE}{BOLD}{num_trials}{ENDC}{YELLOW} trials * {BLUE}{BOLD}# of LM calls in your program{ENDC}{YELLOW} = ({BLUE}{BOLD}{estimated_task_model_calls_wo_module_calls} * # of LM calls in your program{ENDC}{YELLOW}) task model calls{ENDC} - {YELLOW}- Prompt Model: # data summarizer calls (max {BLUE}{BOLD}10{ENDC}{YELLOW}) + {BLUE}{BOLD}{self.n}{ENDC}{YELLOW} * {BLUE}{BOLD}{len(student.predictors())}{ENDC}{YELLOW} lm calls in program = {BLUE}{BOLD}{estimated_prompt_model_calls}{ENDC}{YELLOW} prompt model calls{ENDC} + {YELLOW}- Prompt Model: # data summarizer calls (max {BLUE}{BOLD}10{ENDC}{YELLOW}) + {BLUE}{BOLD}{self.num_candidates}{ENDC}{YELLOW} * {BLUE}{BOLD}{len(student.predictors())}{ENDC}{YELLOW} lm calls in program = {BLUE}{BOLD}{estimated_prompt_model_calls}{ENDC}{YELLOW} prompt model calls{ENDC} {YELLOW}{BOLD}Estimated Cost Calculation:{ENDC} @@ -342,14 +342,14 @@ def compile(self, student, *, trainset, max_bootstrapped_demos, max_labeled_demo # Generate N few shot example sets demo_candidates = {} - for i in range(self.n): + for i in range(self.num_candidates): if i == 0: # Story empty set of demos as default for index 0 for module_p in module.predictors(): if id(module_p) not in demo_candidates: demo_candidates[id(module_p)] = [] demo_candidates[id(module_p)].append([]) else: - if self.verbose: print(f"Creating basic bootstrap: {i}/{self.n-1}") + if self.verbose: print(f"Creating basic bootstrap: {i}/{self.num_candidates-1}") # Create a new basic bootstrap few - shot program . 
rng = random.Random(i)
                     shuffled_trainset = trainset[:]  # Create a copy of devset
                     rng.shuffle(shuffled_trainset)  # Shuffle the copy
                     tp = BootstrapFewShot(metric = self.metric, max_bootstrapped_demos=max_bootstrapped_demos_for_candidate_gen, max_labeled_demos=max_labeled_demos_for_candidate_gen, teacher_settings=self.teacher_settings)
                     candidate_program = tp.compile(student=module.deepcopy(), trainset=shuffled_trainset)
 
                     # Store the candidate demos
                     for module_p, candidate_p in zip(module.predictors(), candidate_program.predictors()):
                         if id(module_p) not in demo_candidates:
                             demo_candidates[id(module_p)] = []
                         demo_candidates[id(module_p)].append(candidate_p.demos)
 
         # Generate N candidate prompts
-        instruction_candidates, _ = self._generate_first_N_candidates(module, self.n, view_data, view_examples, demo_candidates, trainset)
+        instruction_candidates, _ = self._generate_first_N_candidates(module, self.num_candidates, view_data, view_examples, demo_candidates, trainset)

From 39f5ba3729efbbeb609b3cdd803e3ee5d3ffa595 Mon Sep 17 00:00:00 2001
From: Herumb Shandilya
Date: Fri, 8 Mar 2024 04:50:07 +0530
Subject: [PATCH 156/243] adding api references and docs files for functional

---
 docs/api/functional/_category_.json           |  8 ++
 docs/api/functional/dspy_TypedCoT.md          | 41 ++++++++++
 docs/api/functional/dspy_TypedPredictor.md    | 78 +++++++++++++++++++
 docs/api/functional/dspy_cot.md               | 30 ++++++++
 docs/api/functional/dspy_predictor.md         | 30 +++++++
 .../language_model_clients/_category_.json    |  2 +-
 docs/api/optimizers/_category_.json           |  2 +-
 .../retrieval_model_clients/_category_.json   |  2 +-
 .../building-blocks/8-typed_predictors.md     | 13 ++++
 .../typed_predictors/_category_.json          |  0
 .../functional_typed_predictors.md            |  7 ++
 .../understanding_predictors.md               |  9 +++
 12 files changed, 219 insertions(+), 3 deletions(-)
 create mode 100644 docs/api/functional/_category_.json
 create mode 100644 docs/api/functional/dspy_TypedCoT.md
 create mode 100644 docs/api/functional/dspy_TypedPredictor.md
 create mode 100644 docs/api/functional/dspy_cot.md
 create mode 100644 docs/api/functional/dspy_predictor.md
 create mode 100644 docs/docs/building-blocks/8-typed_predictors.md
 create mode 100644 docs/docs/deep-dive/typed_predictors/_category_.json
 create mode 100644 docs/docs/deep-dive/typed_predictors/functional_typed_predictors.md
 create mode 100644 docs/docs/deep-dive/typed_predictors/understanding_predictors.md

diff --git a/docs/api/functional/_category_.json b/docs/api/functional/_category_.json
new file mode 100644
index 0000000000..70bd70104a
--- /dev/null
+++ b/docs/api/functional/_category_.json
@@ -0,0 +1,8 @@
+{
+    "label": "Functional",
+    "position": 2,
+    "link": {
+        "type": "generated-index",
+        "description": "This documentation provides an overview of the Typed Predictors."
+    }
+}
\ No newline at end of file
diff --git a/docs/api/functional/dspy_TypedCoT.md b/docs/api/functional/dspy_TypedCoT.md
new file mode 100644
index 0000000000..e728bdad05
--- /dev/null
+++ b/docs/api/functional/dspy_TypedCoT.md
@@ -0,0 +1,41 @@
+---
+sidebar_position: 2
+---
+
+# dspy.TypedChainOfThought
+
+### Overview
+
+#### `def TypedChainOfThought(signature, max_retries=3) -> dspy.Module`
+
+Adds a Chain of Thought `dspy.OutputField` to the `dspy.TypedPredictor` module by prepending it to the Signature. Similar to `dspy.TypedPredictor`, but automatically adds a "reasoning" output field.
+
+* **Inputs**:
+    * `signature`: The `dspy.Signature` specifying the input/output fields
+    * `max_retries`: Maximum number of retries if outputs fail validation
+* **Output**: A dspy.Module instance capable of making predictions.
+
+### Example
+
+```python
+from dspy import InputField, OutputField, Signature
+from dspy.functional import TypedChainOfThought
+from pydantic import BaseModel
+
+# We define a pydantic model that specifies the structured output we expect from the LM.
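+# TypedChainOfThought will additionally prepend a "reasoning" output field to the signature below.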
+class CodeOutput(BaseModel):
+    code: str
+    api_reference: str
+
+class CodeSignature(Signature):
+    function_description: str = InputField()
+    solution: CodeOutput = OutputField()
+
+cot_predictor = TypedChainOfThought(CodeSignature)
+prediction = cot_predictor(
+    function_description="Write a function that adds two numbers."
+)
+
+print(prediction["code"])
+print(prediction["api_reference"])
+```
\ No newline at end of file
diff --git a/docs/api/functional/dspy_TypedPredictor.md b/docs/api/functional/dspy_TypedPredictor.md
new file mode 100644
index 0000000000..8e5fd58572
--- /dev/null
+++ b/docs/api/functional/dspy_TypedPredictor.md
@@ -0,0 +1,78 @@
+---
+sidebar_position: 1
+---
+
+# dspy.TypedPredictor
+
+The `TypedPredictor` class is a sophisticated module designed for making predictions with strict type validations. It leverages a signature to enforce type constraints on inputs and outputs, ensuring that the data conforms to the expected schema.
+
+### Constructor
+
+```python
+TypedPredictor(
+    CodeSignature,
+    max_retries=3
+)
+```
+
+Parameters:
+* `signature` (dspy.Signature): The signature that defines the input and output fields along with their types.
+* `max_retries` (int, optional): The maximum number of retries for generating a valid prediction output. Defaults to 3.
+
+### Methods
+
+#### `copy() -> "TypedPredictor"`
+
+Creates and returns a deep copy of the current TypedPredictor instance.
+
+**Returns:** A new instance of TypedPredictor that is a deep copy of the original instance.
+
+#### `_make_example(type_: Type) -> str`
+
+A static method that generates a JSON object example based on the schema of the specified Pydantic model type. This JSON object serves as an example for the expected input or output format.
+
+**Parameters:**
+* `type_`: A Pydantic model class for which an example JSON object is to be generated.
+
+**Returns:** A string that represents a JSON object example, which validates against the provided Pydantic model's JSON schema. If the method is unable to generate a valid example, it returns an empty string.
+
+#### `_prepare_signature() -> dspy.Signature`
+
+Prepares and returns a modified version of the signature associated with the TypedPredictor instance. This method iterates over the signature's fields to add format and parser functions based on their type annotations.
+
+**Returns:** A dspy.Signature object that has been enhanced with formatting and parsing specifications for its fields.
+
+#### `forward(**kwargs) -> dspy.Prediction`
+
+Executes the prediction logic, making use of the `dspy.Predict` component to generate predictions based on the input arguments. This method handles type validation, parsing of output data, and implements retry logic in case the output does not initially conform to the specified output schema.
+
+**Parameters:**
+
+* `**kwargs`: Keyword arguments corresponding to the input fields defined in the signature.
+
+**Returns:** A dspy.Prediction object containing the prediction results. Each key in this object corresponds to an output field defined in the signature, and its value is the parsed result of the prediction.
+
+### Example
+
+```python
+from dspy import InputField, OutputField, Signature
+from dspy.functional import TypedPredictor
+from pydantic import BaseModel
+
+# We define a pydantic model that specifies the structured output we expect from the LM.
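+# If the LM's output fails validation against this model, TypedPredictor retries up to `max_retries` times.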
+class CodeOutput(BaseModel):
+    code: str
+    api_reference: str
+
+class CodeSignature(Signature):
+    function_description: str = InputField()
+    solution: CodeOutput = OutputField()
+
+predictor = TypedPredictor(CodeSignature)
+prediction = predictor(
+    function_description="Write a function that adds two numbers."
+)
+
+print(prediction["code"])
+print(prediction["api_reference"])
+```
\ No newline at end of file
diff --git a/docs/api/functional/dspy_cot.md b/docs/api/functional/dspy_cot.md
new file mode 100644
index 0000000000..9b9e3bbeae
--- /dev/null
+++ b/docs/api/functional/dspy_cot.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 4
+---
+
+# dspy.cot
+
+### Overview
+
+#### `def cot(func) -> dspy.Module`
+
+The `@cot` decorator is used to create a Chain of Thought module based on the provided function. It automatically generates a `dspy.TypedPredictor` from the function's type annotations and docstring. Similar to `@predictor`, but it adds a "Reasoning" output field to capture the model's step-by-step thinking.
+
+* **Input**: Function with input parameters and return type annotation.
+* **Output**: A dspy.Module instance capable of making predictions.
+
+### Example
+
+```python
+import dspy
+
+context = ["Roses are red.", "Violets are blue."]
+question = "What color are roses?"
+
+@dspy.cot
+def generate_answer(context: list[str], question: str) -> str:
+    """Answer questions with short factoid answers."""
+    pass
+
+generate_answer(context=context, question=question)
+```
\ No newline at end of file
diff --git a/docs/api/functional/dspy_predictor.md b/docs/api/functional/dspy_predictor.md
new file mode 100644
index 0000000000..9d4814c0fe
--- /dev/null
+++ b/docs/api/functional/dspy_predictor.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 3
+---
+
+# dspy.predictor
+
+### Overview
+
+#### `def predictor(func) -> dspy.Module`
+
+The `@predictor` decorator is used to create a predictor module based on the provided function. It automatically generates a `dspy.TypedPredictor` from the function's type annotations and docstring.
+
+* **Input**: Function with input parameters and return type annotation.
+* **Output**: A dspy.Module instance capable of making predictions.
+
+### Example
+
+```python
+import dspy
+
+context = ["Roses are red.", "Violets are blue."]
+question = "What color are roses?"
+
+@dspy.predictor
+def generate_answer(context: list[str], question: str) -> str:
+    """Answer questions with short factoid answers."""
+    pass
+
+generate_answer(context=context, question=question)
+```
\ No newline at end of file
diff --git a/docs/api/language_model_clients/_category_.json b/docs/api/language_model_clients/_category_.json
index 3f6129baea..1527ce1876 100644
--- a/docs/api/language_model_clients/_category_.json
+++ b/docs/api/language_model_clients/_category_.json
@@ -1,6 +1,6 @@
 {
     "label": "Language Model API Clients",
-    "position": 4,
+    "position": 5,
     "link": {
         "type": "generated-index",
         "description": "This documentation provides an overview of the DSPy Language Model Clients."
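The reference pages above describe each typed component in isolation. As a rough sketch of how they can compose inside a program (the signatures, field names, and pipeline below are illustrative, not part of this patch), one might chain a `TypedChainOfThought` planner into a `TypedPredictor` writer within a `dspy.Module`:

```python
import dspy
from dspy.functional import TypedChainOfThought, TypedPredictor
from pydantic import BaseModel

class Outline(BaseModel):
    steps: list[str]

class PlanSignature(dspy.Signature):
    """Draft a step-by-step outline for the requested function."""
    task: str = dspy.InputField()
    outline: Outline = dspy.OutputField()

class WriteSignature(dspy.Signature):
    """Write Python code implementing the given outline."""
    outline: Outline = dspy.InputField()
    code: str = dspy.OutputField()

class CodePipeline(dspy.Module):
    def __init__(self):
        super().__init__()
        self.plan = TypedChainOfThought(PlanSignature)  # also produces a "reasoning" field
        self.write = TypedPredictor(WriteSignature)

    def forward(self, task: str) -> dspy.Prediction:
        # The validated Outline instance flows straight into the second predictor.
        outline = self.plan(task=task).outline
        return self.write(outline=outline)

# Assumes an LM has been configured, e.g. via dspy.settings.configure(lm=...).
pipeline = CodePipeline()
print(pipeline(task="Add two numbers.").code)
```

Because each stage validates its output against the declared type, a malformed intermediate result triggers a retry (up to `max_retries`) instead of propagating silently downstream.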
diff --git a/docs/api/optimizers/_category_.json b/docs/api/optimizers/_category_.json
index 5d7edc9df5..8f98c5b3bb 100644
--- a/docs/api/optimizers/_category_.json
+++ b/docs/api/optimizers/_category_.json
@@ -1,6 +1,6 @@
 {
     "label": "Optimizers",
-    "position": 2,
+    "position": 3,
     "link": {
         "type": "generated-index",
         "description": "Teleprompters are powerful optimizers (included in DSPy) that can learn to bootstrap and select effective prompts for the modules of any program. (The \"tele-\" in the name means \"at a distance\", i.e., automatic prompting at a distance.)\n\nThis documentation provides an overview of the DSPy Teleprompters."
diff --git a/docs/api/retrieval_model_clients/_category_.json b/docs/api/retrieval_model_clients/_category_.json
index 0c3ec89a3d..964dab01b5 100644
--- a/docs/api/retrieval_model_clients/_category_.json
+++ b/docs/api/retrieval_model_clients/_category_.json
@@ -1,6 +1,6 @@
 {
     "label": "Retrieval Model Clients",
-    "position": 3,
+    "position": 4,
     "link": {
         "type": "generated-index",
         "description": "This documentation provides an overview of the DSPy Retrieval Model Clients."
diff --git a/docs/docs/building-blocks/8-typed_predictors.md b/docs/docs/building-blocks/8-typed_predictors.md
new file mode 100644
index 0000000000..65032474bf
--- /dev/null
+++ b/docs/docs/building-blocks/8-typed_predictors.md
@@ -0,0 +1,13 @@
+# Typed Predictors
+
+In DSPy, alongside Signatures and Modules, Typed Predictors let you attach Pydantic types to a Signature's input and output fields and have every prediction validated against them.
+
+## Executing Typed Predictors
+
+## Chain of Thought with Typed Predictors
+
+## Optimizing Typed Predictors
+
+## Typed Predictors via Decorators
+
+## Composing Functional Typed Predictors in `dspy.Module`
\ No newline at end of file
diff --git a/docs/docs/deep-dive/typed_predictors/_category_.json b/docs/docs/deep-dive/typed_predictors/_category_.json
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/docs/docs/deep-dive/typed_predictors/functional_typed_predictors.md b/docs/docs/deep-dive/typed_predictors/functional_typed_predictors.md
new file mode 100644
index 0000000000..9a8f79ae90
--- /dev/null
+++ b/docs/docs/deep-dive/typed_predictors/functional_typed_predictors.md
@@ -0,0 +1,7 @@
+# Functional Typed Predictors
+
+## Typed Predictors as Decorators
+
+## Functional Typed Predictors in `dspy.Module`
+
+## How Functional Typed Predictors Work
\ No newline at end of file
diff --git a/docs/docs/deep-dive/typed_predictors/understanding_predictors.md b/docs/docs/deep-dive/typed_predictors/understanding_predictors.md
new file mode 100644
index 0000000000..f0ad17354a
--- /dev/null
+++ b/docs/docs/deep-dive/typed_predictors/understanding_predictors.md
@@ -0,0 +1,9 @@
+# Understanding Typed Predictors
+
+## Why use a Typed Predictor?
+
+## How to use a Typed Predictor?
+
+## Prompt of Typed Predictors
+
+## How Typed Predictors Work
\ No newline at end of file From 2b2ad809c0678d187eca2cb6cee89aa35570ecc2 Mon Sep 17 00:00:00 2001 From: VivHarsha <101607940+VivHarsha@users.noreply.github.com> Date: Thu, 7 Mar 2024 16:37:10 -0800 Subject: [PATCH 157/243] Update provider name to Claude or Bedrock --- dsp/modules/bedrock.py | 1 + 1 file changed, 1 insertion(+) diff --git a/dsp/modules/bedrock.py b/dsp/modules/bedrock.py index ac8b42d2d4..02fc78a1fa 100644 --- a/dsp/modules/bedrock.py +++ b/dsp/modules/bedrock.py @@ -34,6 +34,7 @@ def __init__( batch_n=True, # Bedrock does not support the `n` parameter ) self._validate_model(model) + self.provider = "claude" if "claude" in model.lower() else "bedrock" def _validate_model(self, model: str) -> None: if "claude" not in model.lower(): From 8b5dd0eb5c9fc59717cb8d56465348817b749994 Mon Sep 17 00:00:00 2001 From: VivHarsha <101607940+VivHarsha@users.noreply.github.com> Date: Thu, 7 Mar 2024 16:41:21 -0800 Subject: [PATCH 158/243] Inspect history for Claude models --- dsp/modules/lm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dsp/modules/lm.py b/dsp/modules/lm.py index fb0b0aab53..5fbf5ec5e1 100644 --- a/dsp/modules/lm.py +++ b/dsp/modules/lm.py @@ -46,7 +46,7 @@ def inspect_history(self, n: int = 1, skip: int = 0): if prompt != last_prompt: - if provider == "clarifai" or provider == "google": + if provider == "clarifai" or provider == "google" or provider == "claude": printed.append( ( prompt, @@ -80,7 +80,7 @@ def inspect_history(self, n: int = 1, skip: int = 0): text = choices[0].text elif provider == "openai" or provider == "ollama": text = ' ' + self._get_choice_text(choices[0]).strip() - elif provider == "clarifai": + elif provider == "clarifai" or provider == "claude" : text=choices elif provider == "google": text = choices[0].parts[0].text From a5f75af4339ebff5cda56d558412ba8a798670e1 Mon Sep 17 00:00:00 2001 From: klopsahlong Date: Thu, 7 Mar 2024 21:39:13 -0800 Subject: [PATCH 159/243] updating tests for new optimizers --- tests/teleprompt/test_signature_opt.py | 24 ++++++++-------- .../teleprompt/test_signature_opt_bayesian.py | 28 +++++++++++++------ 2 files changed, 31 insertions(+), 21 deletions(-) diff --git a/tests/teleprompt/test_signature_opt.py b/tests/teleprompt/test_signature_opt.py index d7f3475514..c0fe712bcf 100644 --- a/tests/teleprompt/test_signature_opt.py +++ b/tests/teleprompt/test_signature_opt.py @@ -1,6 +1,6 @@ import textwrap import dspy -from dspy.teleprompt.signature_opt import SignatureOptimizer +from dspy.teleprompt.signature_opt import COPRO from dspy.utils.dummies import DummyLM from dspy import Example @@ -16,7 +16,7 @@ def simple_metric(example, prediction): ] def test_signature_optimizer_initialization(): - optimizer = SignatureOptimizer(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) + optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) assert optimizer.metric == simple_metric, "Metric not correctly initialized" assert optimizer.breadth == 2, "Breadth not correctly initialized" assert optimizer.depth == 1, "Depth not correctly initialized" @@ -25,20 +25,20 @@ def test_signature_optimizer_initialization(): class SimpleModule(dspy.Module): def __init__(self, signature): super().__init__() - # SignatureOptimizer doesn't work with dspy.Predict + # COPRO doesn't work with dspy.Predict self.predictor = dspy.ChainOfThought(signature) def forward(self, **kwargs): return self.predictor(**kwargs) def test_signature_optimizer_optimization_process(): - optimizer = 
SignatureOptimizer(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) + optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) dspy.settings.configure(lm=DummyLM(["Optimized instruction 1", "Optimized instruction 2"])) student = SimpleModule("input -> output") - # Assuming the compile method of SignatureOptimizer requires a student module, a development set, and evaluation kwargs - optimized_student = optimizer.compile(student, devset=trainset, eval_kwargs={"num_threads": 1, "display_progress": False}) + # Assuming the compile method of COPRO requires a student module, a development set, and evaluation kwargs + optimized_student = optimizer.compile(student, trainset=trainset, eval_kwargs={"num_threads": 1, "display_progress": False}) # Check that the optimized student has been modified from the original # This check can be more specific based on how the optimization modifies the student @@ -48,12 +48,12 @@ def test_signature_optimizer_optimization_process(): # such as checking the instructions of the optimized student's predictors. def test_signature_optimizer_statistics_tracking(): - optimizer = SignatureOptimizer(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) + optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) optimizer.track_stats = True # Enable statistics tracking dspy.settings.configure(lm=DummyLM(["Optimized instruction"])) student = SimpleModule("input -> output") - optimized_student = optimizer.compile(student, devset=trainset, eval_kwargs={"num_threads": 1, "display_progress": False}) + optimized_student = optimizer.compile(student, trainset=trainset, eval_kwargs={"num_threads": 1, "display_progress": False}) # Verify that statistics have been tracked and attached to the optimized student assert hasattr(optimized_student, 'total_calls'), "Total calls statistic not tracked" @@ -67,12 +67,12 @@ def test_optimization_and_output_verification(): "Optimized Prefix", ]) dspy.settings.configure(lm=lm) - optimizer = SignatureOptimizer(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) + optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) student = SimpleModule("input -> output") # Compile the student with the optimizer - optimized_student = optimizer.compile(student, devset=trainset, eval_kwargs={"num_threads": 1, "display_progress": False}) + optimized_student = optimizer.compile(student, trainset=trainset, eval_kwargs={"num_threads": 1, "display_progress": False}) # Simulate calling the optimized student with a new input test_input = "What is the capital of France?" 
@@ -102,11 +102,11 @@ def test_optimization_and_output_verification(): def test_statistics_tracking_during_optimization(): dspy.settings.configure(lm=DummyLM(["Optimized instruction for stats tracking"])) - optimizer = SignatureOptimizer(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) + optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) optimizer.track_stats = True # Enable statistics tracking student = SimpleModule("input -> output") - optimized_student = optimizer.compile(student, devset=trainset, eval_kwargs={"num_threads": 1, "display_progress": False}) + optimized_student = optimizer.compile(student, trainset=trainset, eval_kwargs={"num_threads": 1, "display_progress": False}) # Verify that statistics have been tracked assert hasattr(optimized_student, 'total_calls'), "Optimizer did not track total metric calls" diff --git a/tests/teleprompt/test_signature_opt_bayesian.py b/tests/teleprompt/test_signature_opt_bayesian.py index 63baf7a5bd..c1cb61de09 100644 --- a/tests/teleprompt/test_signature_opt_bayesian.py +++ b/tests/teleprompt/test_signature_opt_bayesian.py @@ -53,10 +53,18 @@ def basic_request(self, prompt, num_candidates=1, **kwargs): answer = " summarizing..." else: pairs = re.findall(r"Input: (.*)\nOutput: (.*)", prompt) + # pairs = re.findall(r"(?<=\n|^)---\n\nInput: (.*?)\nOutput: (.*?)(?=\n\n---|\n*$)", prompt, re.DOTALL) + # pairs = re.findall(r"Input: (.*?)\n(?:Reasoning: .*?\n)?Output: (.*)", prompt, re.DOTALL) + pairs = re.findall(r"Input: (.*?)\n(?:Reasoning:.*?\n)?Output: (.*?)\n", prompt, re.DOTALL) + + # breakpoint() print("PROMPT:", prompt) print("PAIRS:", pairs) + # if "What is the capital of Spain?" in prompt: + # breakpoint() + last = re.search(r"Input: (.*)\nReasoning: (.*)$", prompt) current_question = last.group(1) @@ -227,34 +235,34 @@ def test_optimization_and_output_verification(): assert prediction.output == "Madrid" - assert lm.get_convo(-1) == textwrap.dedent( + expected_lm_output = textwrap.dedent( """\ Input: --- - + Follow the following format. - + Input: ${input} Reasoning: Let's think step by step in order to ${produce the output}. We ... Output: ${output} --- - Input: What is the capital of Norway? + Input: What is the capital of France? Reasoning: Let's think step by step in order to think deeply. - Output: Oslo + Output: Paris --- - Input: What is the capital of Sweden? + Input: What is the capital of Norway? Reasoning: Let's think step by step in order to think deeply. - Output: Stockholm + Output: Oslo --- - Input: What is the capital of France? - Output: Paris + Input: What does the fox say? + Output: Ring-ding-ding-ding-dingeringeding! --- @@ -262,3 +270,5 @@ def test_optimization_and_output_verification(): Reasoning: Let's think step by step in order to think deeply. 
Output: Madrid""" ) + + assert lm.get_convo(-1) == expected_lm_output \ No newline at end of file From 8e13bd9033260e105e8c3b28728ea5ee4bc5788c Mon Sep 17 00:00:00 2001 From: klopsahlong Date: Thu, 7 Mar 2024 21:45:50 -0800 Subject: [PATCH 160/243] renaming tests --- .../{test_signature_opt.py => test_copro_optimizer.py} | 0 ...t_signature_opt_bayesian.py => test_mipro_optimizer.py} | 7 ------- 2 files changed, 7 deletions(-) rename tests/teleprompt/{test_signature_opt.py => test_copro_optimizer.py} (100%) rename tests/teleprompt/{test_signature_opt_bayesian.py => test_mipro_optimizer.py} (95%) diff --git a/tests/teleprompt/test_signature_opt.py b/tests/teleprompt/test_copro_optimizer.py similarity index 100% rename from tests/teleprompt/test_signature_opt.py rename to tests/teleprompt/test_copro_optimizer.py diff --git a/tests/teleprompt/test_signature_opt_bayesian.py b/tests/teleprompt/test_mipro_optimizer.py similarity index 95% rename from tests/teleprompt/test_signature_opt_bayesian.py rename to tests/teleprompt/test_mipro_optimizer.py index c1cb61de09..17e94a580e 100644 --- a/tests/teleprompt/test_signature_opt_bayesian.py +++ b/tests/teleprompt/test_mipro_optimizer.py @@ -52,19 +52,12 @@ def basic_request(self, prompt, num_candidates=1, **kwargs): elif prompt.endswith("Summary:"): answer = " summarizing..." else: - pairs = re.findall(r"Input: (.*)\nOutput: (.*)", prompt) - # pairs = re.findall(r"(?<=\n|^)---\n\nInput: (.*?)\nOutput: (.*?)(?=\n\n---|\n*$)", prompt, re.DOTALL) - - # pairs = re.findall(r"Input: (.*?)\n(?:Reasoning: .*?\n)?Output: (.*)", prompt, re.DOTALL) pairs = re.findall(r"Input: (.*?)\n(?:Reasoning:.*?\n)?Output: (.*?)\n", prompt, re.DOTALL) # breakpoint() print("PROMPT:", prompt) print("PAIRS:", pairs) - # if "What is the capital of Spain?" 
in prompt: - # breakpoint() - last = re.search(r"Input: (.*)\nReasoning: (.*)$", prompt) current_question = last.group(1) From 063a1446b76bb7b3b40bb456eec0bbaec8270890 Mon Sep 17 00:00:00 2001 From: klopsahlong Date: Thu, 7 Mar 2024 21:48:00 -0800 Subject: [PATCH 161/243] small cleanup of breakpoints --- dspy/teleprompt/mipro_optimizer.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/dspy/teleprompt/mipro_optimizer.py b/dspy/teleprompt/mipro_optimizer.py index 1ff76f8167..3125c562ca 100644 --- a/dspy/teleprompt/mipro_optimizer.py +++ b/dspy/teleprompt/mipro_optimizer.py @@ -415,11 +415,8 @@ def objective(trial): # Use these demos in our program if demo_candidates: p_new.demos = selected_demos - - # breakpoint() if self.verbose: print("Evaling the following program:") - # breakpoint() if self.verbose: self._print_full_program(candidate_program) trial_logs[trial_num]["program"] = candidate_program @@ -451,7 +448,6 @@ def objective(trial): if self.verbose: print(f"Fully evaled score: {curr_weighted_avg_score}") if self.verbose: self._print_model_history(self.task_model, n=1) - # breakpoint() score = curr_weighted_avg_score trial_logs[trial_num]["score"] = curr_weighted_avg_score From b8bb02160cc2f116e9df96c3a400d23941b5e570 Mon Sep 17 00:00:00 2001 From: klopsahlong Date: Thu, 7 Mar 2024 22:51:10 -0800 Subject: [PATCH 162/243] minor updates --- dspy/teleprompt/copro_optimizer.py | 372 +++++++++++-------- dspy/teleprompt/signature_opt_bayesian.py | 2 +- examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb | 143 +++---- 3 files changed, 285 insertions(+), 232 deletions(-) diff --git a/dspy/teleprompt/copro_optimizer.py b/dspy/teleprompt/copro_optimizer.py index 9d304cf9ed..49a6ff4388 100644 --- a/dspy/teleprompt/copro_optimizer.py +++ b/dspy/teleprompt/copro_optimizer.py @@ -117,185 +117,237 @@ def compile(self, student, *, trainset, eval_kwargs): results_best = {id(p):{"depth": [], "max": [], "average": [], "min":[], "std": []} for p in module.predictors()} results_latest = {id(p):{"depth": [], "max": [], "average": [], "min":[], "std": []} for p in module.predictors()} - if self.track_stats: - import numpy as np - - - candidates = {} - evaluated_candidates = defaultdict(dict) - - # Seed the prompt optimizer zero shot with just the instruction, generate BREADTH new prompts - for predictor in module.predictors(): - basic_instruction = None - basic_prefix = None - *_, last_key = self._get_signature(predictor).fields.keys() - basic_instruction = self._get_signature(predictor).instructions - basic_prefix = self._get_signature(predictor).fields[last_key].json_schema_extra['prefix'] - if self.prompt_model: - with dspy.settings.context(lm=self.prompt_model): - instruct = dspy.Predict(BasicGenerateInstruction, n=self.breadth-1, temperature=self.init_temperature)(basic_instruction=basic_instruction) - else: - instruct = dspy.Predict(BasicGenerateInstruction, n=self.breadth-1, temperature=self.init_temperature)(basic_instruction=basic_instruction) - # Add in our initial prompt as a candidate as well - instruct.completions.proposed_instruction.append(basic_instruction) - instruct.completions.proposed_prefix_for_output_field.append(basic_prefix) - candidates[id(predictor)] = instruct.completions - evaluated_candidates[id(predictor)] = {} + # Define ANSI escape codes for colors + YELLOW = '\033[93m' + BLUE = '\033[94m' + BOLD = '\033[1m' + ENDC = '\033[0m' # Resets the color to default + + random.seed(seed) - if self.verbose and self.prompt_model: print(f"{self.prompt_model.inspect_history(n=1)}") + 
estimated_task_model_calls_wo_module_calls = len(trainset) * num_trials # M * T * P + estimated_prompt_model_calls = 10 + self.num_candidates * len(student.predictors()) # num data summary calls + N * P + + user_message = textwrap.dedent(f"""\ + {YELLOW}{BOLD}WARNING: Projected Language Model (LM) Calls{ENDC} + + Please be advised that based on the parameters you have set, the maximum number of LM calls is projected as follows: + + {YELLOW}- Task Model: {BLUE}{BOLD}{len(trainset)}{ENDC}{YELLOW} examples in dev set * {BLUE}{BOLD}{num_trials}{ENDC}{YELLOW} trials * {BLUE}{BOLD}# of LM calls in your program{ENDC}{YELLOW} = ({BLUE}{BOLD}{estimated_task_model_calls_wo_module_calls} * # of LM calls in your program{ENDC}{YELLOW}) task model calls{ENDC} + {YELLOW}- Prompt Model: # data summarizer calls (max {BLUE}{BOLD}10{ENDC}{YELLOW}) + {BLUE}{BOLD}{self.num_candidates}{ENDC}{YELLOW} * {BLUE}{BOLD}{len(student.predictors())}{ENDC}{YELLOW} lm calls in program = {BLUE}{BOLD}{estimated_prompt_model_calls}{ENDC}{YELLOW} prompt model calls{ENDC} + + {YELLOW}{BOLD}Estimated Cost Calculation:{ENDC} - latest_candidates = candidates - all_candidates = candidates + {YELLOW}Total Cost = (Number of calls to task model * (Avg Input Token Length per Call * Task Model Price per Input Token + Avg Output Token Length per Call * Task Model Price per Output Token) + + (Number of calls to prompt model * (Avg Input Token Length per Call * Task Prompt Price per Input Token + Avg Output Token Length per Call * Prompt Model Price per Output Token).{ENDC} + + For a preliminary estimate of potential costs, we recommend you perform your own calculations based on the task + and prompt models you intend to use. If the projected costs exceed your budget or expectations, you may consider: + + {YELLOW}- Reducing the number of trials (`num_trials`), the size of the trainset, or the number of LM calls in your program.{ENDC} + {YELLOW}- Using a cheaper task model to optimize the prompt.{ENDC}""") - module_clone = module.deepcopy() + user_confirmation_message = textwrap.dedent(f"""\ + To proceed with the execution of this program, please confirm by typing {BLUE}'y'{ENDC} for yes or {BLUE}'n'{ENDC} for no. - # For each iteration in depth... 
- for d in range(self.depth): # TODO: fix this so that we eval the new batch of predictors with the new best followoing predictors - print(f"Iteration Depth: {d+1}/{self.depth}.") + If you would like to bypass this confirmation step in future executions, set the {YELLOW}`requires_permission_to_run`{ENDC} flag to {YELLOW}`False`.{ENDC} - latest_scores = [] + {YELLOW}Awaiting your input...{ENDC} + """) + + print(user_message) - # Go through our module's predictors - for p_i, (p_old, p_new) in enumerate(zip(module.predictors(), module_clone.predictors())): - candidates_ = latest_candidates[id(p_old)] # Use the most recently generated candidates for evaluation - if len(module.predictors()) > 1: - candidates_ = all_candidates[id(p_old)] # Unless our program has multiple predictors, in which case we need to reevaluate all prompts with the new prompt(s) for the other predictor(s) - - # For each candidate - for c_i, c in enumerate(candidates_): - # Get the candidate instruction and prefix - instruction, prefix = c.proposed_instruction.strip('"').strip(), c.proposed_prefix_for_output_field.strip('"').strip() - - # Set this new module with our instruction / prefix - *_, last_key = self._get_signature(p_new).fields.keys() - updated_signature = self._get_signature(p_new) \ - .with_instructions(instruction) \ - .with_updated_fields(last_key, prefix=prefix) - self._set_signature(p_new, updated_signature) + sys.stdout.flush() # Flush the output buffer to force the message to print - # Score the instruction / prefix - if self.verbose: print("----------------") - for i,predictor in enumerate(module_clone.predictors()): - if self.verbose: print(f"Predictor {i+1}") - self._print_signature(predictor) - print(f"At Depth {d+1}/{self.depth}, Evaluating Prompt Candidate #{c_i+1}/{len(candidates_)} for Predictor {p_i+1} of {len(module.predictors())}.") - score = evaluate(module_clone, devset=trainset, **eval_kwargs) - if self.verbose and self.prompt_model: print(f"prompt_model.inspect_history(n=1) {self.prompt_model.inspect_history(n=1)}") - total_calls += 1 - if self.verbose: print("----------------") - - replace_entry = True - if self.verbose: print(f"(instruction, prefix) {(instruction, prefix)}") - # if verbose: print(f"evaluated_candidates[id(p_old)] {evaluated_candidates[id(p_old)]}") - if ((instruction, prefix) in evaluated_candidates[id(p_old)]): - # if verbose: print(f"if evaluated_candidates[id(p_old)][(instruction, prefix)] {evaluated_candidates[id(p_old)][(instruction, prefix)]}") - if evaluated_candidates[id(p_old)][(instruction, prefix)]["score"] >= score: - replace_entry = False - - if replace_entry: - # Add it to our evaluated candidates list - evaluated_candidates[id(p_old)][(instruction, prefix)] = { - "score": score, - "program": module_clone.deepcopy(), - "instruction": instruction, - "prefix": prefix, - "depth": d, - } - - if (len(candidates_)-self.breadth <= c_i): - latest_scores.append(score) - if self.track_stats: - results_latest[id(p_old)]["depth"].append(d) - results_latest[id(p_old)]["max"].append(max(latest_scores)) - results_latest[id(p_old)]["average"].append(sum(latest_scores)/len(latest_scores)) - results_latest[id(p_old)]["min"].append(min(latest_scores)) - results_latest[id(p_old)]["std"].append(np.std(latest_scores)) - - # Now that we've evaluated the candidates, set this predictor to the best performing version - # to ensure the next round of scores reflect the best possible version - best_candidate = max(evaluated_candidates[id(p_old)].values(), key=lambda candidate: 
candidate['score']) - *_, last_key = self._get_signature(p_old).fields.keys() - updated_signature = self._get_signature(p_new) \ - .with_instructions(best_candidate["instruction"]) \ - .with_updated_fields(last_key, prefix=best_candidate["prefix"]) - self._set_signature(p_new, updated_signature) - if self.verbose: print(f"Updating Predictor {id(p_old)} to:\ni: {best_candidate['instruction']}\np: {best_candidate['prefix']}") - if self.verbose: print("Full predictor with update: ") - for i,predictor in enumerate(module_clone.predictors()): - if self.verbose: print(f"Predictor {i}") - self._print_signature(predictor) - - if d == self.depth-1: - break + run=True + if requires_permission_to_run: + print(user_confirmation_message) + user_input = input("Do you wish to continue? (y/n): ").strip().lower() + if user_input != 'y': + print("Compilation aborted by the user.") + run=False - - new_candidates = {} - for p_base in module.predictors(): - # Build Few-Shot Example of Optimized Prompts - attempts = [] - shortest_len = self.breadth - shortest_len = min(len(evaluated_candidates[id(p_base)]),shortest_len) - best_predictors = list(evaluated_candidates[id(p_base)].values()) + if run: + if self.track_stats: + import numpy as np - # best_predictors = evaluated_candidates[id(p_base)].values()[:] - best_predictors.sort(key=lambda x: x['score'], reverse=True) - if self.track_stats: - scores = [x['score'] for x in best_predictors][:10] - results_best[id(p_base)]["depth"].append(d) - results_best[id(p_base)]["max"].append(max(scores)) - results_best[id(p_base)]["average"].append(sum(scores)/len(scores)) - results_best[id(p_base)]["min"].append(min(scores)) - results_best[id(p_base)]["std"].append(np.std(scores)) - - for i in range(shortest_len-1,-1,-1): - # breakpoint() - attempts.append(f'Instruction #{shortest_len-i}: {best_predictors[i]["instruction"]}') - attempts.append(f'Prefix #{shortest_len-i}: {best_predictors[i]["prefix"]}') - attempts.append(f'Resulting Score #{shortest_len-i}: {best_predictors[i]["score"]}') - - # Generate next batch of potential prompts to optimize, with previous attempts as input + candidates = {} + evaluated_candidates = defaultdict(dict) + + # Seed the prompt optimizer zero shot with just the instruction, generate BREADTH new prompts + for predictor in module.predictors(): + basic_instruction = None + basic_prefix = None + *_, last_key = self._get_signature(predictor).fields.keys() + basic_instruction = self._get_signature(predictor).instructions + basic_prefix = self._get_signature(predictor).fields[last_key].json_schema_extra['prefix'] if self.prompt_model: with dspy.settings.context(lm=self.prompt_model): - instr = dspy.Predict(GenerateInstructionGivenAttempts, n=self.breadth, temperature=self.init_temperature)(attempted_instructions=attempts) + instruct = dspy.Predict(BasicGenerateInstruction, n=self.breadth-1, temperature=self.init_temperature)(basic_instruction=basic_instruction) else: - instr = dspy.Predict(GenerateInstructionGivenAttempts, n=self.breadth, temperature=self.init_temperature)(attempted_instructions=attempts) + instruct = dspy.Predict(BasicGenerateInstruction, n=self.breadth-1, temperature=self.init_temperature)(basic_instruction=basic_instruction) + # Add in our initial prompt as a candidate as well + instruct.completions.proposed_instruction.append(basic_instruction) + instruct.completions.proposed_prefix_for_output_field.append(basic_prefix) + candidates[id(predictor)] = instruct.completions + evaluated_candidates[id(predictor)] = {} + + if 
self.verbose and self.prompt_model: print(f"{self.prompt_model.inspect_history(n=1)}") - if self.verbose and self.prompt_model: print(f"{self.prompt_model.inspect_history(n=1)}") - # Get candidates for each predictor - new_candidates[id(p_base)] = instr.completions - all_candidates[id(p_base)].proposed_instruction.extend(instr.completions.proposed_instruction) - all_candidates[id(p_base)].proposed_prefix_for_output_field.extend(instr.completions.proposed_prefix_for_output_field) + latest_candidates = candidates + all_candidates = candidates + + module_clone = module.deepcopy() - if self.verbose and self.prompt_model: print(f"{self.prompt_model.inspect_history(n=1)}") - latest_candidates = new_candidates - - candidates = [] - for predictor in module.predictors(): - candidates.extend(list(evaluated_candidates[id(predictor)].values())) + # For each iteration in depth... + for d in range(self.depth): # TODO: fix this so that we eval the new batch of predictors with the new best followoing predictors + print(f"Iteration Depth: {d+1}/{self.depth}.") - if self.track_stats: - best_predictors = list(evaluated_candidates[id(predictor)].values()) - best_predictors.sort(key=lambda x: x['score'], reverse=True) + latest_scores = [] + + # Go through our module's predictors + for p_i, (p_old, p_new) in enumerate(zip(module.predictors(), module_clone.predictors())): + candidates_ = latest_candidates[id(p_old)] # Use the most recently generated candidates for evaluation + if len(module.predictors()) > 1: + candidates_ = all_candidates[id(p_old)] # Unless our program has multiple predictors, in which case we need to reevaluate all prompts with the new prompt(s) for the other predictor(s) + + # For each candidate + for c_i, c in enumerate(candidates_): + # Get the candidate instruction and prefix + instruction, prefix = c.proposed_instruction.strip('"').strip(), c.proposed_prefix_for_output_field.strip('"').strip() + + # Set this new module with our instruction / prefix + *_, last_key = self._get_signature(p_new).fields.keys() + updated_signature = self._get_signature(p_new) \ + .with_instructions(instruction) \ + .with_updated_fields(last_key, prefix=prefix) + self._set_signature(p_new, updated_signature) + + # Score the instruction / prefix + if self.verbose: print("----------------") + for i,predictor in enumerate(module_clone.predictors()): + if self.verbose: print(f"Predictor {i+1}") + self._print_signature(predictor) + print(f"At Depth {d+1}/{self.depth}, Evaluating Prompt Candidate #{c_i+1}/{len(candidates_)} for Predictor {p_i+1} of {len(module.predictors())}.") + score = evaluate(module_clone, devset=trainset, **eval_kwargs) + if self.verbose and self.prompt_model: print(f"prompt_model.inspect_history(n=1) {self.prompt_model.inspect_history(n=1)}") + total_calls += 1 + if self.verbose: print("----------------") + + replace_entry = True + if self.verbose: print(f"(instruction, prefix) {(instruction, prefix)}") + # if verbose: print(f"evaluated_candidates[id(p_old)] {evaluated_candidates[id(p_old)]}") + if ((instruction, prefix) in evaluated_candidates[id(p_old)]): + # if verbose: print(f"if evaluated_candidates[id(p_old)][(instruction, prefix)] {evaluated_candidates[id(p_old)][(instruction, prefix)]}") + if evaluated_candidates[id(p_old)][(instruction, prefix)]["score"] >= score: + replace_entry = False + + if replace_entry: + # Add it to our evaluated candidates list + evaluated_candidates[id(p_old)][(instruction, prefix)] = { + "score": score, + "program": module_clone.deepcopy(), + "instruction": 
instruction, + "prefix": prefix, + "depth": d, + } + + if (len(candidates_)-self.breadth <= c_i): + latest_scores.append(score) + + if self.track_stats: + results_latest[id(p_old)]["depth"].append(d) + results_latest[id(p_old)]["max"].append(max(latest_scores)) + results_latest[id(p_old)]["average"].append(sum(latest_scores)/len(latest_scores)) + results_latest[id(p_old)]["min"].append(min(latest_scores)) + results_latest[id(p_old)]["std"].append(np.std(latest_scores)) + + # Now that we've evaluated the candidates, set this predictor to the best performing version + # to ensure the next round of scores reflect the best possible version + best_candidate = max(evaluated_candidates[id(p_old)].values(), key=lambda candidate: candidate['score']) + *_, last_key = self._get_signature(p_old).fields.keys() + updated_signature = self._get_signature(p_new) \ + .with_instructions(best_candidate["instruction"]) \ + .with_updated_fields(last_key, prefix=best_candidate["prefix"]) + self._set_signature(p_new, updated_signature) + if self.verbose: print(f"Updating Predictor {id(p_old)} to:\ni: {best_candidate['instruction']}\np: {best_candidate['prefix']}") + if self.verbose: print("Full predictor with update: ") + for i,predictor in enumerate(module_clone.predictors()): + if self.verbose: print(f"Predictor {i}") + self._print_signature(predictor) + + if d == self.depth-1: + break - scores = [x['score'] for x in best_predictors][:10] - results_best[id(predictor)]["depth"].append(d) - results_best[id(predictor)]["max"].append(max(scores)) - results_best[id(predictor)]["average"].append(sum(scores)/len(scores)) - results_best[id(predictor)]["min"].append(min(scores)) - results_best[id(predictor)]["std"].append(np.std(scores)) + + new_candidates = {} + for p_base in module.predictors(): + # Build Few-Shot Example of Optimized Prompts + attempts = [] + shortest_len = self.breadth + shortest_len = min(len(evaluated_candidates[id(p_base)]),shortest_len) + best_predictors = list(evaluated_candidates[id(p_base)].values()) + + # best_predictors = evaluated_candidates[id(p_base)].values()[:] + best_predictors.sort(key=lambda x: x['score'], reverse=True) + + if self.track_stats: + scores = [x['score'] for x in best_predictors][:10] + results_best[id(p_base)]["depth"].append(d) + results_best[id(p_base)]["max"].append(max(scores)) + results_best[id(p_base)]["average"].append(sum(scores)/len(scores)) + results_best[id(p_base)]["min"].append(min(scores)) + results_best[id(p_base)]["std"].append(np.std(scores)) + + for i in range(shortest_len-1,-1,-1): + # breakpoint() + attempts.append(f'Instruction #{shortest_len-i}: {best_predictors[i]["instruction"]}') + attempts.append(f'Prefix #{shortest_len-i}: {best_predictors[i]["prefix"]}') + attempts.append(f'Resulting Score #{shortest_len-i}: {best_predictors[i]["score"]}') + + # Generate next batch of potential prompts to optimize, with previous attempts as input + if self.prompt_model: + with dspy.settings.context(lm=self.prompt_model): + instr = dspy.Predict(GenerateInstructionGivenAttempts, n=self.breadth, temperature=self.init_temperature)(attempted_instructions=attempts) + else: + instr = dspy.Predict(GenerateInstructionGivenAttempts, n=self.breadth, temperature=self.init_temperature)(attempted_instructions=attempts) - # if verbose: print(f"candidates: {candidates}") - candidates.sort(key=lambda x: x['score'], reverse=True) + if self.verbose and self.prompt_model: print(f"{self.prompt_model.inspect_history(n=1)}") + # Get candidates for each predictor + 
new_candidates[id(p_base)] = instr.completions + all_candidates[id(p_base)].proposed_instruction.extend(instr.completions.proposed_instruction) + all_candidates[id(p_base)].proposed_prefix_for_output_field.extend(instr.completions.proposed_prefix_for_output_field) - candidates = self._drop_duplicates(candidates) + if self.verbose and self.prompt_model: print(f"{self.prompt_model.inspect_history(n=1)}") + latest_candidates = new_candidates + + candidates = [] + for predictor in module.predictors(): + candidates.extend(list(evaluated_candidates[id(predictor)].values())) - best_program = candidates[0]["program"] - best_program.candidate_programs = candidates - best_program.total_calls = total_calls - if self.track_stats: - best_program.results_best = results_best - best_program.results_latest = results_latest + if self.track_stats: + best_predictors = list(evaluated_candidates[id(predictor)].values()) + best_predictors.sort(key=lambda x: x['score'], reverse=True) + + scores = [x['score'] for x in best_predictors][:10] + results_best[id(predictor)]["depth"].append(d) + results_best[id(predictor)]["max"].append(max(scores)) + results_best[id(predictor)]["average"].append(sum(scores)/len(scores)) + results_best[id(predictor)]["min"].append(min(scores)) + results_best[id(predictor)]["std"].append(np.std(scores)) + + # if verbose: print(f"candidates: {candidates}") + candidates.sort(key=lambda x: x['score'], reverse=True) + + candidates = self._drop_duplicates(candidates) + + best_program = candidates[0]["program"] + best_program.candidate_programs = candidates + best_program.total_calls = total_calls + if self.track_stats: + best_program.results_best = results_best + best_program.results_latest = results_latest - return best_program \ No newline at end of file + return best_program \ No newline at end of file diff --git a/dspy/teleprompt/signature_opt_bayesian.py b/dspy/teleprompt/signature_opt_bayesian.py index 1e08042f7d..5d009f86ee 100644 --- a/dspy/teleprompt/signature_opt_bayesian.py +++ b/dspy/teleprompt/signature_opt_bayesian.py @@ -41,5 +41,5 @@ def __init__(self, prompt_model=None, task_model=None, teacher_settings={}, n=10 super().__init__(prompt_model, task_model, teacher_settings,n,metric,init_temperature,verbose,track_stats,view_data_batch_size) - def compile(self, student, *, devset, max_bootstrapped_demos, max_labeled_demos, eval_kwargs, seed=42, optuna_trials_num, view_data=True, view_examples=True, requires_permission_to_run=True, num_trials=None): + def compile(self, student, *, devset, max_bootstrapped_demos, max_labeled_demos, eval_kwargs, seed=42, optuna_trials_num, view_data=True, view_examples=True, requires_permission_to_run=False, num_trials=None): return super().compile(student, trainset=devset, max_bootstrapped_demos=max_bootstrapped_demos, max_labeled_demos=max_labeled_demos, eval_kwargs=eval_kwargs, seed=seed, view_data=view_data, view_examples=view_examples, requires_permission_to_run=requires_permission_to_run, num_trials=optuna_trials_num) diff --git a/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb b/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb index cb450d763d..e50831c930 100644 --- a/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb +++ b/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb @@ -58,15 +58,66 @@ { "cell_type": "markdown", "metadata": { - "id": "rbGIXWcqqZH1" + "id": "5Vo4Tb9srSow" }, "source": [ - "First, we'll __load in the cached requests__ for this tasks, so that we don't actually need to call any LMs for this notebook. 
We'll also load in our pre optimized program from hugging face to inspect later." + "First, we will install __DSPy__ if it's not there already." ] }, { "cell_type": "code", "execution_count": 1, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "JpijP_d7qZH2", + "outputId": "422dc4d0-4574-4e4b-935a-c7b4c875472f" + }, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "\n", + "import sys\n", + "import os\n", + "import regex as re\n", + "\n", + "try: # When on google Colab, let's clone the notebook so we download the cache.\n", + " import google.colab\n", + " repo_path = 'dspy'\n", + "\n", + " !git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path\n", + "except:\n", + " repo_path = '.'\n", + "\n", + "if repo_path not in sys.path:\n", + " sys.path.append(repo_path)\n", + "\n", + "\n", + "import pkg_resources # Install the package if it's not installed\n", + "if not \"dspy-ai\" in {pkg.key for pkg in pkg_resources.working_set}:\n", + " !pip install -U pip\n", + " !pip install dspy-ai\n", + " !pip install openai~=0.28.1\n", + " !pip install -e $repo_path\n", + " !pip install --upgrade cloudpickle==3.0.0\n", + "\n", + "import dspy" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "rbGIXWcqqZH1" + }, + "source": [ + "Then, we'll __load in the cached requests__ for this tasks, so that we don't actually need to call any LMs for this notebook. We'll also load in our pre optimized program from hugging face to inspect later." + ] + }, + { + "cell_type": "code", + "execution_count": 2, "metadata": { "colab": { "base_uri": "https://localhost:8080/", @@ -113,57 +164,6 @@ "os.environ[\"DSP_NOTEBOOK_CACHEDIR\"] = f\"{os.getcwd()}/MIPRO_notebook_cache\"" ] }, - { - "cell_type": "markdown", - "metadata": { - "id": "5Vo4Tb9srSow" - }, - "source": [ - "Next, we will install __DSPy__ if it's not there already." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "JpijP_d7qZH2", - "outputId": "422dc4d0-4574-4e4b-935a-c7b4c875472f" - }, - "outputs": [], - "source": [ - "%load_ext autoreload\n", - "%autoreload 2\n", - "\n", - "import sys\n", - "import os\n", - "import regex as re\n", - "\n", - "try: # When on google Colab, let's clone the notebook so we download the cache.\n", - " import google.colab\n", - " repo_path = 'dspy'\n", - "\n", - " !git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path\n", - "except:\n", - " repo_path = '.'\n", - "\n", - "if repo_path not in sys.path:\n", - " sys.path.append(repo_path)\n", - "\n", - "\n", - "import pkg_resources # Install the package if it's not installed\n", - "if not \"dspy-ai\" in {pkg.key for pkg in pkg_resources.working_set}:\n", - " !pip install -U pip\n", - " !pip install dspy-ai\n", - " !pip install openai~=0.28.1\n", - " !pip install -e $repo_path\n", - " !pip install --upgrade cloudpickle==3.0.0\n", - "\n", - "import dspy" - ] - }, { "cell_type": "markdown", "metadata": { @@ -182,6 +182,7 @@ "outputs": [], "source": [ "### NOTE: if you'd like to run this code without a cache, you can remove these lines to configure your OPEN AI key ###\n", + "# import openai\n", "# os.environ['OPENAI_API_KEY'] = \"TODO: ADD YOUR OPEN AI KEY HERE\"\n", "# openai.api_key = os.environ.get('OPENAI_API_KEY')\n", "# openai.api_base = \"https://api.openai.com/v1\"\n", @@ -432,15 +433,15 @@ "name": "stderr", "output_type": "stream", "text": [ - " 0%| | 0/500 [00:00 Date: Thu, 7 Mar 2024 22:54:38 -0800 Subject: [PATCH 163/243] removing accidentally added warning from copro --- dspy/teleprompt/copro_optimizer.py | 372 +++++++++++++---------------- 1 file changed, 160 insertions(+), 212 deletions(-) diff --git a/dspy/teleprompt/copro_optimizer.py b/dspy/teleprompt/copro_optimizer.py index 49a6ff4388..9d304cf9ed 100644 --- a/dspy/teleprompt/copro_optimizer.py +++ b/dspy/teleprompt/copro_optimizer.py @@ -117,237 +117,185 @@ def compile(self, student, *, trainset, eval_kwargs): results_best = {id(p):{"depth": [], "max": [], "average": [], "min":[], "std": []} for p in module.predictors()} results_latest = {id(p):{"depth": [], "max": [], "average": [], "min":[], "std": []} for p in module.predictors()} - # Define ANSI escape codes for colors - YELLOW = '\033[93m' - BLUE = '\033[94m' - BOLD = '\033[1m' - ENDC = '\033[0m' # Resets the color to default - - random.seed(seed) + if self.track_stats: + import numpy as np + + + candidates = {} + evaluated_candidates = defaultdict(dict) + + # Seed the prompt optimizer zero shot with just the instruction, generate BREADTH new prompts + for predictor in module.predictors(): + basic_instruction = None + basic_prefix = None + *_, last_key = self._get_signature(predictor).fields.keys() + basic_instruction = self._get_signature(predictor).instructions + basic_prefix = self._get_signature(predictor).fields[last_key].json_schema_extra['prefix'] + if self.prompt_model: + with dspy.settings.context(lm=self.prompt_model): + instruct = dspy.Predict(BasicGenerateInstruction, n=self.breadth-1, temperature=self.init_temperature)(basic_instruction=basic_instruction) + else: + instruct = dspy.Predict(BasicGenerateInstruction, n=self.breadth-1, temperature=self.init_temperature)(basic_instruction=basic_instruction) + # Add in our initial prompt as a candidate as well + 
instruct.completions.proposed_instruction.append(basic_instruction) + instruct.completions.proposed_prefix_for_output_field.append(basic_prefix) + candidates[id(predictor)] = instruct.completions + evaluated_candidates[id(predictor)] = {} - estimated_task_model_calls_wo_module_calls = len(trainset) * num_trials # M * T * P - estimated_prompt_model_calls = 10 + self.num_candidates * len(student.predictors()) # num data summary calls + N * P - - user_message = textwrap.dedent(f"""\ - {YELLOW}{BOLD}WARNING: Projected Language Model (LM) Calls{ENDC} - - Please be advised that based on the parameters you have set, the maximum number of LM calls is projected as follows: - - {YELLOW}- Task Model: {BLUE}{BOLD}{len(trainset)}{ENDC}{YELLOW} examples in dev set * {BLUE}{BOLD}{num_trials}{ENDC}{YELLOW} trials * {BLUE}{BOLD}# of LM calls in your program{ENDC}{YELLOW} = ({BLUE}{BOLD}{estimated_task_model_calls_wo_module_calls} * # of LM calls in your program{ENDC}{YELLOW}) task model calls{ENDC} - {YELLOW}- Prompt Model: # data summarizer calls (max {BLUE}{BOLD}10{ENDC}{YELLOW}) + {BLUE}{BOLD}{self.num_candidates}{ENDC}{YELLOW} * {BLUE}{BOLD}{len(student.predictors())}{ENDC}{YELLOW} lm calls in program = {BLUE}{BOLD}{estimated_prompt_model_calls}{ENDC}{YELLOW} prompt model calls{ENDC} - - {YELLOW}{BOLD}Estimated Cost Calculation:{ENDC} + if self.verbose and self.prompt_model: print(f"{self.prompt_model.inspect_history(n=1)}") - {YELLOW}Total Cost = (Number of calls to task model * (Avg Input Token Length per Call * Task Model Price per Input Token + Avg Output Token Length per Call * Task Model Price per Output Token) - + (Number of calls to prompt model * (Avg Input Token Length per Call * Task Prompt Price per Input Token + Avg Output Token Length per Call * Prompt Model Price per Output Token).{ENDC} - - For a preliminary estimate of potential costs, we recommend you perform your own calculations based on the task - and prompt models you intend to use. If the projected costs exceed your budget or expectations, you may consider: - - {YELLOW}- Reducing the number of trials (`num_trials`), the size of the trainset, or the number of LM calls in your program.{ENDC} - {YELLOW}- Using a cheaper task model to optimize the prompt.{ENDC}""") + latest_candidates = candidates + all_candidates = candidates - user_confirmation_message = textwrap.dedent(f"""\ - To proceed with the execution of this program, please confirm by typing {BLUE}'y'{ENDC} for yes or {BLUE}'n'{ENDC} for no. + module_clone = module.deepcopy() - If you would like to bypass this confirmation step in future executions, set the {YELLOW}`requires_permission_to_run`{ENDC} flag to {YELLOW}`False`.{ENDC} + # For each iteration in depth... + for d in range(self.depth): # TODO: fix this so that we eval the new batch of predictors with the new best followoing predictors + print(f"Iteration Depth: {d+1}/{self.depth}.") - {YELLOW}Awaiting your input...{ENDC} - """) - - print(user_message) + latest_scores = [] - sys.stdout.flush() # Flush the output buffer to force the message to print - - - run=True - if requires_permission_to_run: - print(user_confirmation_message) - user_input = input("Do you wish to continue? 
(y/n): ").strip().lower() - if user_input != 'y': - print("Compilation aborted by the user.") - run=False - - if run: - if self.track_stats: - import numpy as np - - - candidates = {} - evaluated_candidates = defaultdict(dict) - - # Seed the prompt optimizer zero shot with just the instruction, generate BREADTH new prompts - for predictor in module.predictors(): - basic_instruction = None - basic_prefix = None - *_, last_key = self._get_signature(predictor).fields.keys() - basic_instruction = self._get_signature(predictor).instructions - basic_prefix = self._get_signature(predictor).fields[last_key].json_schema_extra['prefix'] - if self.prompt_model: - with dspy.settings.context(lm=self.prompt_model): - instruct = dspy.Predict(BasicGenerateInstruction, n=self.breadth-1, temperature=self.init_temperature)(basic_instruction=basic_instruction) - else: - instruct = dspy.Predict(BasicGenerateInstruction, n=self.breadth-1, temperature=self.init_temperature)(basic_instruction=basic_instruction) - # Add in our initial prompt as a candidate as well - instruct.completions.proposed_instruction.append(basic_instruction) - instruct.completions.proposed_prefix_for_output_field.append(basic_prefix) - candidates[id(predictor)] = instruct.completions - evaluated_candidates[id(predictor)] = {} - - if self.verbose and self.prompt_model: print(f"{self.prompt_model.inspect_history(n=1)}") - - latest_candidates = candidates - all_candidates = candidates - - module_clone = module.deepcopy() - - # For each iteration in depth... - for d in range(self.depth): # TODO: fix this so that we eval the new batch of predictors with the new best followoing predictors - print(f"Iteration Depth: {d+1}/{self.depth}.") - - latest_scores = [] - - # Go through our module's predictors - for p_i, (p_old, p_new) in enumerate(zip(module.predictors(), module_clone.predictors())): - candidates_ = latest_candidates[id(p_old)] # Use the most recently generated candidates for evaluation - if len(module.predictors()) > 1: - candidates_ = all_candidates[id(p_old)] # Unless our program has multiple predictors, in which case we need to reevaluate all prompts with the new prompt(s) for the other predictor(s) - - # For each candidate - for c_i, c in enumerate(candidates_): - # Get the candidate instruction and prefix - instruction, prefix = c.proposed_instruction.strip('"').strip(), c.proposed_prefix_for_output_field.strip('"').strip() - - # Set this new module with our instruction / prefix - *_, last_key = self._get_signature(p_new).fields.keys() - updated_signature = self._get_signature(p_new) \ - .with_instructions(instruction) \ - .with_updated_fields(last_key, prefix=prefix) - self._set_signature(p_new, updated_signature) - - # Score the instruction / prefix - if self.verbose: print("----------------") - for i,predictor in enumerate(module_clone.predictors()): - if self.verbose: print(f"Predictor {i+1}") - self._print_signature(predictor) - print(f"At Depth {d+1}/{self.depth}, Evaluating Prompt Candidate #{c_i+1}/{len(candidates_)} for Predictor {p_i+1} of {len(module.predictors())}.") - score = evaluate(module_clone, devset=trainset, **eval_kwargs) - if self.verbose and self.prompt_model: print(f"prompt_model.inspect_history(n=1) {self.prompt_model.inspect_history(n=1)}") - total_calls += 1 - if self.verbose: print("----------------") - - replace_entry = True - if self.verbose: print(f"(instruction, prefix) {(instruction, prefix)}") - # if verbose: print(f"evaluated_candidates[id(p_old)] {evaluated_candidates[id(p_old)]}") - if 
((instruction, prefix) in evaluated_candidates[id(p_old)]): - # if verbose: print(f"if evaluated_candidates[id(p_old)][(instruction, prefix)] {evaluated_candidates[id(p_old)][(instruction, prefix)]}") - if evaluated_candidates[id(p_old)][(instruction, prefix)]["score"] >= score: - replace_entry = False - - if replace_entry: - # Add it to our evaluated candidates list - evaluated_candidates[id(p_old)][(instruction, prefix)] = { - "score": score, - "program": module_clone.deepcopy(), - "instruction": instruction, - "prefix": prefix, - "depth": d, - } - - if (len(candidates_)-self.breadth <= c_i): - latest_scores.append(score) - - if self.track_stats: - results_latest[id(p_old)]["depth"].append(d) - results_latest[id(p_old)]["max"].append(max(latest_scores)) - results_latest[id(p_old)]["average"].append(sum(latest_scores)/len(latest_scores)) - results_latest[id(p_old)]["min"].append(min(latest_scores)) - results_latest[id(p_old)]["std"].append(np.std(latest_scores)) - - # Now that we've evaluated the candidates, set this predictor to the best performing version - # to ensure the next round of scores reflect the best possible version - best_candidate = max(evaluated_candidates[id(p_old)].values(), key=lambda candidate: candidate['score']) - *_, last_key = self._get_signature(p_old).fields.keys() + # Go through our module's predictors + for p_i, (p_old, p_new) in enumerate(zip(module.predictors(), module_clone.predictors())): + candidates_ = latest_candidates[id(p_old)] # Use the most recently generated candidates for evaluation + if len(module.predictors()) > 1: + candidates_ = all_candidates[id(p_old)] # Unless our program has multiple predictors, in which case we need to reevaluate all prompts with the new prompt(s) for the other predictor(s) + + # For each candidate + for c_i, c in enumerate(candidates_): + # Get the candidate instruction and prefix + instruction, prefix = c.proposed_instruction.strip('"').strip(), c.proposed_prefix_for_output_field.strip('"').strip() + + # Set this new module with our instruction / prefix + *_, last_key = self._get_signature(p_new).fields.keys() updated_signature = self._get_signature(p_new) \ - .with_instructions(best_candidate["instruction"]) \ - .with_updated_fields(last_key, prefix=best_candidate["prefix"]) + .with_instructions(instruction) \ + .with_updated_fields(last_key, prefix=prefix) self._set_signature(p_new, updated_signature) - if self.verbose: print(f"Updating Predictor {id(p_old)} to:\ni: {best_candidate['instruction']}\np: {best_candidate['prefix']}") - if self.verbose: print("Full predictor with update: ") + + # Score the instruction / prefix + if self.verbose: print("----------------") for i,predictor in enumerate(module_clone.predictors()): - if self.verbose: print(f"Predictor {i}") + if self.verbose: print(f"Predictor {i+1}") self._print_signature(predictor) - - if d == self.depth-1: - break - - - new_candidates = {} - for p_base in module.predictors(): - # Build Few-Shot Example of Optimized Prompts - attempts = [] - shortest_len = self.breadth - shortest_len = min(len(evaluated_candidates[id(p_base)]),shortest_len) - best_predictors = list(evaluated_candidates[id(p_base)].values()) - - # best_predictors = evaluated_candidates[id(p_base)].values()[:] - best_predictors.sort(key=lambda x: x['score'], reverse=True) - - if self.track_stats: - scores = [x['score'] for x in best_predictors][:10] - results_best[id(p_base)]["depth"].append(d) - results_best[id(p_base)]["max"].append(max(scores)) - 
results_best[id(p_base)]["average"].append(sum(scores)/len(scores)) - results_best[id(p_base)]["min"].append(min(scores)) - results_best[id(p_base)]["std"].append(np.std(scores)) + print(f"At Depth {d+1}/{self.depth}, Evaluating Prompt Candidate #{c_i+1}/{len(candidates_)} for Predictor {p_i+1} of {len(module.predictors())}.") + score = evaluate(module_clone, devset=trainset, **eval_kwargs) + if self.verbose and self.prompt_model: print(f"prompt_model.inspect_history(n=1) {self.prompt_model.inspect_history(n=1)}") + total_calls += 1 + if self.verbose: print("----------------") + + replace_entry = True + if self.verbose: print(f"(instruction, prefix) {(instruction, prefix)}") + # if verbose: print(f"evaluated_candidates[id(p_old)] {evaluated_candidates[id(p_old)]}") + if ((instruction, prefix) in evaluated_candidates[id(p_old)]): + # if verbose: print(f"if evaluated_candidates[id(p_old)][(instruction, prefix)] {evaluated_candidates[id(p_old)][(instruction, prefix)]}") + if evaluated_candidates[id(p_old)][(instruction, prefix)]["score"] >= score: + replace_entry = False + + if replace_entry: + # Add it to our evaluated candidates list + evaluated_candidates[id(p_old)][(instruction, prefix)] = { + "score": score, + "program": module_clone.deepcopy(), + "instruction": instruction, + "prefix": prefix, + "depth": d, + } - for i in range(shortest_len-1,-1,-1): - # breakpoint() - attempts.append(f'Instruction #{shortest_len-i}: {best_predictors[i]["instruction"]}') - attempts.append(f'Prefix #{shortest_len-i}: {best_predictors[i]["prefix"]}') - attempts.append(f'Resulting Score #{shortest_len-i}: {best_predictors[i]["score"]}') - - # Generate next batch of potential prompts to optimize, with previous attempts as input - if self.prompt_model: - with dspy.settings.context(lm=self.prompt_model): - instr = dspy.Predict(GenerateInstructionGivenAttempts, n=self.breadth, temperature=self.init_temperature)(attempted_instructions=attempts) - else: - instr = dspy.Predict(GenerateInstructionGivenAttempts, n=self.breadth, temperature=self.init_temperature)(attempted_instructions=attempts) + if (len(candidates_)-self.breadth <= c_i): + latest_scores.append(score) - if self.verbose and self.prompt_model: print(f"{self.prompt_model.inspect_history(n=1)}") - # Get candidates for each predictor - new_candidates[id(p_base)] = instr.completions - all_candidates[id(p_base)].proposed_instruction.extend(instr.completions.proposed_instruction) - all_candidates[id(p_base)].proposed_prefix_for_output_field.extend(instr.completions.proposed_prefix_for_output_field) + if self.track_stats: + results_latest[id(p_old)]["depth"].append(d) + results_latest[id(p_old)]["max"].append(max(latest_scores)) + results_latest[id(p_old)]["average"].append(sum(latest_scores)/len(latest_scores)) + results_latest[id(p_old)]["min"].append(min(latest_scores)) + results_latest[id(p_old)]["std"].append(np.std(latest_scores)) + + # Now that we've evaluated the candidates, set this predictor to the best performing version + # to ensure the next round of scores reflect the best possible version + best_candidate = max(evaluated_candidates[id(p_old)].values(), key=lambda candidate: candidate['score']) + *_, last_key = self._get_signature(p_old).fields.keys() + updated_signature = self._get_signature(p_new) \ + .with_instructions(best_candidate["instruction"]) \ + .with_updated_fields(last_key, prefix=best_candidate["prefix"]) + self._set_signature(p_new, updated_signature) + if self.verbose: print(f"Updating Predictor {id(p_old)} to:\ni: 
{best_candidate['instruction']}\np: {best_candidate['prefix']}") + if self.verbose: print("Full predictor with update: ") + for i,predictor in enumerate(module_clone.predictors()): + if self.verbose: print(f"Predictor {i}") + self._print_signature(predictor) + + if d == self.depth-1: + break - if self.verbose and self.prompt_model: print(f"{self.prompt_model.inspect_history(n=1)}") - latest_candidates = new_candidates - candidates = [] - for predictor in module.predictors(): - candidates.extend(list(evaluated_candidates[id(predictor)].values())) + new_candidates = {} + for p_base in module.predictors(): + # Build Few-Shot Example of Optimized Prompts + attempts = [] + shortest_len = self.breadth + shortest_len = min(len(evaluated_candidates[id(p_base)]),shortest_len) + best_predictors = list(evaluated_candidates[id(p_base)].values()) - if self.track_stats: - best_predictors = list(evaluated_candidates[id(predictor)].values()) - best_predictors.sort(key=lambda x: x['score'], reverse=True) + # best_predictors = evaluated_candidates[id(p_base)].values()[:] + best_predictors.sort(key=lambda x: x['score'], reverse=True) + if self.track_stats: scores = [x['score'] for x in best_predictors][:10] - results_best[id(predictor)]["depth"].append(d) - results_best[id(predictor)]["max"].append(max(scores)) - results_best[id(predictor)]["average"].append(sum(scores)/len(scores)) - results_best[id(predictor)]["min"].append(min(scores)) - results_best[id(predictor)]["std"].append(np.std(scores)) + results_best[id(p_base)]["depth"].append(d) + results_best[id(p_base)]["max"].append(max(scores)) + results_best[id(p_base)]["average"].append(sum(scores)/len(scores)) + results_best[id(p_base)]["min"].append(min(scores)) + results_best[id(p_base)]["std"].append(np.std(scores)) + + for i in range(shortest_len-1,-1,-1): + # breakpoint() + attempts.append(f'Instruction #{shortest_len-i}: {best_predictors[i]["instruction"]}') + attempts.append(f'Prefix #{shortest_len-i}: {best_predictors[i]["prefix"]}') + attempts.append(f'Resulting Score #{shortest_len-i}: {best_predictors[i]["score"]}') + + # Generate next batch of potential prompts to optimize, with previous attempts as input + if self.prompt_model: + with dspy.settings.context(lm=self.prompt_model): + instr = dspy.Predict(GenerateInstructionGivenAttempts, n=self.breadth, temperature=self.init_temperature)(attempted_instructions=attempts) + else: + instr = dspy.Predict(GenerateInstructionGivenAttempts, n=self.breadth, temperature=self.init_temperature)(attempted_instructions=attempts) - # if verbose: print(f"candidates: {candidates}") - candidates.sort(key=lambda x: x['score'], reverse=True) + if self.verbose and self.prompt_model: print(f"{self.prompt_model.inspect_history(n=1)}") + # Get candidates for each predictor + new_candidates[id(p_base)] = instr.completions + all_candidates[id(p_base)].proposed_instruction.extend(instr.completions.proposed_instruction) + all_candidates[id(p_base)].proposed_prefix_for_output_field.extend(instr.completions.proposed_prefix_for_output_field) - candidates = self._drop_duplicates(candidates) + if self.verbose and self.prompt_model: print(f"{self.prompt_model.inspect_history(n=1)}") + latest_candidates = new_candidates + + candidates = [] + for predictor in module.predictors(): + candidates.extend(list(evaluated_candidates[id(predictor)].values())) - best_program = candidates[0]["program"] - best_program.candidate_programs = candidates - best_program.total_calls = total_calls if self.track_stats: - best_program.results_best = 
results_best - best_program.results_latest = results_latest + best_predictors = list(evaluated_candidates[id(predictor)].values()) + best_predictors.sort(key=lambda x: x['score'], reverse=True) + + scores = [x['score'] for x in best_predictors][:10] + results_best[id(predictor)]["depth"].append(d) + results_best[id(predictor)]["max"].append(max(scores)) + results_best[id(predictor)]["average"].append(sum(scores)/len(scores)) + results_best[id(predictor)]["min"].append(min(scores)) + results_best[id(predictor)]["std"].append(np.std(scores)) + + # if verbose: print(f"candidates: {candidates}") + candidates.sort(key=lambda x: x['score'], reverse=True) + + candidates = self._drop_duplicates(candidates) + + best_program = candidates[0]["program"] + best_program.candidate_programs = candidates + best_program.total_calls = total_calls + if self.track_stats: + best_program.results_best = results_best + best_program.results_latest = results_latest - return best_program \ No newline at end of file + return best_program \ No newline at end of file From 695cefcee02e0d8d2c29b8a0b43634efced6385a Mon Sep 17 00:00:00 2001 From: klopsahlong Date: Fri, 8 Mar 2024 00:19:13 -0800 Subject: [PATCH 164/243] make num_trials a required variable, plus minor changes to notebook --- dspy/teleprompt/mipro_optimizer.py | 4 +- examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb | 61 ++++++++------------ 2 files changed, 26 insertions(+), 39 deletions(-) diff --git a/dspy/teleprompt/mipro_optimizer.py b/dspy/teleprompt/mipro_optimizer.py index 3125c562ca..c9045ff79f 100644 --- a/dspy/teleprompt/mipro_optimizer.py +++ b/dspy/teleprompt/mipro_optimizer.py @@ -103,7 +103,7 @@ class DatasetDescriptorWithPriorObservations(dspy.Signature): observations = dspy.OutputField(desc="Somethings that holds true for most or all of the data you observed or COMPLETE if you have nothing to add") class MIPRO(Teleprompter): - def __init__(self, prompt_model=None, task_model=None, teacher_settings={}, num_candidates=10, metric=None, init_temperature=1.0, verbose=False, track_stats=True, view_data_batch_size=10): + def __init__(self, metric, prompt_model=None, task_model=None, teacher_settings={}, num_candidates=10, init_temperature=1.0, verbose=False, track_stats=True, view_data_batch_size=10): self.num_candidates = num_candidates self.metric = metric self.init_temperature = init_temperature @@ -275,7 +275,7 @@ def _generate_first_N_candidates(self, module, N, view_data, view_examples, demo return candidates, evaluated_candidates - def compile(self, student, *, trainset, max_bootstrapped_demos, max_labeled_demos, eval_kwargs, seed=42, view_data=True, view_examples=True, requires_permission_to_run=True, num_trials=None): + def compile(self, student, *, trainset, num_trials, max_bootstrapped_demos, max_labeled_demos, eval_kwargs, seed=42, view_data=True, view_examples=True, requires_permission_to_run=True): # Define ANSI escape codes for colors YELLOW = '\033[93m' BLUE = '\033[94m' diff --git a/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb b/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb index e50831c930..964c6329c7 100644 --- a/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb +++ b/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb @@ -66,7 +66,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 10, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -74,7 +74,16 @@ "id": "JpijP_d7qZH2", "outputId": "422dc4d0-4574-4e4b-935a-c7b4c875472f" }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + 
"text": [ + "The autoreload extension is already loaded. To reload it, use:\n", + " %reload_ext autoreload\n" + ] + } + ], "source": [ "%load_ext autoreload\n", "%autoreload 2\n", @@ -117,7 +126,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 11, "metadata": { "colab": { "base_uri": "https://localhost:8080/", @@ -175,7 +184,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 12, "metadata": { "id": "UHWzGRVgqZH2" }, @@ -219,7 +228,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 13, "metadata": { "colab": { "base_uri": "https://localhost:8080/", @@ -420,7 +429,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 14, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -433,14 +442,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 0 / 2 (0.0): 0%| | 1/500 [00:00<03:17, 2.52it/s]" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Average Metric: 80 / 500 (16.0): 100%|██████████| 500/500 [00:33<00:00, 15.10it/s]\n", + "Average Metric: 80 / 500 (16.0): 100%|██████████| 500/500 [00:30<00:00, 16.16it/s]\n", "/lfs/0/kristaoo/dspy/dspy/evaluate/evaluate.py:187: FutureWarning: DataFrame.applymap has been deprecated. Use DataFrame.map instead.\n", " df = df.applymap(truncate_cell)\n" ] @@ -456,7 +458,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 107 / 500 (21.4): 100%|██████████| 500/500 [00:32<00:00, 15.31it/s]" + "Average Metric: 107 / 500 (21.4): 100%|██████████| 500/500 [00:30<00:00, 16.50it/s]\n" ] }, { @@ -465,13 +467,6 @@ "text": [ "Average Metric: 107 / 500 (21.4%)\n" ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n" - ] } ], "source": [ @@ -515,7 +510,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 15, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -523,15 +518,7 @@ "id": "TpFW7IaYqZH3", "outputId": "b83c427f-953d-4fa7-b19c-03704369ab53" }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "/lfs/0/kristaoo/dspy/examples/qa/hotpot/MIPRO_notebook_cache/compiler\n" - ] - } - ], + "outputs": [], "source": [ "import cloudpickle as pickle\n", "from dspy.teleprompt import MIPRO\n", @@ -569,7 +556,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 16, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -582,7 +569,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 192 / 500 (38.4): 100%|██████████| 500/500 [00:30<00:00, 16.24it/s]\n" + "Average Metric: 192 / 500 (38.4): 100%|██████████| 500/500 [00:30<00:00, 16.19it/s]\n" ] }, { @@ -596,7 +583,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 193 / 500 (38.6): 100%|██████████| 500/500 [00:32<00:00, 15.53it/s]\n" + "Average Metric: 193 / 500 (38.6): 100%|██████████| 500/500 [00:30<00:00, 16.22it/s]\n" ] }, { @@ -632,7 +619,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 17, "metadata": { "colab": { "base_uri": "https://localhost:8080/", @@ -692,7 +679,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 18, "metadata": { "colab": { "base_uri": "https://localhost:8080/" From d2a2f8994499c110b81d71d54beadd18506732e1 Mon Sep 17 00:00:00 2001 From: klopsahlong Date: Fri, 8 Mar 2024 00:49:14 -0800 Subject: [PATCH 165/243] adding notebook cache_dir in front of dspy import --- 
examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb | 3011 +++++++----------- 1 file changed, 1142 insertions(+), 1869 deletions(-) diff --git a/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb b/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb index 964c6329c7..d0f9e4a493 100644 --- a/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb +++ b/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb @@ -61,29 +61,20 @@ "id": "5Vo4Tb9srSow" }, "source": [ - "First, we will install __DSPy__ if it's not there already." + "First, we will install __DSPy__ if it's not there already. We'll also __load in the cached requests__ for this tasks, so that we don't actually need to call any LMs for this notebook. We'll also load in our pre optimized program from hugging face to inspect later." ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 1, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "JpijP_d7qZH2", - "outputId": "422dc4d0-4574-4e4b-935a-c7b4c875472f" + "outputId": "c641a4b1-05f5-45cc-d715-4347c526b576" }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The autoreload extension is already loaded. To reload it, use:\n", - " %reload_ext autoreload\n" - ] - } - ], + "outputs": [], "source": [ "%load_ext autoreload\n", "%autoreload 2\n", @@ -112,55 +103,6 @@ " !pip install -e $repo_path\n", " !pip install --upgrade cloudpickle==3.0.0\n", "\n", - "import dspy" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "rbGIXWcqqZH1" - }, - "source": [ - "Then, we'll __load in the cached requests__ for this tasks, so that we don't actually need to call any LMs for this notebook. We'll also load in our pre optimized program from hugging face to inspect later." - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 205, - "referenced_widgets": [ - "827d6f08a1894525937562b64df50dc6", - "c977f3fcb63349b294414c28e6bb17d3", - "9f2a85011d284134bced1a72e0f13d9c", - "435b0b18054e454eac41b3722cbe0400", - "b563cc49cf3341f8a54ab0183e8a9794", - "b2c1beb7364a43288209f5e5a6ad2514", - "e338d68fb7694b85bc573c0df83fa23e", - "6fcb23a0eb444b88a1cef94740551fe8", - "35a4261bb9494f068b4e5f2cbe9a927a", - "0cb7129e44b04ef88154a8a32b025391", - "63b53c83345841b69b06b71b830e9adf", - "aa4f1d66d377449c8cb4a73056138d50", - "54f904d1274c499ca7e714fa8cdf6d61", - "2efe34e3d66b4b64b4552d278d4a5962", - "a9c39bc2e1804ad399f1dc66d192df46", - "d04e9ed9ccb64b17903e10e749d097a7", - "abec858558814b2b809cd318aaac1545", - "ce54414ed74441478f593ae366ff16b4", - "fcd8aef3637c4449bef71809764642c2", - "e716458e84c14cb7948ab6dc61f337b9", - "4c081aaabbdb448b91fead4fa73f6c0b", - "da0f46c68b0141a48c887faa0843d5ab" - ] - }, - "id": "l4Fsh7EhqZH1", - "outputId": "5fea0db6-8cb1-4485-91f7-bc1e7dfe5d4b" - }, - "outputs": [], - "source": [ "from huggingface_hub import hf_hub_download\n", "import zipfile\n", "import os\n", @@ -170,7 +112,9 @@ "compiled_program_file_path = hf_hub_download(repo_id=repo_id, filename='compiled_program.pickle')\n", "with zipfile.ZipFile(cache_file_path, 'r') as zip_ref:\n", " zip_ref.extractall(\".\")\n", - "os.environ[\"DSP_NOTEBOOK_CACHEDIR\"] = f\"{os.getcwd()}/MIPRO_notebook_cache\"" + "os.environ[\"DSP_NOTEBOOK_CACHEDIR\"] = f\"{os.getcwd()}/MIPRO_notebook_cache\"\n", + "\n", + "import dspy" ] }, { @@ -184,14 +128,15 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 2, "metadata": { "id": "UHWzGRVgqZH2" }, "outputs": [], "source": [ + "import openai\n", + "\n", 
"### NOTE: if you'd like to run this code without a cache, you can remove these lines to configure your OPEN AI key ###\n", - "# import openai\n", "# os.environ['OPENAI_API_KEY'] = \"TODO: ADD YOUR OPEN AI KEY HERE\"\n", "# openai.api_key = os.environ.get('OPENAI_API_KEY')\n", "# openai.api_base = \"https://api.openai.com/v1\"\n", @@ -228,115 +173,115 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 3, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 340, "referenced_widgets": [ - "fb0bb0cd51d24f7a9ea4bc5acdab0b6f", - "4cd600dc92fa4dcbb1f6878dfc951a84", - "2d9675dfff7244e3a2c4273e6beac742", - "ebb8d03b2088443988427c674f2ebd08", - "bb649fd497164542865182285391c1cb", - "0ce5aafe3feb4e30b154e8feee246680", - "292a903dd61540beb28f0dcc33e60173", - "4c10718ba99243a795348fffc0317d89", - "47ba7929b5a3483bad6dba45b3839d28", - "d50c0b7e0f35439c938bee5d2159ff82", - "c8da2fa4b14e43948824f527e9665354", - "02b66439b0d3463c835f54ee43a81b29", - "757ec3ba99bc49b78386be420b315738", - "14f1e99c941349ceabc1d360deb1a14d", - "d601031bf4744c849885f7becf1e5f76", - "2ac62fd2e5d64e44a69551d27913caf8", - "a6a9ecf3b2d74a1ab78a904f76b462c7", - "0fcf2dc801ac414ca7ff9db0790e322c", - "b2b11f62717f482a8c7d28dfef7282e0", - "ffda68bae3e34d39abce4d2064b88617", - "0083f3cd53b742a6916f9131bffc92ab", - "85c4f1c241054d989d893130ca7ec595", - "ef2f0db8dc4148daae63001d4c6a555b", - "2f24b6572cb44cbaac147b28a4490582", - "83926bd6a50441959fdc945808cbea61", - "a93f7fced2444026bed86ff3ac73ab46", - "b7d6568fa80446e1a9ca2f9102b47f7f", - "f66e4271b9544b83b598700cc97593e9", - "530d6c38c848425082f506709fecb5f7", - "e035507b6408448f87ca0fee35bc1bb1", - "42d3de61644547ad80e585629fd97c3d", - "978d5cab723f4f02bcadb71b099840a8", - "05f74ff1c66d41858e4436707089ce10", - "67dd858331754b7a944bd9cb725c5409", - "9b318e36701f43a38bf49db28f1f82d6", - "5859289715b94e09b45bb2215fd8f663", - "4ffa164e576e43788118a2574ae6089b", - "c066cdb020ff4d0f925def7eb9373228", - "48b4d8350a074defb044eeaa9586f9c0", - "b8924c8accbc415e8e28bc232d063003", - "a2b761c3ce72458a8ef3d47903d42828", - "6bd216ee356e4853ac52c81f5632834d", - "053a5c1ad4cd47548a8a358dc8834c50", - "338ffb9834b44e0b9a9a353ede75f9d6", - "3ec77a173a5345fcb06827a87b4ddf38", - "c47e844caea446e1a452426ed899fe36", - "060cbe0e37dc422591916d3768066224", - "e68714c12f8543c9b14360f8f1d37604", - "bfdbc53eb3d34077bbfa0f8806733704", - "7038244ae993489eb76422d4f6135e58", - "76a9b3ae6fdb4ee2a228966556287752", - "13ee127ea31847b9b3d5f6a8a0b10cfa", - "f99a2f9616c744578b88fb1322674c32", - "43d07caba731495f9fe22a7ef81ef19a", - "2532bb223c2d4dbeb2cfeffbfdeac65b", - "cd15509869ed4a518b6d4d48b932e314", - "2ab35b29510c422ca4f07bfad4339419", - "bba71ee322f548439853954f1c055977", - "cb6ef0a279774d1f81cf2f4efb3b3c04", - "eca4539630b04bb2b927d780ef4ca849", - "bdd29de7feba4156823b10124217ef54", - "63742b8642ae4a94999bd5cdb3fa7da2", - "6aec8a01051743acab8de920672cf38f", - "0576fd9cffb4489fa07c4af347ada10e", - "e8e84a2a7343409d8f81439a42e53601", - "f13ff2cfe1a6443296cd1a9fe956312e", - "371665875b63442586198c64f3f441dc", - "4dc0cc26e04741c1b9a28deb1c0f533d", - "c95c3c713b9c4e7b95ab273b17c20788", - "176abb1c80904828965774b9c5ed2558", - "10cf746afb1b4c748ce5fda0dd026ab9", - "67d3a00c8e9e4e9a81a039e8b05c1200", - "b27566e9133a493bbba2af2f1cd1118c", - "ebb50f6701c146efafef65ab75cb5901", - "8dbde39ea3c144268b2547c55863c929", - "70c258c2634b431cb283420f86675818", - "88f9b45c9508415fb29fe786768be085", - "1f5f6e14543a4fe48f90459afe074739", - "823a30d9f3f34793b8eb8b79899eaf19", - 
"bb56dc209d8646ec84b3d194761c1e57", - "30d9db28931442a0b15ef29792e0a6a9", - "cbe4ee8e8d5646b98f4ff470952384da", - "52747a49054d454e8f899fc832985877", - "1af61e78281347429396163cdae419ed", - "f30e71e4d77c43d7b8b45ff9b3905d51", - "90a8e892d747436aa02bce59454b117d", - "ad62117365344dfbbed1fd46ae8238e6", - "a2454d2f78b34a279e8cad724200d5eb", - "ae83b2e7628e4345896eba886b563bc3", - "560375a7cc364b65be6f72db30139ce3", - "8c0bf6da280e4db88b24b2e1028f9841", - "44ed3f5af4234059b806dcc26abb99c7", - "dd09c11a454b4edb81f18e018dc917ca", - "56ac79bb987146c7ba76f1bd0315360f", - "e2e51fbcedad401ba7d79ac45258a3df", - "97e73f6600c641fd9e724d992a8b8d6d", - "68d13e7ab60245f9910806508ae08cc0", - "571bcef58c3440ce9aa9cb4f441ec97a", - "d2f1105f250542649c0b6cb4c27d75df" + "e16044d880174c49af557bd12789493f", + "531c380db44a404ebb168648ea77c3f4", + "e8a46c6ac8a54fdbb44b2c8914552e64", + "81cbe1842465400cba02a46309d98064", + "f8ada809ecdb417ebc8214d860dd6552", + "74d58314b1c449e0b7dec3bda8b653c2", + "8b7b2b9489ae49049befae848d0fb1a1", + "2d47a6a66054438d8e9a3c5e5767c056", + "3b57d0a9768f4befb7509581289035ad", + "134fa83980e142bf82d5b97cfc10da69", + "35e4e99036894a2ba37d9ba23581a8ff", + "835a1e675186490692e336da943d635d", + "95bcef02eb794979b7873b81021e4b40", + "d84324c4c3dc40fea04cc0df1539b4d8", + "a6213bcdbcb24b1694204e6baeb763d8", + "bc423d0878154adf940062660a8315ad", + "0a1744c363f640b5b8939f32f6e72922", + "bbd2db64988147f1a4f8856d890bb4c7", + "ba2b4d7ecdbc4512acf180d8a0d602e8", + "1bb155669e2a441d8992030e8aaa84a9", + "43ed7af1d9c84ac8a6ec2195e48e60eb", + "ac6822762e6b46bd862d6f923aeb4437", + "348bb4fff900492cba8333156626a947", + "697eaeb1a004493a9260a3a89671a9ca", + "8a64950ea896468da3d10f46e9718ec8", + "7b0b45020d0f45288e2f0e4ceff22524", + "ad21e20d1f1b4b6ea74fdab03af8acdb", + "1aab1d48c6124526bee6c74892ecd953", + "598c6f37331e448889b175393a00deff", + "20d8df0ee0a4442582686846757bcc7f", + "fa4e50aaf53d4edfb9ca3046cbada2db", + "96ad4454539145aea6549995344160e3", + "acc3573e48f14dffb635f8632c6f6e74", + "6c0432c6cd8f4cae9aeb8c2870b39922", + "99b8f3882032404990f2b453111a63ba", + "6c21df0657ce486381ba2310c7fa0029", + "a20ac344a32340c785cd162fd0eb55b5", + "6ba50c3ec9e542d5a8d2f900fbcb8689", + "a298045042014c88939a9fe785fccda8", + "84bed63c6597486ca6c39b9c0c4d20bd", + "2a75d45acfb44463af974acb7a1b0d8e", + "ae814b8e55454e0aadc48f7e595575ee", + "ff4bcec9c18e4f04b1f7898daaffeb1a", + "e9c64a790d684c9eaf7f49eea003f43c", + "52f1714412df4ec78f6ff7d0f8a69862", + "988055b44cf9411e8d45d8a4185fef07", + "59be08d44c824d76b8525df14191d569", + "6fde64f36dd84d8eae670efe95e2313a", + "66e6502a463145daa440e967d8bdfdca", + "29cab51fec0b4dacaa8f829ff217c839", + "6ecbf23579324112981d4bce0c0ce369", + "a105ef4f1f754c44b7bc0ec8edf0c0cc", + "1c7d71e42c8c494f867b30d781beb681", + "2f1e652cfa514054abfda794bdfa61a5", + "b17f4731e8e44858889114c52b8f3dd4", + "d50f5eafe90c4684bbdd96569b8ff247", + "9f07fa213de247dabcd3f4213d35a97e", + "782af098d37d4543a4a01ecfed4e5da2", + "037b10adf9724e058c6468f4ca74ed86", + "c7d10443100e49d28266aef9f644ed47", + "d6a78aabcca74314a5b4bb98f350693a", + "954f3cfcb5d44dddbf1d4db9e99c0d70", + "62d8196169bb4a76a94c16d6f1ca61a6", + "0a16c7f37b9a4bbe9bfe7e737ea62801", + "942897aa22214745adee56d2c54447e9", + "0b49910850f3445abe63b6bc7ac18412", + "12a9763cd97742f4ab80c0494a398ca6", + "9d4f14862b704d9bb53d9e74365d14ac", + "56989c76e1534cfd8c9b0da93b3b8bf8", + "f120c447645446e4a04791b97ce9ae93", + "6a5d5ea44c0d4e4384b8956b48c34f14", + "e3dd508496f44171b0d91aca0e8b16a5", + "dc8077859eec4261a28d708ab06a1008", + 
"9a733957f8fc446fbde053ba87289c71", + "287c9f993cd44039923fdc122cd9e040", + "5cfebfd6308349038f9cd7ad5bc00fe5", + "56b80fc45dd247deadceffe17557f0f9", + "7d80528c4b1c48318756230b0174923c", + "398d7d36bbd041fe81ec633e973fc504", + "d01ca0dd46ee4a4daeeee92214bc07d1", + "6028a5e08f8641f5b8e8182e50f55419", + "567646442eb940b09456328d49248945", + "41961130caaf4537a4c1793574c785d7", + "e9935b60c48d46469904492e20f9c2e8", + "f048b50740e94bd8805c142eac1673b6", + "a5effa5d67534fb4be2e3320c1fb2b9f", + "09b3f2c456af41cba9264dbb9a724027", + "e2f3a3377ad64a4cb9f3a42c1ac97344", + "8e396041a4fd4e6db02410a09b7556c7", + "4cd1dc71c80b401da19ed52ef5148d60", + "1f23a95e292249a19055f70cda2622a3", + "69412c7605ec48859baef6f31c91c520", + "ee64a46b61454949ade4177c059bcf61", + "de6507e9f45741218bf31a9af728b2bf", + "38abb8f460d24c27a66c54bb0417f8ce", + "33267de625db44c2a63d7c7d412a7e61", + "19fe27a0df5a4d39873dd1031904405c", + "dba7ce7c756d468b94d0bc8f4815ab6f", + "926cbb119ea745cf9e344c0e50b75f62" ] }, "id": "hiVgd3N7qZH3", - "outputId": "8b0ef2cf-1836-4aab-8990-62aa6240ab1f" + "outputId": "81b97d83-7c5f-4763-d104-3ad320425a2a" }, "outputs": [ { @@ -429,20 +374,27 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 4, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "MU2aHQBTqZH3", - "outputId": "32f26cb9-2e6d-48b7-9732-9f3c5b60ef7a" + "outputId": "786ba2f2-ae0a-4c68-b602-d601fb5a5aa5" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 80 / 500 (16.0): 100%|██████████| 500/500 [00:30<00:00, 16.16it/s]\n", + "Average Metric: 3 / 7 (42.9): 1%| | 6/500 [00:00<00:43, 11.48it/s]" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Average Metric: 108 / 500 (21.6): 100%|██████████| 500/500 [00:33<00:00, 14.99it/s]\n", "/lfs/0/kristaoo/dspy/dspy/evaluate/evaluate.py:187: FutureWarning: DataFrame.applymap has been deprecated. 
Use DataFrame.map instead.\n", " df = df.applymap(truncate_cell)\n" ] @@ -451,21 +403,21 @@ "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 80 / 500 (16.0%)\n" + "Average Metric: 108 / 500 (21.6%)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 107 / 500 (21.4): 100%|██████████| 500/500 [00:30<00:00, 16.50it/s]\n" + "Average Metric: 113 / 500 (22.6): 100%|██████████| 500/500 [00:33<00:00, 14.78it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Average Metric: 107 / 500 (21.4%)\n" + "Average Metric: 113 / 500 (22.6%)\n" ] } ], @@ -510,15 +462,19 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 5, "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "TpFW7IaYqZH3", - "outputId": "b83c427f-953d-4fa7-b19c-03704369ab53" + "id": "NVfMJ_FpBlSI" }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/lfs/0/kristaoo/dspy/examples/qa/hotpot/MIPRO_notebook_cache/compiler\n" + ] + } + ], "source": [ "import cloudpickle as pickle\n", "from dspy.teleprompt import MIPRO\n", @@ -556,41 +512,51 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 6, "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "VvnBp7huqZH3", - "outputId": "b9752a09-1d8b-4225-cc29-21b32a831eea" + "id": "VvnBp7huqZH3" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "Average Metric: 192 / 500 (38.4): 100%|██████████| 500/500 [00:30<00:00, 16.19it/s]\n" + " 0%| | 0/500 [00:00 Date: Fri, 8 Mar 2024 00:58:39 -0800 Subject: [PATCH 166/243] removing re import from notebook --- examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb b/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb index d0f9e4a493..3909acb7f5 100644 --- a/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb +++ b/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb @@ -81,7 +81,6 @@ "\n", "import sys\n", "import os\n", - "import regex as re\n", "\n", "try: # When on google Colab, let's clone the notebook so we download the cache.\n", " import google.colab\n", From 48663a2327b082b17526a8b7c0bd13714b397e98 Mon Sep 17 00:00:00 2001 From: domci Date: Fri, 8 Mar 2024 13:35:38 +0100 Subject: [PATCH 167/243] feature(dspy): added Metadata for ChromaDB retrieval --- dspy/retrieve/chromadb_rm.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/dspy/retrieve/chromadb_rm.py b/dspy/retrieve/chromadb_rm.py index 07ef407d1d..e8a892ea19 100644 --- a/dspy/retrieve/chromadb_rm.py +++ b/dspy/retrieve/chromadb_rm.py @@ -145,6 +145,10 @@ def forward( query_embeddings=embeddings, n_results=k, ) - passages = [dotdict({"long_text": x}) for x in results["documents"][0]] - - return passages + zipped_results = zip( + results["ids"][0], + results["distances"][0], + results["documents"][0], + results["metadatas"][0]) + results = [dotdict({"id": id, "score": dist, "long_text": doc, "metadatas": meta }) for id, dist, doc, meta in zipped_results] + return results From 1d090ebe6c47b09f2d1f413e2ae70d788f37ca26 Mon Sep 17 00:00:00 2001 From: Omar Khattab Date: Fri, 8 Mar 2024 07:43:59 -0800 Subject: [PATCH 168/243] Update setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 3f3bb18585..b98fe08072 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ setup( name="dspy-ai", - version="2.3.6", + 
version="2.3.7", description="DSPy", long_description=long_description, long_description_content_type='text/markdown', From 75f9d252b26ba7ae15447099aabd7468a18a9a4f Mon Sep 17 00:00:00 2001 From: Omar Khattab Date: Fri, 8 Mar 2024 07:44:14 -0800 Subject: [PATCH 169/243] Update pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 7d4c7b5633..e93f4b8238 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "dspy-ai" -version = "2.3.6" +version = "2.3.7" description = "DSPy" readme = "README.md" authors = [{ name = "Omar Khattab", email = "okhattab@stanford.edu" }] From df992444d7630ec5ea6176d4ae6438122008c27f Mon Sep 17 00:00:00 2001 From: Omar Khattab Date: Fri, 8 Mar 2024 08:06:17 -0800 Subject: [PATCH 170/243] Update setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index b98fe08072..437589758c 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ setup( name="dspy-ai", - version="2.3.7", + version="2.4.0", description="DSPy", long_description=long_description, long_description_content_type='text/markdown', From f03ba08daaac369ab2e103713f50f0d71fb7bd21 Mon Sep 17 00:00:00 2001 From: Omar Khattab Date: Fri, 8 Mar 2024 08:06:35 -0800 Subject: [PATCH 171/243] Update pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index e93f4b8238..689a27ec49 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "dspy-ai" -version = "2.3.7" +version = "2.4.0" description = "DSPy" readme = "README.md" authors = [{ name = "Omar Khattab", email = "okhattab@stanford.edu" }] From fbef3051ba24ddd3711176dab964f9e829e98c39 Mon Sep 17 00:00:00 2001 From: klopsahlong Date: Fri, 8 Mar 2024 09:26:22 -0800 Subject: [PATCH 172/243] fix to signature optimizer - setting num_candidates --- dspy/teleprompt/signature_opt_bayesian.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dspy/teleprompt/signature_opt_bayesian.py b/dspy/teleprompt/signature_opt_bayesian.py index 5d009f86ee..b74c961511 100644 --- a/dspy/teleprompt/signature_opt_bayesian.py +++ b/dspy/teleprompt/signature_opt_bayesian.py @@ -39,7 +39,7 @@ class BayesianSignatureOptimizer(MIPRO): def __init__(self, prompt_model=None, task_model=None, teacher_settings={}, n=10, metric=None, init_temperature=1.0, verbose=False, track_stats=True, view_data_batch_size=10): print("\u001b[31m[WARNING] BayesianSignatureOptimizer has been deprecated and replaced with MIPRO. BayesianSignatureOptimizer will be removed in a future release. 
\u001b[31m") - super().__init__(prompt_model, task_model, teacher_settings,n,metric,init_temperature,verbose,track_stats,view_data_batch_size) + super().__init__(metric=metric,prompt_model=prompt_model, task_model=task_model, teacher_settings=teacher_settings,num_candidates=n,init_temperature=init_temperature,verbose=verbose,track_stats=track_stats,view_data_batch_size=view_data_batch_size) def compile(self, student, *, devset, max_bootstrapped_demos, max_labeled_demos, eval_kwargs, seed=42, optuna_trials_num, view_data=True, view_examples=True, requires_permission_to_run=False, num_trials=None): return super().compile(student, trainset=devset, max_bootstrapped_demos=max_bootstrapped_demos, max_labeled_demos=max_labeled_demos, eval_kwargs=eval_kwargs, seed=seed, view_data=view_data, view_examples=view_examples, requires_permission_to_run=requires_permission_to_run, num_trials=optuna_trials_num) From 0c48d8652efe23a49f2616e4f20b6b4c01bca8ff Mon Sep 17 00:00:00 2001 From: Omar Khattab Date: Fri, 8 Mar 2024 09:32:24 -0800 Subject: [PATCH 173/243] Update setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 437589758c..db9e1f8e4f 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ setup( name="dspy-ai", - version="2.4.0", + version="2.4.1", description="DSPy", long_description=long_description, long_description_content_type='text/markdown', From 0456b51eb60d12f952466e18e27b89c847d62f35 Mon Sep 17 00:00:00 2001 From: Omar Khattab Date: Fri, 8 Mar 2024 09:33:05 -0800 Subject: [PATCH 174/243] Update pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 689a27ec49..adc6f20368 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "dspy-ai" -version = "2.4.0" +version = "2.4.1" description = "DSPy" readme = "README.md" authors = [{ name = "Omar Khattab", email = "okhattab@stanford.edu" }] From 167a13321dcf778048b93c73daa31639722ae18c Mon Sep 17 00:00:00 2001 From: klopsahlong Date: Fri, 8 Mar 2024 10:11:38 -0800 Subject: [PATCH 175/243] adding in colab badge --- examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb b/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb index 3909acb7f5..a863e0e96c 100644 --- a/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb +++ b/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb @@ -15,7 +15,8 @@ "id": "3wEDck3ZqZH0" }, "source": [ - "# Using __Multi-stage Instruction Proposal & Optimization (MIPRO)__ in DSPy" + "# Using __Multi-stage Instruction Proposal & Optimization (MIPRO)__ in DSPy\n", + "[![colab-badge](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/stanfordnlp/dspy/blob/main/examples/qa/hotpot/hotpotqa_with_MIPRO.ipynb)" ] }, { From 5d3c2c41ae5ce8b597541c9854f5b6c54f028283 Mon Sep 17 00:00:00 2001 From: Herumb Shandilya Date: Sat, 9 Mar 2024 00:15:16 +0530 Subject: [PATCH 176/243] Adding typed predictor docs --- docs/api/functional/dspy_cot.md | 4 - docs/docs/building-blocks/7-assertions.md | 1 + .../building-blocks/8-typed_predictors.md | 159 +++++++++++++++++- .../language_model_clients/_category_.json | 2 +- .../retrieval_models_clients/_category_.json | 2 +- .../deep-dive/teleprompter/_category_.json | 2 +- .../typed_predictors/_category_.json | 8 + 7 files changed, 167 insertions(+), 11 deletions(-) diff 
--git a/docs/api/functional/dspy_cot.md b/docs/api/functional/dspy_cot.md
index 9b9e3bbeae..73c140f8f8 100644
--- a/docs/api/functional/dspy_cot.md
+++ b/docs/api/functional/dspy_cot.md
@@ -2,10 +2,6 @@
 sidebar_position: 4
 ---
 
----
-sidebar_position: 3
----
-
 # dspy.cot
 
 ### Overview
diff --git a/docs/docs/building-blocks/7-assertions.md b/docs/docs/building-blocks/7-assertions.md
index c8b5acc060..71ad163136 100644
--- a/docs/docs/building-blocks/7-assertions.md
+++ b/docs/docs/building-blocks/7-assertions.md
@@ -1,4 +1,5 @@
 # DSPy Assertions
+
 ## Introduction
 
 Language models (LMs) have transformed how we interact with machine learning, offering vast capabilities in natural language understanding and generation. However, ensuring these models adhere to domain-specific constraints remains a challenge. Despite the growth of techniques like fine-tuning or “prompt engineering”, these approaches are extremely tedious and rely on heavy, manual hand-waving to guide the LMs in adhering to specific constraints. Even DSPy's modularity of programming prompting pipelines lacks mechanisms to effectively and automatically enforce these constraints.
diff --git a/docs/docs/building-blocks/8-typed_predictors.md b/docs/docs/building-blocks/8-typed_predictors.md
index 65032474bf..874fa29bee 100644
--- a/docs/docs/building-blocks/8-typed_predictors.md
+++ b/docs/docs/building-blocks/8-typed_predictors.md
@@ -1,13 +1,164 @@
 # Typed Predictors
 
-In DSPy, alongside Signatures a
+In DSPy Signatures, we have `InputField` and `OutputField` that define the nature of inputs and outputs of the field. However the inputs and output to these fields is always string, you can seperately process these inputs and outputs in complicated scenarios but the inherent type is always `str`.
+
+Pydantic `BaseModel` is a great way to enforce type constraints on the fields, but it is not directly compatible with the `dspy.Signature`. This is where Typed Predictors come in. They are a way to enforce the type constraints on the inputs and outputs of the fields in a `dspy.Signature`.
 
 ## Executing Typed Predictors
 
+Using Typed Predictors is not too different than any other module, infact aside from adding type hints to signature attributes and using a special Predictor module instead of `dspy.Predict` there is nothing else to do. Let's take a look at a simple example to understand this.
+
+### Defining Input and Output Models
+
+Let's take an simple task as example i.e. given the `context` and `query` the LLM should give me an `answer` and `confidence_score`. The task could be modelled better but this is just for illustration purposes. Let's define our `Input` and `Output` models via pydantic.
+
+```python
+from pydantic import BaseModel, Field
+
+class Input(BaseModel):
+    context: str = Field(..., description="The context for the question")
+    query: str = Field(..., description="The question to be answered")
+
+class Output(BaseModel):
+    answer: str = Field(..., description="The answer for the question")
+    confidence_score: float = Field(..., description="The confidence score for the answer")
+```
+
+As you can see this is where you can provide description to the attributes now. Now that we have the input and output models, let's define a simple Signature that takes in the input and returns the output.
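+
+> Editor's note: the snippet below is an editorial sketch, not part of the original commit. It only illustrates that pydantic enforces these annotations at construction time, so ill-typed values fail fast:
+
+```python
+from pydantic import ValidationError
+
+try:
+    # confidence_score must be parseable as a float, so this raises
+    Output(answer="Paris", confidence_score="not a number")
+except ValidationError as e:
+    print(e)
+```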
+
+### Creating Typed Predictor
+
+A Typed Predictor needs a Typed Signature which is not any different than a normal `dspy.Signature` everything is the same except here you provide type of each field as well.
+
+```python
+class QASignature(dspy.Signature):
+    """Answer the question based on the context and query provided, and on the scale of 10 tell how confident you are about the answer."""
+
+    input: Input = dspy.InputField()
+    output: Output = dspy.OutputField()
+```
+
+Now that we have the `QASignature`, let's define a Typed Predictor that let's use execute this Signature while conforming to the type constraints.
+
+```python
+predictor = dspy.TypedPredictor(QASignature)
+```
+
+Just how we pass the Signature to other modules we pass the `QASignature` to `dspy.TypedPredictor`, where typed constraints are inforced.
+
+### I/O in Typed Predictors
+
+Now that we have the Typed Predictor let's test it out by providing some sample input to the predictor and see the output and it's type. We can create a `Input` instance and pass it to the predictor to get a dictionary of the output.
+
+```python
+doc_query_pair = Input(
+    context="The quick brown fox jumps over the lazy dog",
+    query="What does the fox jumps over?",
+)
+
+prediction = predictor(input=doc_query_pair)
+```
+
+Now that we have the prediction, we can see the output and it's type.
+
+```python
+answer = prediction.output.answer
+confidence_score = prediction.output.confidence_score
+
+print(f"Prediction: {prediction}\n\n")
+print(f"Answer: {answer}, Answer Type: {type(answer)}")
+print(f"Confidence Score: {confidence_score}, Confidence Score Type: {type(confidence_score)}")
+```
+
+## Typed Chain of Thoughts with `dspy.TypedChainOfThought`
+
+If `TypedPredictor` is the typed counterpart of `dspy.Predict` then `TypedChainOfThought` is the typed counterpart of `dspy.ChainOfThought`. It adds a Chain of Thoughts `dspy.OutputField` to the `dspy.TypedPredictor` module by prepending it to the Signature.
+
+```python
+cot_predictor = dspy.TypedChainOfThought(QASignature)
+
+doc_query_pair = Input(
+    context="The quick brown fox jumps over the lazy dog",
+    query="What does the fox jumps over?",
+)
+
+prediction = cot_predictor(input=doc_query_pair)
+```
+
+## Typed Predictors as Decorators
+
+While the `dspy.TypedPredictor` and `dspy.TypedChainOfThought` provide a convinient way to use typed predictors, you can also use their as decorators to enforce type constraints on the inputs and outputs of the function. Good thing is that you won't need to explicitly define a Signature class because it's created internally based on function arguments, outputs and docstring.
+
+```
+# Function name is output key
+
+@dspy.predictor
+def qa_function(doc_query_pair: Input) -> Output:
+    """Answer the question based on the context and query provided, and on the scale of 10 tell how confident you are about the answer."""
+    pass
+
+@dspy.cot
+def qa_function(doc_query_pair: Input) -> Output:
+    """Answer the question based on the context and query provided, and on the scale of 10 tell how confident you are about the answer."""
+    pass
+```
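+
+> Editor's note: the call below is an editorial sketch, not part of the original commit. Assuming the decorator wraps the function in a `TypedPredictor` under the hood, invoking it should run the predictor and return a validated `Output` instance:
+
+```python
+result = qa_function(doc_query_pair=doc_query_pair)
+print(result.answer, result.confidence_score)
+```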
+
+## Composing Functional Typed Predictors in `dspy.Module`
+
+If you're creating DSPy pipelines via `dspy.Module` then you can simply use Functional Typed Predictors by creating these class methods and using them as decorators. Here is an example of using functional typed predictors to create a `SimplifiedBaleen` pipeline:
+
+```python
+class SimplifiedBaleen(FunctionalModule):
+    def __init__(self, passages_per_hop=3, max_hops=1):
+        super().__init__()
+        self.retrieve = dspy.Retrieve(k=passages_per_hop)
+        self.max_hops = max_hops
+
+    @cot
+    def generate_query(self, context: list[str], question) -> str:
+        """Write a simple search query that will help answer a complex question."""
+        pass
+
+    @cot
+    def generate_answer(self, context: list[str], question) -> str:
+        """Answer questions with short factoid answers."""
+        pass
+
+    def forward(self, question):
+        context = []
+
+        for _ in range(self.max_hops):
+            query = self.generate_query(context=context, question=question)
+            passages = self.retrieve(query).passages
+            context = deduplicate(context + passages)
+
+        answer = self.generate_answer(context=context, question=question)
+        return dspy.Prediction(context=context, answer=answer)
+```
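+
+> Editor's note (not part of the original commit): the block above assumes a few imports that the snippet does not show. A minimal sketch of what they would likely be:
+
+```python
+from dspy.functional import FunctionalModule, cot
+from dsp.utils import deduplicate
+```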
(The \"tele-\" in the name means \"at a distance\", i.e., automatic prompting at a distance.)" diff --git a/docs/docs/deep-dive/typed_predictors/_category_.json b/docs/docs/deep-dive/typed_predictors/_category_.json index e69de29bb2..2b9a4e2f0c 100644 --- a/docs/docs/deep-dive/typed_predictors/_category_.json +++ b/docs/docs/deep-dive/typed_predictors/_category_.json @@ -0,0 +1,8 @@ +{ + "label": "Typed Predictors", + "position": 4, + "link": { + "type": "generated-index", + "description": "Typed Predictors in DSPy" + } +} \ No newline at end of file From 2c9c67136627e613dab45e2c981a5d4772f8bbe3 Mon Sep 17 00:00:00 2001 From: arnavsinghvi11 <54859892+arnavsinghvi11@users.noreply.github.com> Date: Fri, 8 Mar 2024 11:03:03 -0800 Subject: [PATCH 177/243] Update 8-typed_predictors.md --- .../building-blocks/8-typed_predictors.md | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/docs/docs/building-blocks/8-typed_predictors.md b/docs/docs/building-blocks/8-typed_predictors.md index 874fa29bee..e8c7167c19 100644 --- a/docs/docs/building-blocks/8-typed_predictors.md +++ b/docs/docs/building-blocks/8-typed_predictors.md @@ -1,16 +1,16 @@ # Typed Predictors -In DSPy Signatures, we have `InputField` and `OutputField` that define the nature of inputs and outputs of the field. However the inputs and output to these fields is always string, you can seperately process these inputs and outputs in complicated scenarios but the inherent type is always `str`. +In DSPy Signatures, we have `InputField` and `OutputField` that define the nature of inputs and outputs of the field. However, the inputs and output to these fields are always `str`-typed, which requires input and output string processing. -Pydantic `BaseModel` is a great way to enforce type constraints on the fields, but it is not directly compatible with the `dspy.Signature`. This is where Typed Predictors come in. They are a way to enforce the type constraints on the inputs and outputs of the fields in a `dspy.Signature`. +Pydantic `BaseModel` is a great way to enforce type constraints on the fields, but it is not directly compatible with the `dspy.Signature`. Typed Predictors resolves this as a way to enforce the type constraints on the inputs and outputs of the fields in a `dspy.Signature`. ## Executing Typed Predictors -Using Typed Predictors is not too different than any other module, infact aside from adding type hints to signature attributes and using a special Predictor module instead of `dspy.Predict` there is nothing else to do. Let's take a look at a simple example to understand this. +Using Typed Predictors is not too different than any other module with the minor additions of type hints to signature attributes and using a special Predictor module instead of `dspy.Predict`. Let's take a look at a simple example to understand this. ### Defining Input and Output Models -Let's take an simple task as example i.e. given the `context` and `query` the LLM should give me an `answer` and `confidence_score`. The task could be modelled better but this is just for illustration purposes. Let's define our `Input` and `Output` models via pydantic. +Let's take a simple task as an example i.e. given the `context` and `query`, the LLM should return an `answer` and `confidence_score`. Let's define our `Input` and `Output` models via pydantic. 
-## Composing Functional Typed Predictors in `dspy.Module`
\ No newline at end of file
+result = optimize_signature(
+    student=dspy.TypedPredictor(QASignature),
+    evaluator=evaluator,
+    initial_prompts=6,
+    n_iterations=100,
+    max_examples=30,
+    verbose=True,
+    prompt_model=gpt4,
+)
+```
\ No newline at end of file
diff --git a/docs/docs/deep-dive/language_model_clients/_category_.json b/docs/docs/deep-dive/language_model_clients/_category_.json
index ccd8f3122b..6fe54c326e 100644
--- a/docs/docs/deep-dive/language_model_clients/_category_.json
+++ b/docs/docs/deep-dive/language_model_clients/_category_.json
@@ -1,6 +1,6 @@
 {
   "label": "Language Model Clients",
-  "position": 3,
+  "position": 5,
   "link": {
     "type": "generated-index",
     "description": "Language Model Clients in DSPy"
diff --git a/docs/docs/deep-dive/retrieval_models_clients/_category_.json b/docs/docs/deep-dive/retrieval_models_clients/_category_.json
index c2c3e449d3..4422ec12e8 100644
--- a/docs/docs/deep-dive/retrieval_models_clients/_category_.json
+++ b/docs/docs/deep-dive/retrieval_models_clients/_category_.json
@@ -1,6 +1,6 @@
 {
   "label": "Retrieval Model Clients",
-  "position": 4,
+  "position": 6,
   "link": {
     "type": "generated-index",
     "description": "Retrieval Models in DSPy"
diff --git a/docs/docs/deep-dive/teleprompter/_category_.json b/docs/docs/deep-dive/teleprompter/_category_.json
index de6473d1e3..c7be633ac5 100644
--- a/docs/docs/deep-dive/teleprompter/_category_.json
+++ b/docs/docs/deep-dive/teleprompter/_category_.json
@@ -1,6 +1,6 @@
 {
   "label": "Teleprompters",
-  "position": 5,
+  "position": 7,
   "link": {
     "type": "generated-index",
     "description": "Teleprompters are powerful optimizers (included in DSPy) that can learn to bootstrap and select effective prompts for the modules of any program. (The \"tele-\" in the name means \"at a distance\", i.e., automatic prompting at a distance.)"
diff --git a/docs/docs/deep-dive/typed_predictors/_category_.json b/docs/docs/deep-dive/typed_predictors/_category_.json
index e69de29bb2..2b9a4e2f0c 100644
--- a/docs/docs/deep-dive/typed_predictors/_category_.json
+++ b/docs/docs/deep-dive/typed_predictors/_category_.json
@@ -0,0 +1,8 @@
+{
+  "label": "Typed Predictors",
+  "position": 4,
+  "link": {
+    "type": "generated-index",
+    "description": "Typed Predictors in DSPy"
+  }
+}
\ No newline at end of file
From 2c9c67136627e613dab45e2c981a5d4772f8bbe3 Mon Sep 17 00:00:00 2001
From: arnavsinghvi11 <54859892+arnavsinghvi11@users.noreply.github.com>
Date: Fri, 8 Mar 2024 11:03:03 -0800
Subject: [PATCH 177/243] Update 8-typed_predictors.md

---
 .../building-blocks/8-typed_predictors.md     | 30 +++++++++----------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/docs/docs/building-blocks/8-typed_predictors.md b/docs/docs/building-blocks/8-typed_predictors.md
index 874fa29bee..e8c7167c19 100644
--- a/docs/docs/building-blocks/8-typed_predictors.md
+++ b/docs/docs/building-blocks/8-typed_predictors.md
@@ -1,16 +1,16 @@
 # Typed Predictors
 
-In DSPy Signatures, we have `InputField` and `OutputField` that define the nature of inputs and outputs of the field. However the inputs and output to these fields is always string, you can seperately process these inputs and outputs in complicated scenarios but the inherent type is always `str`.
+In DSPy Signatures, we have `InputField` and `OutputField` that define the nature of inputs and outputs of the field. However, the inputs and outputs to these fields are always `str`-typed, which requires input and output string processing.
 
-Pydantic `BaseModel` is a great way to enforce type constraints on the fields, but it is not directly compatible with the `dspy.Signature`. This is where Typed Predictors come in. They are a way to enforce the type constraints on the inputs and outputs of the fields in a `dspy.Signature`.
+Pydantic `BaseModel` is a great way to enforce type constraints on the fields, but it is not directly compatible with the `dspy.Signature`. Typed Predictors resolve this by enforcing the type constraints on the inputs and outputs of the fields in a `dspy.Signature`.
 
 ## Executing Typed Predictors
 
-Using Typed Predictors is not too different than any other module, infact aside from adding type hints to signature attributes and using a special Predictor module instead of `dspy.Predict` there is nothing else to do. Let's take a look at a simple example to understand this.
+Using Typed Predictors is not too different than any other module with the minor additions of type hints to signature attributes and using a special Predictor module instead of `dspy.Predict`. Let's take a look at a simple example to understand this.
 
 ### Defining Input and Output Models
 
-Let's take an simple task as example i.e. given the `context` and `query` the LLM should give me an `answer` and `confidence_score`. The task could be modelled better but this is just for illustration purposes. Let's define our `Input` and `Output` models via pydantic.
+Let's take a simple task as an example, i.e., given the `context` and `query`, the LLM should return an `answer` and `confidence_score`. Let's define our `Input` and `Output` models via pydantic.
 
 ```python
 from pydantic import BaseModel, Field
 
 class Input(BaseModel):
     context: str = Field(..., description="The context for the question")
     query: str = Field(..., description="The question to be answered")
 
 class Output(BaseModel):
     answer: str = Field(..., description="The answer for the question")
     confidence_score: float = Field(..., description="The confidence score for the answer")
 ```
 
-As you can see this is where you can provide description to the attributes now. Now that we have the input and output models, let's define a simple Signature that takes in the input and returns the output.
+As you can see, we can describe the attributes by defining a simple Signature that takes in the input and returns the output.
 
 ### Creating Typed Predictor
 
-A Typed Predictor needs a Typed Signature which is not any different than a normal `dspy.Signature` everything is the same except here you provide type of each field as well.
+A Typed Predictor needs a Typed Signature, which extends a `dspy.Signature` with the addition of specifying "field type".
 
-Now that we have the `QASignature`, let's define a Typed Predictor that let's use execute this Signature while conforming to the type constraints.
+Now that we have the `QASignature`, let's define a Typed Predictor that executes this Signature while conforming to the type constraints.
 
-Just how we pass the Signature to other modules we pass the `QASignature` to `dspy.TypedPredictor`, where typed constraints are inforced.
+Similar to other modules, we pass the `QASignature` to `dspy.TypedPredictor` which enforces the typed constraints.
 
 ### I/O in Typed Predictors
 
-Now that we have the Typed Predictor let's test it out by providing some sample input to the predictor and see the output and it's type. We can create a `Input` instance and pass it to the predictor to get a dictionary of the output.
+Now let's test out the Typed Predictor by providing some sample input to the predictor and verifying the output type. We can create an `Input` instance and pass it to the predictor to get a dictionary of the output.
 
 ```python
 doc_query_pair = Input(
     context="The quick brown fox jumps over the lazy dog",
     query="What does the fox jumps over?",
 )
 
 prediction = predictor(input=doc_query_pair)
 ```
 
-Now that we have the prediction, we can see the output and it's type.
+Let's see the output and its type.
 
 ```python
 answer = prediction.output.answer
 confidence_score = prediction.output.confidence_score
 
 print(f"Prediction: {prediction}\n\n")
 print(f"Answer: {answer}, Answer Type: {type(answer)}")
 print(f"Confidence Score: {confidence_score}, Confidence Score Type: {type(confi
+While the `dspy.TypedPredictor` and `dspy.TypedChainOfThought` provide a convenient way to use typed predictors, you can also use them as decorators to enforce type constraints on the inputs and outputs of the function. This relies on the internal definitions of the Signature class and its function arguments, outputs, and docstrings. ``` # Function name is output key @@ -105,7 +105,7 @@ def qa_function(doc_query_pair: Input) -> Output: ## Composing Functional Typed Predictors in `dspy.Module` -If you're creating DSPy pipelines via `dspy.Module` then you can simply use Functional Typed Predictors by creating these class methods and using them as decorators. Here is an example of using functional typed predictors to create a `SimplifiedBaleen` pipeline: +If you're creating DSPy pipelines via `dspy.Module`, then you can simply use Functional Typed Predictors by creating these class methods and using them as decorators. Here is an example of using functional typed predictors to create a `SimplifiedBaleen` pipeline: ```python class SimplifiedBaleen(FunctionalModule): @@ -138,7 +138,7 @@ class SimplifiedBaleen(FunctionalModule): ## Optimizing Typed Predictors -Typed predictors can be optimized using `optimize_signature` optimizer which optimizes the instructions of the Signature. Here is an example of using `optimize_signature` to optimize the `QASignature`: +Typed predictors can be optimized on the Signature instructions through the `optimize_signature` optimizer. Here is an example of this optimization on the `QASignature`: ```python import dspy @@ -161,4 +161,4 @@ result = optimize_signature( verbose=True, prompt_model=gpt4, ) -``` \ No newline at end of file +``` From e72ef1bc2e56058b348621602307dc3149d72424 Mon Sep 17 00:00:00 2001 From: dominik Date: Sat, 9 Mar 2024 13:15:37 +0100 Subject: [PATCH 178/243] =?UTF-8?q?=F0=9F=93=9D=20(ChromadbRM.md):=20Updat?= =?UTF-8?q?e=20the=20return=20type=20description=20in=20the=20documentatio?= =?UTF-8?q?n=20The=20return=20type=20description=20for=20the=20search=20fu?= =?UTF-8?q?nction=20in=20the=20ChromadbRM=20documentation=20was=20updated?= =?UTF-8?q?=20to=20provide=20more=20detailed=20information=20about=20the?= =?UTF-8?q?=20structure=20of=20the=20returned=20object.=20This=20change=20?= =?UTF-8?q?will=20help=20developers=20understand=20the=20structure=20of=20?= =?UTF-8?q?the=20returned=20object=20and=20how=20to=20use=20it=20in=20thei?= =?UTF-8?q?r=20code.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/api/retrieval_model_clients/ChromadbRM.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/api/retrieval_model_clients/ChromadbRM.md b/docs/api/retrieval_model_clients/ChromadbRM.md index 42f6f42e7f..bbd1aaafd8 100644 --- a/docs/api/retrieval_model_clients/ChromadbRM.md +++ b/docs/api/retrieval_model_clients/ChromadbRM.md @@ -34,7 +34,7 @@ Search the chromadb collection for the top `k` passages matching the given query - `k` (_Optional[int]_, _optional_): The number of results to retrieve. If not specified, defaults to the value set during initialization. **Returns:** -- `dspy.Prediction`: Contains the retrieved passages, each represented as a `dotdict` with a `long_text` attribute. 
+- `dspy.Prediction`: Contains the retrieved passages, each represented as a `dotdict` with schema `[{"id": str, "score": float, "long_text": str, "metadatas": dict }]` ### Quickstart with OpenAI Embeddings From 7b5e0d31f51d395f04bcae26d88934c413d761a4 Mon Sep 17 00:00:00 2001 From: Matthew Billman Date: Sat, 9 Mar 2024 11:53:18 -0500 Subject: [PATCH 179/243] hf models can use auth tokens now (#611) --- .pre-commit-config.yaml | 34 +- dsp/modules/hf.py | 76 ++- poetry.lock | 876 +++++++++++++++++++++++++++------ pyproject.toml | 5 +- tests/modules/test_hf_model.py | 31 ++ 5 files changed, 848 insertions(+), 174 deletions(-) create mode 100644 tests/modules/test_hf_model.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b78b22a8bb..ff32437647 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,15 +5,15 @@ default_stages: [commit] default_install_hook_types: [pre-commit, commit-msg] repos: - - repo: https://github.com/astral-sh/ruff-pre-commit - # Ruff version. - rev: v0.1.11 - hooks: - # Run the linter. - - id: ruff - args: [--fix] - # Run the formatter. - - id: ruff-format + # - repo: https://github.com/astral-sh/ruff-pre-commit + # # Ruff version. + # rev: v0.1.11 + # hooks: + # # Run the linter. + # - id: ruff + # args: [--fix] + # # Run the formatter. + # - id: ruff-format - repo: https://github.com/timothycrosley/isort rev: 5.12.0 @@ -50,14 +50,14 @@ repos: args: - "--autofix" - "--indent=2" - - repo: local - hooks: - - id: validate-commit-msg - name: Commit Message is Valid - language: pygrep - entry: ^(break|build|ci|docs|feat|fix|perf|refactor|style|test|ops|hotfix|release|maint|init|enh|revert)\([\w,\.,\-,\(,\),\/]+\)(!?)(:)\s{1}([\w,\W,:]+) - stages: [commit-msg] - args: [--negate] + # - repo: local + # hooks: + # - id: validate-commit-msg + # name: Commit Message is Valid + # language: pygrep + # entry: ^(break|build|ci|docs|feat|fix|perf|refactor|style|test|ops|hotfix|release|maint|init|enh|revert)\([\w,\.,\-,\(,\),\/]+\)(!?)(:)\s{1}([\w,\W,:]+) + # stages: [commit-msg] + # args: [--negate] - repo: https://github.com/pre-commit/mirrors-prettier rev: v3.0.3 diff --git a/dsp/modules/hf.py b/dsp/modules/hf.py index aad0c0e36c..7407b3f47e 100644 --- a/dsp/modules/hf.py +++ b/dsp/modules/hf.py @@ -1,11 +1,13 @@ # from peft import PeftConfig, PeftModel # from transformers import AutoModelForSeq2SeqLM, AutoModelForCausalLM, AutoTokenizer, AutoConfig +import os from typing import Literal, Optional from dsp.modules.lm import LM # from dsp.modules.finetuning.finetune_hf import preprocess_prompt + def openai_to_hf(**kwargs): hf_kwargs = {} for k, v in kwargs.items(): @@ -26,8 +28,19 @@ def openai_to_hf(**kwargs): class HFModel(LM): - def __init__(self, model: str, checkpoint: Optional[str] = None, is_client: bool = False, - hf_device_map: Literal["auto", "balanced", "balanced_low_0", "sequential"] = "auto"): + def __init__( + self, + model: str, + checkpoint: Optional[str] = None, + is_client: bool = False, + hf_device_map: Literal[ + "auto", + "balanced", + "balanced_low_0", + "sequential", + ] = "auto", + token: Optional[str] = None, + ): """wrapper for Hugging Face models Args: @@ -42,6 +55,10 @@ def __init__(self, model: str, checkpoint: Optional[str] = None, is_client: bool self.provider = "hf" self.is_client = is_client self.device_map = hf_device_map + + hf_autoconfig_kwargs = dict(token=token or os.environ.get("HF_TOKEN")) + hf_autotokenizer_kwargs = hf_autoconfig_kwargs.copy() + hf_automodel_kwargs = hf_autoconfig_kwargs.copy() if not 
self.is_client: try: import torch @@ -52,40 +69,68 @@ def __init__(self, model: str, checkpoint: Optional[str] = None, is_client: bool ) from exc self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") try: - architecture = AutoConfig.from_pretrained(model).__dict__["architectures"][0] - self.encoder_decoder_model = ("ConditionalGeneration" in architecture) or ("T5WithLMHeadModel" in architecture) + architecture = AutoConfig.from_pretrained( + model, + **hf_autoconfig_kwargs, + ).__dict__["architectures"][0] + self.encoder_decoder_model = ("ConditionalGeneration" in architecture) or ( + "T5WithLMHeadModel" in architecture + ) self.decoder_only_model = ("CausalLM" in architecture) or ("GPT2LMHeadModel" in architecture) - assert self.encoder_decoder_model or self.decoder_only_model, f"Unknown HuggingFace model class: {model}" - self.tokenizer = AutoTokenizer.from_pretrained(model if checkpoint is None else checkpoint) + assert ( + self.encoder_decoder_model or self.decoder_only_model + ), f"Unknown HuggingFace model class: {model}" + self.tokenizer = AutoTokenizer.from_pretrained( + model if checkpoint is None else checkpoint, + **hf_autotokenizer_kwargs, + ) self.rationale = True AutoModelClass = AutoModelForSeq2SeqLM if self.encoder_decoder_model else AutoModelForCausalLM if checkpoint: # with open(os.path.join(checkpoint, '..', 'compiler_config.json'), 'r') as f: # config = json.load(f) - self.rationale = False #config['rationale'] + self.rationale = False # config['rationale'] # if config['peft']: # peft_config = PeftConfig.from_pretrained(checkpoint) # self.model = AutoModelClass.from_pretrained(peft_config.base_model_name_or_path, return_dict=True, load_in_8bit=True, device_map=hf_device_map) # self.model = PeftModel.from_pretrained(self.model, checkpoint) # else: if self.device_map: - self.model = AutoModelClass.from_pretrained(checkpoint, device_map=self.device_map) + self.model = AutoModelClass.from_pretrained( + checkpoint, + device_map=self.device_map, + **hf_automodel_kwargs, + ) else: - self.model = AutoModelClass.from_pretrained(checkpoint).to(self.device) + self.model = AutoModelClass.from_pretrained( + checkpoint, + **hf_automodel_kwargs, + ).to(self.device) else: if self.device_map: - self.model = AutoModelClass.from_pretrained(model, device_map=self.device_map) + self.model = AutoModelClass.from_pretrained( + model, + device_map=self.device_map, + **hf_automodel_kwargs, + ) else: - self.model = AutoModelClass.from_pretrained(model).to(self.device) + self.model = AutoModelClass.from_pretrained( + model, + **hf_automodel_kwargs, + ).to(self.device) self.drop_prompt_from_output = False except ValueError: self.model = AutoModelForCausalLM.from_pretrained( model if checkpoint is None else checkpoint, device_map=self.device_map, + **hf_automodel_kwargs, ) self.drop_prompt_from_output = True - self.tokenizer = AutoTokenizer.from_pretrained(model) + self.tokenizer = AutoTokenizer.from_pretrained( + model, + **hf_autotokenizer_kwargs, + ) self.drop_prompt_from_output = True self.history = [] @@ -111,7 +156,7 @@ def _generate(self, prompt, **kwargs): # print(prompt) if isinstance(prompt, dict): try: - prompt = prompt['messages'][0]['content'] + prompt = prompt["messages"][0]["content"] except (KeyError, IndexError, TypeError): print("Failed to extract 'content' from the prompt.") inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device) @@ -121,10 +166,7 @@ def _generate(self, prompt, **kwargs): if self.drop_prompt_from_output: input_length = 
inputs.input_ids.shape[1] outputs = outputs[:, input_length:] - completions = [ - {"text": c} - for c in self.tokenizer.batch_decode(outputs, skip_special_tokens=True) - ] + completions = [{"text": c} for c in self.tokenizer.batch_decode(outputs, skip_special_tokens=True)] response = { "prompt": prompt, "choices": completions, diff --git a/poetry.lock b/poetry.lock index 136e7473b5..236ee58290 100644 --- a/poetry.lock +++ b/poetry.lock @@ -364,6 +364,52 @@ charset-normalizer = ["charset-normalizer"] html5lib = ["html5lib"] lxml = ["lxml"] +[[package]] +name = "black" +version = "24.2.0" +description = "The uncompromising code formatter." +optional = false +python-versions = ">=3.8" +files = [ + {file = "black-24.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6981eae48b3b33399c8757036c7f5d48a535b962a7c2310d19361edeef64ce29"}, + {file = "black-24.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d533d5e3259720fdbc1b37444491b024003e012c5173f7d06825a77508085430"}, + {file = "black-24.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61a0391772490ddfb8a693c067df1ef5227257e72b0e4108482b8d41b5aee13f"}, + {file = "black-24.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:992e451b04667116680cb88f63449267c13e1ad134f30087dec8527242e9862a"}, + {file = "black-24.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:163baf4ef40e6897a2a9b83890e59141cc8c2a98f2dda5080dc15c00ee1e62cd"}, + {file = "black-24.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e37c99f89929af50ffaf912454b3e3b47fd64109659026b678c091a4cd450fb2"}, + {file = "black-24.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9de21bafcba9683853f6c96c2d515e364aee631b178eaa5145fc1c61a3cc92"}, + {file = "black-24.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:9db528bccb9e8e20c08e716b3b09c6bdd64da0dd129b11e160bf082d4642ac23"}, + {file = "black-24.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d84f29eb3ee44859052073b7636533ec995bd0f64e2fb43aeceefc70090e752b"}, + {file = "black-24.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1e08fb9a15c914b81dd734ddd7fb10513016e5ce7e6704bdd5e1251ceee51ac9"}, + {file = "black-24.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:810d445ae6069ce64030c78ff6127cd9cd178a9ac3361435708b907d8a04c693"}, + {file = "black-24.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:ba15742a13de85e9b8f3239c8f807723991fbfae24bad92d34a2b12e81904982"}, + {file = "black-24.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7e53a8c630f71db01b28cd9602a1ada68c937cbf2c333e6ed041390d6968faf4"}, + {file = "black-24.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:93601c2deb321b4bad8f95df408e3fb3943d85012dddb6121336b8e24a0d1218"}, + {file = "black-24.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0057f800de6acc4407fe75bb147b0c2b5cbb7c3ed110d3e5999cd01184d53b0"}, + {file = "black-24.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:faf2ee02e6612577ba0181f4347bcbcf591eb122f7841ae5ba233d12c39dcb4d"}, + {file = "black-24.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:057c3dc602eaa6fdc451069bd027a1b2635028b575a6c3acfd63193ced20d9c8"}, + {file = "black-24.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:08654d0797e65f2423f850fc8e16a0ce50925f9337fb4a4a176a7aa4026e63f8"}, + {file = "black-24.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca610d29415ee1a30a3f30fab7a8f4144e9d34c89a235d81292a1edb2b55f540"}, + {file = "black-24.2.0-cp39-cp39-win_amd64.whl", 
hash = "sha256:4dd76e9468d5536abd40ffbc7a247f83b2324f0c050556d9c371c2b9a9a95e31"}, + {file = "black-24.2.0-py3-none-any.whl", hash = "sha256:e8a6ae970537e67830776488bca52000eaa37fa63b9988e8c487458d9cd5ace6"}, + {file = "black-24.2.0.tar.gz", hash = "sha256:bce4f25c27c3435e4dace4815bcb2008b87e167e3bf4ee47ccdc5ce906eb4894"}, +] + +[package.dependencies] +click = ">=8.0.0" +mypy-extensions = ">=0.4.3" +packaging = ">=22.0" +pathspec = ">=0.9.0" +platformdirs = ">=2" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + [[package]] name = "cachetools" version = "5.3.3" @@ -951,20 +997,21 @@ all = ["email-validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)" [[package]] name = "fastembed" -version = "0.1.1" +version = "0.1.3" description = "Fast, light, accurate library built for retrieval embedding generation" optional = true python-versions = ">=3.8.0,<3.12" files = [ - {file = "fastembed-0.1.1-py3-none-any.whl", hash = "sha256:131413ae52cd72f4c8cced7a675f8269dbfd1a852abade3c815e265114bcc05a"}, - {file = "fastembed-0.1.1.tar.gz", hash = "sha256:f7e524ee4f74bb8aad16be5b687d1f77f608d40e96e292c87881dc36baf8f4c7"}, + {file = "fastembed-0.1.3-py3-none-any.whl", hash = "sha256:98b6c6d9effec8c96d97048e59cdd53627b16a70fcdbfa7c663772de66e11b3a"}, + {file = "fastembed-0.1.3.tar.gz", hash = "sha256:c17dc83a02938f8baae6717f18d24ed0ff0c5397b5329cbb5c7264239411346f"}, ] [package.dependencies] +huggingface-hub = "0.19.4" onnx = ">=1.11,<2.0" onnxruntime = ">=1.15,<2.0" requests = ">=2.31,<3.0" -tokenizers = ">=0.13,<0.14" +tokenizers = ">=0.15.0,<0.16.0" tqdm = ">=4.65,<5.0" [[package]] @@ -999,13 +1046,13 @@ typing = ["typing-extensions (>=4.8)"] [[package]] name = "flatbuffers" -version = "23.5.26" +version = "24.3.7" description = "The FlatBuffers serialization format for Python" optional = true python-versions = "*" files = [ - {file = "flatbuffers-23.5.26-py2.py3-none-any.whl", hash = "sha256:c0ff356da363087b915fde4b8b45bdda73432fc17cddb3c8157472eab1422ad1"}, - {file = "flatbuffers-23.5.26.tar.gz", hash = "sha256:9ea1144cac05ce5d86e2859f431c6cd5e66cd9c78c558317c7955fb8d4c78d89"}, + {file = "flatbuffers-24.3.7-py2.py3-none-any.whl", hash = "sha256:80c4f5dcad0ee76b7e349671a0d657f2fbba927a0244f88dd3f5ed6a3694e1fc"}, + {file = "flatbuffers-24.3.7.tar.gz", hash = "sha256:0895c22b9a6019ff2f4de2e5e2f7cd15914043e6e7033a94c0c6369422690f22"}, ] [[package]] @@ -1558,13 +1605,13 @@ socks = ["socksio (==1.*)"] [[package]] name = "huggingface-hub" -version = "0.21.3" +version = "0.19.4" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.21.3-py3-none-any.whl", hash = "sha256:b183144336fdf2810a8c109822e0bb6ef1fd61c65da6fb60e8c3f658b7144016"}, - {file = "huggingface_hub-0.21.3.tar.gz", hash = "sha256:26a15b604e4fc7bad37c467b76456543ec849386cbca9cd7e1e135f53e500423"}, + {file = "huggingface_hub-0.19.4-py3-none-any.whl", hash = "sha256:dba013f779da16f14b606492828f3760600a1e1801432d09fe1c33e50b825bb5"}, + {file = "huggingface_hub-0.19.4.tar.gz", hash = "sha256:176a4fc355a851c17550e7619488f383189727eab209534d7cef2114dae77b22"}, ] [package.dependencies] @@ -1577,16 
+1624,16 @@ tqdm = ">=4.42.1" typing-extensions = ">=3.7.4.3" [package.extras] -all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] cli = ["InquirerPy (==0.3.4)"] -dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +docs = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "hf-doc-builder", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)", "watchdog"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] -hf-transfer = ["hf-transfer (>=0.1.4)"] inference = ["aiohttp", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)"] quality = ["mypy (==1.5.1)", "ruff (>=0.1.3)"] tensorflow = ["graphviz", "pydot", "tensorflow"] -testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] -torch = ["safetensors", "torch"] +testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +torch = ["torch"] typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] [[package]] @@ -1638,32 +1685,32 @@ files = [ [[package]] name = "importlib-metadata" -version = "7.0.1" +version = "7.0.2" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" files = [ - {file = 
"importlib_metadata-7.0.1-py3-none-any.whl", hash = "sha256:4805911c3a4ec7c3966410053e9ec6a1fecd629117df5adee56dfc9432a1081e"}, - {file = "importlib_metadata-7.0.1.tar.gz", hash = "sha256:f238736bb06590ae52ac1fab06a3a9ef1d8dce2b7a35b5ab329371d6c8f5d2cc"}, + {file = "importlib_metadata-7.0.2-py3-none-any.whl", hash = "sha256:f4bc4c0c070c490abf4ce96d715f68e95923320370efb66143df00199bb6c100"}, + {file = "importlib_metadata-7.0.2.tar.gz", hash = "sha256:198f568f3230878cb1b44fbd7975f87906c22336dba2e4a7f05278c281fbd792"}, ] [package.dependencies] zipp = ">=0.5" [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] perf = ["ipython"] -testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] +testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"] [[package]] name = "importlib-resources" -version = "6.1.2" +version = "6.1.3" description = "Read resources from Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "importlib_resources-6.1.2-py3-none-any.whl", hash = "sha256:9a0a862501dc38b68adebc82970140c9e4209fc99601782925178f8386339938"}, - {file = "importlib_resources-6.1.2.tar.gz", hash = "sha256:308abf8474e2dba5f867d279237cd4076482c3de7104a40b41426370e891549b"}, + {file = "importlib_resources-6.1.3-py3-none-any.whl", hash = "sha256:4c0269e3580fe2634d364b39b38b961540a7738c02cb984e98add8b4221d793d"}, + {file = "importlib_resources-6.1.3.tar.gz", hash = "sha256:56fb4525197b78544a3354ea27793952ab93f935bb4bf746b846bb1015020f2b"}, ] [package.dependencies] @@ -1671,7 +1718,7 @@ zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} [package.extras] docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)", "zipp (>=3.17)"] +testing = ["jaraco.collections", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)", "zipp (>=3.17)"] [[package]] name = "iniconfig" @@ -2279,13 +2326,13 @@ mkdocs = ">=1.0.3" [[package]] name = "mkdocs-material" -version = "9.5.12" +version = "9.5.13" description = "Documentation that simply works" optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_material-9.5.12-py3-none-any.whl", hash = "sha256:d6f0c269f015e48c76291cdc79efb70f7b33bbbf42d649cfe475522ebee61b1f"}, - {file = "mkdocs_material-9.5.12.tar.gz", hash = "sha256:5f69cef6a8aaa4050b812f72b1094fda3d079b9a51cf27a247244c03ec455e97"}, + {file = "mkdocs_material-9.5.13-py3-none-any.whl", hash = "sha256:5cbe17fee4e3b4980c8420a04cc762d8dc052ef1e10532abd4fce88e5ea9ce6a"}, + {file = "mkdocs_material-9.5.13.tar.gz", hash = "sha256:d8e4caae576312a88fd2609b81cf43d233cdbe36860d67a68702b018b425bd87"}, ] [package.dependencies] @@ -2376,7 +2423,7 @@ files = [ name = "mpmath" version = "1.3.0" 
description = "Python library for arbitrary-precision floating-point arithmetic" -optional = true +optional = false python-versions = "*" files = [ {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, @@ -2512,6 +2559,17 @@ files = [ [package.dependencies] dill = ">=0.3.8" +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." +optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + [[package]] name = "myst-nb" version = "1.0.0" @@ -2620,6 +2678,24 @@ files = [ {file = "nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"}, ] +[[package]] +name = "networkx" +version = "3.2.1" +description = "Python package for creating and manipulating graphs and networks" +optional = false +python-versions = ">=3.9" +files = [ + {file = "networkx-3.2.1-py3-none-any.whl", hash = "sha256:f18c69adc97877c42332c170849c96cefa91881c99a7cb3e95b7c659ebdc1ec2"}, + {file = "networkx-3.2.1.tar.gz", hash = "sha256:9f1bb5cf3409bf324e0a722c20bdb4c20ee39bf1c30ce8ae499c8502b0b5e0c6"}, +] + +[package.extras] +default = ["matplotlib (>=3.5)", "numpy (>=1.22)", "pandas (>=1.4)", "scipy (>=1.9,!=1.11.0,!=1.11.1)"] +developer = ["changelist (==0.4)", "mypy (>=1.1)", "pre-commit (>=3.2)", "rtoml"] +doc = ["nb2plots (>=0.7)", "nbconvert (<7.9)", "numpydoc (>=1.6)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.14)", "sphinx (>=7)", "sphinx-gallery (>=0.14)", "texext (>=0.6.7)"] +extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.11)", "sympy (>=1.10)"] +test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] + [[package]] name = "numpy" version = "1.26.4" @@ -2665,6 +2741,147 @@ files = [ {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, ] +[[package]] +name = "nvidia-cublas-cu12" +version = "12.1.3.1" +description = "CUBLAS native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:ee53ccca76a6fc08fb9701aa95b6ceb242cdaab118c3bb152af4e579af792728"}, + {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-win_amd64.whl", hash = "sha256:2b964d60e8cf11b5e1073d179d85fa340c120e99b3067558f3cf98dd69d02906"}, +] + +[[package]] +name = "nvidia-cuda-cupti-cu12" +version = "12.1.105" +description = "CUDA profiling tools runtime libs." 
+optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:e54fde3983165c624cb79254ae9818a456eb6e87a7fd4d56a2352c24ee542d7e"}, + {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:bea8236d13a0ac7190bd2919c3e8e6ce1e402104276e6f9694479e48bb0eb2a4"}, +] + +[[package]] +name = "nvidia-cuda-nvrtc-cu12" +version = "12.1.105" +description = "NVRTC native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:339b385f50c309763ca65456ec75e17bbefcbbf2893f462cb8b90584cd27a1c2"}, + {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:0a98a522d9ff138b96c010a65e145dc1b4850e9ecb75a0172371793752fd46ed"}, +] + +[[package]] +name = "nvidia-cuda-runtime-cu12" +version = "12.1.105" +description = "CUDA Runtime native Libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:6e258468ddf5796e25f1dc591a31029fa317d97a0a94ed93468fc86301d61e40"}, + {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:dfb46ef84d73fababab44cf03e3b83f80700d27ca300e537f85f636fac474344"}, +] + +[[package]] +name = "nvidia-cudnn-cu12" +version = "8.9.2.26" +description = "cuDNN runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cudnn_cu12-8.9.2.26-py3-none-manylinux1_x86_64.whl", hash = "sha256:5ccb288774fdfb07a7e7025ffec286971c06d8d7b4fb162525334616d7629ff9"}, +] + +[package.dependencies] +nvidia-cublas-cu12 = "*" + +[[package]] +name = "nvidia-cufft-cu12" +version = "11.0.2.54" +description = "CUFFT native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl", hash = "sha256:794e3948a1aa71fd817c3775866943936774d1c14e7628c74f6f7417224cdf56"}, + {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-win_amd64.whl", hash = "sha256:d9ac353f78ff89951da4af698f80870b1534ed69993f10a4cf1d96f21357e253"}, +] + +[[package]] +name = "nvidia-curand-cu12" +version = "10.3.2.106" +description = "CURAND native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:9d264c5036dde4e64f1de8c50ae753237c12e0b1348738169cd0f8a536c0e1e0"}, + {file = "nvidia_curand_cu12-10.3.2.106-py3-none-win_amd64.whl", hash = "sha256:75b6b0c574c0037839121317e17fd01f8a69fd2ef8e25853d826fec30bdba74a"}, +] + +[[package]] +name = "nvidia-cusolver-cu12" +version = "11.4.5.107" +description = "CUDA solver native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl", hash = "sha256:8a7ec542f0412294b15072fa7dab71d31334014a69f953004ea7a118206fe0dd"}, + {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-win_amd64.whl", hash = "sha256:74e0c3a24c78612192a74fcd90dd117f1cf21dea4822e66d89e8ea80e3cd2da5"}, +] + +[package.dependencies] +nvidia-cublas-cu12 = "*" +nvidia-cusparse-cu12 = "*" +nvidia-nvjitlink-cu12 = "*" + +[[package]] +name = "nvidia-cusparse-cu12" +version = "12.1.0.106" +description = "CUSPARSE native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl", hash = 
"sha256:f3b50f42cf363f86ab21f720998517a659a48131e8d538dc02f8768237bd884c"}, + {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-win_amd64.whl", hash = "sha256:b798237e81b9719373e8fae8d4f091b70a0cf09d9d85c95a557e11df2d8e9a5a"}, +] + +[package.dependencies] +nvidia-nvjitlink-cu12 = "*" + +[[package]] +name = "nvidia-nccl-cu12" +version = "2.19.3" +description = "NVIDIA Collective Communication Library (NCCL) Runtime" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nccl_cu12-2.19.3-py3-none-manylinux1_x86_64.whl", hash = "sha256:a9734707a2c96443331c1e48c717024aa6678a0e2a4cb66b2c364d18cee6b48d"}, +] + +[[package]] +name = "nvidia-nvjitlink-cu12" +version = "12.4.99" +description = "Nvidia JIT LTO Library" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nvjitlink_cu12-12.4.99-py3-none-manylinux2014_x86_64.whl", hash = "sha256:c6428836d20fe7e327191c175791d38570e10762edc588fb46749217cd444c74"}, + {file = "nvidia_nvjitlink_cu12-12.4.99-py3-none-win_amd64.whl", hash = "sha256:991905ffa2144cb603d8ca7962d75c35334ae82bf92820b6ba78157277da1ad2"}, +] + +[[package]] +name = "nvidia-nvtx-cu12" +version = "12.1.105" +description = "NVIDIA Tools Extension" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:dc21cf308ca5691e7c04d962e213f8a4aa9bbfa23d95412f452254c2caeb09e5"}, + {file = "nvidia_nvtx_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:65f4d98982b31b60026e0e6de73fbdfc09d08a96f4656dd3665ca616a11e1e82"}, +] + [[package]] name = "oauthlib" version = "3.2.2" @@ -3281,47 +3498,47 @@ files = [ [[package]] name = "pyarrow" -version = "15.0.0" +version = "15.0.1" description = "Python library for Apache Arrow" optional = false python-versions = ">=3.8" files = [ - {file = "pyarrow-15.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:0a524532fd6dd482edaa563b686d754c70417c2f72742a8c990b322d4c03a15d"}, - {file = "pyarrow-15.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:60a6bdb314affa9c2e0d5dddf3d9cbb9ef4a8dddaa68669975287d47ece67642"}, - {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:66958fd1771a4d4b754cd385835e66a3ef6b12611e001d4e5edfcef5f30391e2"}, - {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f500956a49aadd907eaa21d4fff75f73954605eaa41f61cb94fb008cf2e00c6"}, - {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6f87d9c4f09e049c2cade559643424da84c43a35068f2a1c4653dc5b1408a929"}, - {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:85239b9f93278e130d86c0e6bb455dcb66fc3fd891398b9d45ace8799a871a1e"}, - {file = "pyarrow-15.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:5b8d43e31ca16aa6e12402fcb1e14352d0d809de70edd185c7650fe80e0769e3"}, - {file = "pyarrow-15.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:fa7cd198280dbd0c988df525e50e35b5d16873e2cdae2aaaa6363cdb64e3eec5"}, - {file = "pyarrow-15.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8780b1a29d3c8b21ba6b191305a2a607de2e30dab399776ff0aa09131e266340"}, - {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe0ec198ccc680f6c92723fadcb97b74f07c45ff3fdec9dd765deb04955ccf19"}, - {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:036a7209c235588c2f07477fe75c07e6caced9b7b61bb897c8d4e52c4b5f9555"}, - {file = 
"pyarrow-15.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2bd8a0e5296797faf9a3294e9fa2dc67aa7f10ae2207920dbebb785c77e9dbe5"}, - {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:e8ebed6053dbe76883a822d4e8da36860f479d55a762bd9e70d8494aed87113e"}, - {file = "pyarrow-15.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:17d53a9d1b2b5bd7d5e4cd84d018e2a45bc9baaa68f7e6e3ebed45649900ba99"}, - {file = "pyarrow-15.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9950a9c9df24090d3d558b43b97753b8f5867fb8e521f29876aa021c52fda351"}, - {file = "pyarrow-15.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:003d680b5e422d0204e7287bb3fa775b332b3fce2996aa69e9adea23f5c8f970"}, - {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f75fce89dad10c95f4bf590b765e3ae98bcc5ba9f6ce75adb828a334e26a3d40"}, - {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca9cb0039923bec49b4fe23803807e4ef39576a2bec59c32b11296464623dc2"}, - {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:9ed5a78ed29d171d0acc26a305a4b7f83c122d54ff5270810ac23c75813585e4"}, - {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:6eda9e117f0402dfcd3cd6ec9bfee89ac5071c48fc83a84f3075b60efa96747f"}, - {file = "pyarrow-15.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:9a3a6180c0e8f2727e6f1b1c87c72d3254cac909e609f35f22532e4115461177"}, - {file = "pyarrow-15.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:19a8918045993349b207de72d4576af0191beef03ea655d8bdb13762f0cd6eac"}, - {file = "pyarrow-15.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d0ec076b32bacb6666e8813a22e6e5a7ef1314c8069d4ff345efa6246bc38593"}, - {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5db1769e5d0a77eb92344c7382d6543bea1164cca3704f84aa44e26c67e320fb"}, - {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2617e3bf9df2a00020dd1c1c6dce5cc343d979efe10bc401c0632b0eef6ef5b"}, - {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:d31c1d45060180131caf10f0f698e3a782db333a422038bf7fe01dace18b3a31"}, - {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:c8c287d1d479de8269398b34282e206844abb3208224dbdd7166d580804674b7"}, - {file = "pyarrow-15.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:07eb7f07dc9ecbb8dace0f58f009d3a29ee58682fcdc91337dfeb51ea618a75b"}, - {file = "pyarrow-15.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:47af7036f64fce990bb8a5948c04722e4e3ea3e13b1007ef52dfe0aa8f23cf7f"}, - {file = "pyarrow-15.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93768ccfff85cf044c418bfeeafce9a8bb0cee091bd8fd19011aff91e58de540"}, - {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6ee87fd6892700960d90abb7b17a72a5abb3b64ee0fe8db6c782bcc2d0dc0b4"}, - {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:001fca027738c5f6be0b7a3159cc7ba16a5c52486db18160909a0831b063c4e4"}, - {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:d1c48648f64aec09accf44140dccb92f4f94394b8d79976c426a5b79b11d4fa7"}, - {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:972a0141be402bb18e3201448c8ae62958c9c7923dfaa3b3d4530c835ac81aed"}, - {file = "pyarrow-15.0.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:f01fc5cf49081426429127aa2d427d9d98e1cb94a32cb961d583a70b7c4504e6"}, - {file = "pyarrow-15.0.0.tar.gz", hash = "sha256:876858f549d540898f927eba4ef77cd549ad8d24baa3207cf1b72e5788b50e83"}, + {file = "pyarrow-15.0.1-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:c2ddb3be5ea938c329a84171694fc230b241ce1b6b0ff1a0280509af51c375fa"}, + {file = "pyarrow-15.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7543ea88a0ff72f8e6baaf9bfdbec2c62aeabdbede9e4a571c71cc3bc43b6302"}, + {file = "pyarrow-15.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1519e218a6941fc074e4501088d891afcb2adf77c236e03c34babcf3d6a0d1c7"}, + {file = "pyarrow-15.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28cafa86e1944761970d3b3fc0411b14ff9b5c2b73cd22aaf470d7a3976335f5"}, + {file = "pyarrow-15.0.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:be5c3d463e33d03eab496e1af7916b1d44001c08f0f458ad27dc16093a020638"}, + {file = "pyarrow-15.0.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:47b1eda15d3aa3f49a07b1808648e1397e5dc6a80a30bf87faa8e2d02dad7ac3"}, + {file = "pyarrow-15.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:e524a31be7db22deebbbcf242b189063ab9a7652c62471d296b31bc6e3cae77b"}, + {file = "pyarrow-15.0.1-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:a476fefe8bdd56122fb0d4881b785413e025858803cc1302d0d788d3522b374d"}, + {file = "pyarrow-15.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:309e6191be385f2e220586bfdb643f9bb21d7e1bc6dd0a6963dc538e347b2431"}, + {file = "pyarrow-15.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83bc586903dbeb4365cbc72b602f99f70b96c5882e5dfac5278813c7d624ca3c"}, + {file = "pyarrow-15.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07e652daac6d8b05280cd2af31c0fb61a4490ec6a53dc01588014d9fa3fdbee9"}, + {file = "pyarrow-15.0.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:abad2e08652df153a72177ce20c897d083b0c4ebeec051239e2654ddf4d3c996"}, + {file = "pyarrow-15.0.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cde663352bc83ad75ba7b3206e049ca1a69809223942362a8649e37bd22f9e3b"}, + {file = "pyarrow-15.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:1b6e237dd7a08482a8b8f3f6512d258d2460f182931832a8c6ef3953203d31e1"}, + {file = "pyarrow-15.0.1-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:7bd167536ee23192760b8c731d39b7cfd37914c27fd4582335ffd08450ff799d"}, + {file = "pyarrow-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7c08bb31eb2984ba5c3747d375bb522e7e536b8b25b149c9cb5e1c49b0ccb736"}, + {file = "pyarrow-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0f9c1d630ed2524bd1ddf28ec92780a7b599fd54704cd653519f7ff5aec177a"}, + {file = "pyarrow-15.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5186048493395220550bca7b524420471aac2d77af831f584ce132680f55c3df"}, + {file = "pyarrow-15.0.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:31dc30c7ec8958da3a3d9f31d6c3630429b2091ede0ecd0d989fd6bec129f0e4"}, + {file = "pyarrow-15.0.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:3f111a014fb8ac2297b43a74bf4495cc479a332908f7ee49cb7cbd50714cb0c1"}, + {file = "pyarrow-15.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:a6d1f7c15d7f68f08490d0cb34611497c74285b8a6bbeab4ef3fc20117310983"}, + {file = "pyarrow-15.0.1-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:9ad931b996f51c2f978ed517b55cb3c6078272fb4ec579e3da5a8c14873b698d"}, + 
{file = "pyarrow-15.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:738f6b53ab1c2f66b2bde8a1d77e186aeaab702d849e0dfa1158c9e2c030add3"}, + {file = "pyarrow-15.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c1c3fc16bc74e33bf8f1e5a212938ed8d88e902f372c4dac6b5bad328567d2f"}, + {file = "pyarrow-15.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1fa92512128f6c1b8dde0468c1454dd70f3bff623970e370d52efd4d24fd0be"}, + {file = "pyarrow-15.0.1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:b4157f307c202cbbdac147d9b07447a281fa8e63494f7fc85081da351ec6ace9"}, + {file = "pyarrow-15.0.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:b75e7da26f383787f80ad76143b44844ffa28648fcc7099a83df1538c078d2f2"}, + {file = "pyarrow-15.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:3a99eac76ae14096c209850935057b9e8ce97a78397c5cde8724674774f34e5d"}, + {file = "pyarrow-15.0.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:dd532d3177e031e9b2d2df19fd003d0cc0520d1747659fcabbd4d9bb87de508c"}, + {file = "pyarrow-15.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ce8c89848fd37e5313fc2ce601483038ee5566db96ba0808d5883b2e2e55dc53"}, + {file = "pyarrow-15.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:862eac5e5f3b6477f7a92b2f27e560e1f4e5e9edfca9ea9da8a7478bb4abd5ce"}, + {file = "pyarrow-15.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f0ea3a29cd5cb99bf14c1c4533eceaa00ea8fb580950fb5a89a5c771a994a4e"}, + {file = "pyarrow-15.0.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:bb902f780cfd624b2e8fd8501fadab17618fdb548532620ef3d91312aaf0888a"}, + {file = "pyarrow-15.0.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:4f87757f02735a6bb4ad2e1b98279ac45d53b748d5baf52401516413007c6999"}, + {file = "pyarrow-15.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:efd3816c7fbfcbd406ac0f69873cebb052effd7cdc153ae5836d1b00845845d7"}, + {file = "pyarrow-15.0.1.tar.gz", hash = "sha256:21d812548d39d490e0c6928a7c663f37b96bf764034123d4b4ab4530ecc757a9"}, ] [package.dependencies] @@ -3560,13 +3777,13 @@ extra = ["pygments (>=2.12)"] [[package]] name = "pyparsing" -version = "3.1.1" +version = "3.1.2" description = "pyparsing module - Classes and methods to define and execute parsing grammars" optional = false python-versions = ">=3.6.8" files = [ - {file = "pyparsing-3.1.1-py3-none-any.whl", hash = "sha256:32c7c0b711493c72ff18a981d24f28aaf9c1fb7ed5e9667c9e84e3db623bdbfb"}, - {file = "pyparsing-3.1.1.tar.gz", hash = "sha256:ede28a1a32462f5a9705e07aea48001a08f7cf81a021585011deba701581a0db"}, + {file = "pyparsing-3.1.2-py3-none-any.whl", hash = "sha256:f9db75911801ed778fe61bb643079ff86601aca99fcae6345aa67292038fb742"}, + {file = "pyparsing-3.1.2.tar.gz", hash = "sha256:a1bac0ce561155ecc3ed78ca94d3c9378656ad4c94c1270de543f621420f94ad"}, ] [package.extras] @@ -3617,6 +3834,23 @@ toml = "*" [package.extras] testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"] +[[package]] +name = "pytest-mock" +version = "3.12.0" +description = "Thin-wrapper around the mock package for easier use with pytest" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-mock-3.12.0.tar.gz", hash = "sha256:31a40f038c22cad32287bb43932054451ff5583ff094bca6f675df2f8bc1a6e9"}, + {file = "pytest_mock-3.12.0-py3-none-any.whl", hash = "sha256:0972719a7263072da3a21c7f4773069bcc7486027d7e8e1f81d98a47e701bc4f"}, +] + +[package.dependencies] +pytest = ">=5.0" + +[package.extras] +dev 
= ["pre-commit", "pytest-asyncio", "tox"] + [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -3704,7 +3938,6 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, @@ -3860,13 +4093,13 @@ cffi = {version = "*", markers = "implementation_name == \"pypy\""} [[package]] name = "qdrant-client" -version = "1.7.3" +version = "1.8.0" description = "Client library for the Qdrant vector search engine" optional = true python-versions = ">=3.8" files = [ - {file = "qdrant_client-1.7.3-py3-none-any.whl", hash = "sha256:b062420ba55eb847652c7d2a26404fb1986bea13aa785763024013f96a7a915c"}, - {file = "qdrant_client-1.7.3.tar.gz", hash = "sha256:7b809be892cdc5137ae80ea3335da40c06499ad0b0072b5abc6bad79da1d29fc"}, + {file = "qdrant_client-1.8.0-py3-none-any.whl", hash = "sha256:fa28d3eb64c0c57ec029c7c85c71f6c72c197f92502022655741f3632c518e29"}, + {file = "qdrant_client-1.8.0.tar.gz", hash = "sha256:2a1a3f2cbacc7adba85644cf6cfdee20401cf25764b32da479c81fb63e178d15"}, ] [package.dependencies] @@ -3879,7 +4112,7 @@ pydantic = ">=1.10.8" urllib3 = ">=1.26.14,<3" [package.extras] -fastembed = ["fastembed (==0.1.1)"] +fastembed = ["fastembed (==0.2.2)"] [[package]] name = "referencing" @@ -4161,30 +4394,162 @@ pyasn1 = ">=0.1.3" [[package]] name = "ruff" -version = "0.3.0" +version = "0.3.1" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.3.0-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:7deb528029bacf845bdbb3dbb2927d8ef9b4356a5e731b10eef171e3f0a85944"}, - {file = "ruff-0.3.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:e1e0d4381ca88fb2b73ea0766008e703f33f460295de658f5467f6f229658c19"}, - {file = "ruff-0.3.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f7dbba46e2827dfcb0f0cc55fba8e96ba7c8700e0a866eb8cef7d1d66c25dcb"}, - {file = "ruff-0.3.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:23dbb808e2f1d68eeadd5f655485e235c102ac6f12ad31505804edced2a5ae77"}, - {file = "ruff-0.3.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3ef655c51f41d5fa879f98e40c90072b567c666a7114fa2d9fe004dffba00932"}, - {file = "ruff-0.3.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:d0d3d7ef3d4f06433d592e5f7d813314a34601e6c5be8481cccb7fa760aa243e"}, - {file = "ruff-0.3.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b08b356d06a792e49a12074b62222f9d4ea2a11dca9da9f68163b28c71bf1dd4"}, - {file = "ruff-0.3.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9343690f95710f8cf251bee1013bf43030072b9f8d012fbed6ad702ef70d360a"}, - {file = "ruff-0.3.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1f3ed501a42f60f4dedb7805fa8d4534e78b4e196f536bac926f805f0743d49"}, - {file = "ruff-0.3.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:cc30a9053ff2f1ffb505a585797c23434d5f6c838bacfe206c0e6cf38c921a1e"}, - {file = "ruff-0.3.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:5da894a29ec018a8293d3d17c797e73b374773943e8369cfc50495573d396933"}, - {file = "ruff-0.3.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:755c22536d7f1889be25f2baf6fedd019d0c51d079e8417d4441159f3bcd30c2"}, - {file = "ruff-0.3.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:dd73fe7f4c28d317855da6a7bc4aa29a1500320818dd8f27df95f70a01b8171f"}, - {file = "ruff-0.3.0-py3-none-win32.whl", hash = "sha256:19eacceb4c9406f6c41af806418a26fdb23120dfe53583df76d1401c92b7c14b"}, - {file = "ruff-0.3.0-py3-none-win_amd64.whl", hash = "sha256:128265876c1d703e5f5e5a4543bd8be47c73a9ba223fd3989d4aa87dd06f312f"}, - {file = "ruff-0.3.0-py3-none-win_arm64.whl", hash = "sha256:e3a4a6d46aef0a84b74fcd201a4401ea9a6cd85614f6a9435f2d33dd8cefbf83"}, - {file = "ruff-0.3.0.tar.gz", hash = "sha256:0886184ba2618d815067cf43e005388967b67ab9c80df52b32ec1152ab49f53a"}, + {file = "ruff-0.3.1-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:6b82e3937d0d76554cd5796bc3342a7d40de44494d29ff490022d7a52c501744"}, + {file = "ruff-0.3.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:ae7954c8f692b70e6a206087ae3988acc9295d84c550f8d90b66c62424c16771"}, + {file = "ruff-0.3.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b730f56ccf91225da0f06cfe421e83b8cc27b2a79393db9c3df02ed7e2bbc01"}, + {file = "ruff-0.3.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c78bfa85637668f47bd82aa2ae17de2b34221ac23fea30926f6409f9e37fc927"}, + {file = "ruff-0.3.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6abaad602d6e6daaec444cbf4d9364df0a783e49604c21499f75bb92237d4af"}, + {file = "ruff-0.3.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:5f0c21b6914c3c9a25a59497cbb1e5b6c2d8d9beecc9b8e03ee986e24eee072e"}, + {file = 
"ruff-0.3.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:434c3fc72e6311c85cd143c4c448b0e60e025a9ac1781e63ba222579a8c29200"}, + {file = "ruff-0.3.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78a7025e6312cbba496341da5062e7cdd47d95f45c1b903e635cdeb1ba5ec2b9"}, + {file = "ruff-0.3.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52b02bb46f1a79b0c1fa93f6495bc7e77e4ef76e6c28995b4974a20ed09c0833"}, + {file = "ruff-0.3.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:11b5699c42f7d0b771c633d620f2cb22e727fb226273aba775a91784a9ed856c"}, + {file = "ruff-0.3.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:54e5dca3e411772b51194b3102b5f23b36961e8ede463776b289b78180df71a0"}, + {file = "ruff-0.3.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:951efb610c5844e668bbec4f71cf704f8645cf3106e13f283413969527ebfded"}, + {file = "ruff-0.3.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:09c7333b25e983aabcf6e38445252cff0b4745420fc3bda45b8fce791cc7e9ce"}, + {file = "ruff-0.3.1-py3-none-win32.whl", hash = "sha256:d937f9b99ebf346e0606c3faf43c1e297a62ad221d87ef682b5bdebe199e01f6"}, + {file = "ruff-0.3.1-py3-none-win_amd64.whl", hash = "sha256:c0318a512edc9f4e010bbaab588b5294e78c5cdc9b02c3d8ab2d77c7ae1903e3"}, + {file = "ruff-0.3.1-py3-none-win_arm64.whl", hash = "sha256:d3b60e44240f7e903e6dbae3139a65032ea4c6f2ad99b6265534ff1b83c20afa"}, + {file = "ruff-0.3.1.tar.gz", hash = "sha256:d30db97141fc2134299e6e983a6727922c9e03c031ae4883a6d69461de722ae7"}, +] + +[[package]] +name = "safetensors" +version = "0.4.2" +description = "" +optional = false +python-versions = ">=3.7" +files = [ + {file = "safetensors-0.4.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:69d8bb8384dc2cb5b72c36c4d6980771b293d1a1377b378763f5e37b6bb8d133"}, + {file = "safetensors-0.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3d420e19fcef96d0067f4de4699682b4bbd85fc8fea0bd45fcd961fdf3e8c82c"}, + {file = "safetensors-0.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ca54742122fa3c4821754adb67318e1cd25c3a22bbf0c5520d5176e77a099ac"}, + {file = "safetensors-0.4.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8b47aa643afdfd66cf7ce4c184092ae734e15d10aba2c2948f24270211801c3c"}, + {file = "safetensors-0.4.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d88a16bbc330f27e7f2d4caaf6fb061ad0b8a756ecc4033260b0378e128ce8a2"}, + {file = "safetensors-0.4.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9223b8ac21085db614a510eb3445e7083cae915a9202357555fa939695d4f57"}, + {file = "safetensors-0.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce6cb86133dc8930a7ab5e7438545a7f205f7a1cdd5aaf108c1d0da6bdcfbc2b"}, + {file = "safetensors-0.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b8a628e0ae2bbc334b62952c384aa5f41621d01850f8d67b04a96b9c39dd7326"}, + {file = "safetensors-0.4.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:88d6beb7f811a081e0e5f1d9669fdac816c45340c04b1eaf7ebfda0ce93ea403"}, + {file = "safetensors-0.4.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b57fc5b1b54cb12d8690a58a4cf4b7144730d4bde9d98aa0e1dab6295a1cd579"}, + {file = "safetensors-0.4.2-cp310-none-win32.whl", hash = "sha256:9d87a1c98803c16cf113b9ba03f07b2dce5e8eabfd1811a7f7323fcaa2a1bf47"}, + {file = "safetensors-0.4.2-cp310-none-win_amd64.whl", hash = 
"sha256:18930ec1d1ecb526d3d9835abc2489b8f1530877518f0c541e77ef0b7abcbd99"}, + {file = "safetensors-0.4.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c5dd2ed788730ed56b415d1a11c62026b8cc8c573f55a2092afb3ab383e94fff"}, + {file = "safetensors-0.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cc41791b33efb9c83a59b731619f3d15f543dfe71f3a793cb8fbf9bd5d0d5d71"}, + {file = "safetensors-0.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c888bf71d5ca12a720f1ed87d407c4918afa022fb247a6546d8fac15b1f112b"}, + {file = "safetensors-0.4.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e6b2feb4b47226a16a792e6fac3f49442714884a3d4c1008569d5068a3941be9"}, + {file = "safetensors-0.4.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f41cc0ee4b838ae8f4d8364a1b162067693d11a3893f0863be8c228d40e4d0ee"}, + {file = "safetensors-0.4.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:51b7228e46c0a483c40ba4b9470dea00fb1ff8685026bb4766799000f6328ac2"}, + {file = "safetensors-0.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02697f8f2be8ca3c37a4958702dbdb1864447ef765e18b5328a1617022dcf164"}, + {file = "safetensors-0.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:27fd8f65cf7c80e4280cae1ee6bcd85c483882f6580821abe71ee1a0d3dcfca7"}, + {file = "safetensors-0.4.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c487b5f113b0924c9534a07dc034830fb4ef05ce9bb6d78cfe016a7dedfe281f"}, + {file = "safetensors-0.4.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:da7f6483f3fe67ff39b3a55552552c67930ea10a36e9f2539d36fc205273d767"}, + {file = "safetensors-0.4.2-cp311-none-win32.whl", hash = "sha256:52a7012f6cb9cb4a132760b6308daede18a9f5f8952ce08adc7c67a7d865c2d8"}, + {file = "safetensors-0.4.2-cp311-none-win_amd64.whl", hash = "sha256:4d1361a097ac430b310ce9eed8ed4746edee33ddafdfbb965debc8966fc34dc2"}, + {file = "safetensors-0.4.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:77af8aa0edcc2863760fd6febbfdb82e88fd75d0e60c1ce4ba57208ba5e4a89b"}, + {file = "safetensors-0.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846666c1c5a8c8888d2dfda8d3921cb9cb8e2c5f78365be756c11021e75a0a2a"}, + {file = "safetensors-0.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f4bfc7ea19b446bfad41510d4b4c76101698c00caaa8a332c8edd8090a412ef"}, + {file = "safetensors-0.4.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:233436fd30f27ffeb3c3780d0b84f496518868445c7a8db003639a649cc98453"}, + {file = "safetensors-0.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7a09237a795d11cd11f9dae505d170a29b5616151db1e10c14f892b11caadc7d"}, + {file = "safetensors-0.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de01c9a3a3b7b69627d624ff69d9f11d28ce9908eea2fb6245adafa4b1d43df6"}, + {file = "safetensors-0.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c1f25c5069ee42a5bcffdc66c300a407941edd73f3239e9fdefd26216407391"}, + {file = "safetensors-0.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7a73b3649456d09ca8506140d44484b63154a7378434cc1e8719f8056550b224"}, + {file = "safetensors-0.4.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e1625a8d07d046e968bd5c4961810aba1225984e4fb9243626f9d04a06ed3fee"}, + {file = "safetensors-0.4.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:8f74c86b25615cb24ad4cff765a2eefc09d71bf0fed97588cf585aad9c38fbb4"}, + {file = "safetensors-0.4.2-cp312-none-win32.whl", hash = "sha256:8523b9c5777d771bcde5c2389c03f1cdf7ebe8797432a1bd5e345efe25c55987"}, + {file = "safetensors-0.4.2-cp312-none-win_amd64.whl", hash = "sha256:dcff0243e1737a21f83d664c63fed89d1f532c23fc6830d0427279fabd789ccb"}, + {file = "safetensors-0.4.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:96ad3d7d472612e26cbe413922b4fb13933310f0511d346ea5cc9a1e856e52eb"}, + {file = "safetensors-0.4.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:88250922401b5ae4e37de929178caf46be47ed16c817b2237b81679bec07c120"}, + {file = "safetensors-0.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d40443554142fc0ab30652d5cc8554c4b7a613513bde00373e18afd5de8cbe4b"}, + {file = "safetensors-0.4.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:27f53f70106224d32d874aacecbeb4a6e4c5b16a1d2006d0e876d97229086d71"}, + {file = "safetensors-0.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cc068afe23734dfb26ce19db0a7877499ddf73b1d55ceb762417e8da4a1b05fb"}, + {file = "safetensors-0.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9be1918eb8d43a11a6f8806759fccfa0eeb0542b12924caba66af8a7800ad01a"}, + {file = "safetensors-0.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41911087d20a7bbd78cb4ad4f98aab0c431533107584df6635d8b54b99945573"}, + {file = "safetensors-0.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:50771c662aab909f31e94d048e76861fd027d66076ea773eef2e66c717766e24"}, + {file = "safetensors-0.4.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:13f2e57be007b7ea9329133d2399e6bdfcf1910f655440a4da17df3a45afcd30"}, + {file = "safetensors-0.4.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c772147e6395bc829842e0a98e1b30c67fe25d816299c28196488511d5a5e951"}, + {file = "safetensors-0.4.2-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:36239a0060b537a3e8c473df78cffee14c3ec4f51d5f1a853af99371a2fb2a35"}, + {file = "safetensors-0.4.2-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:d0cbb7664fad2c307f95195f951b7059e95dc23e0e1822e5978c8b500098543c"}, + {file = "safetensors-0.4.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b3e55adb6bd9dc1c2a341e72f48f075953fa35d173dd8e29a95b3b02d0d1462"}, + {file = "safetensors-0.4.2-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42f743b3cca863fba53ca57a193f510e5ec359b97f38c282437716b6768e4a25"}, + {file = "safetensors-0.4.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04e6af4a6dbeb06c4e6e7d46cf9c716cbc4cc5ef62584fd8a7c0fe558562df45"}, + {file = "safetensors-0.4.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a492ba21b5c8f14ee5ec9b20f42ba969e53ca1f909a4d04aad736b66a341dcc2"}, + {file = "safetensors-0.4.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b25b8233a1a85dc67e39838951cfb01595d792f3b7b644add63edb652992e030"}, + {file = "safetensors-0.4.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fd27e063fbdafe776f7b1714da59110e88f270e86db00788a8fd65f4eacfeba7"}, + {file = "safetensors-0.4.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1b6fa399f251bbeb52029bf5a0ac2878d7705dd3612a2f8895b48e9c11f0367d"}, + {file = "safetensors-0.4.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = 
"sha256:de642d46b459e4afd5c2020b26c0d6d869a171ea00411897d5776c127cac74f0"}, + {file = "safetensors-0.4.2-cp37-none-win32.whl", hash = "sha256:77b72d17754c93bb68f3598182f14d78776e0b9b31682ca5bb2c7c5bd9a75267"}, + {file = "safetensors-0.4.2-cp37-none-win_amd64.whl", hash = "sha256:d36ee3244d461cd655aeef493792c3bccf4875282f8407fd9af99e9a41cf2530"}, + {file = "safetensors-0.4.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:16b6b3884f7876c6b3b23a742428223a7170a5a9dac819d8c12a1569422c4b5a"}, + {file = "safetensors-0.4.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ee25d311493fbbe0be9d395faee46e9d79e8948f461e388ff39e59875ed9a350"}, + {file = "safetensors-0.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eed8097968585cd752a1171f86fce9aa1d89a29033e5cd8bec5a502e29f6b7af"}, + {file = "safetensors-0.4.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:880e6865cf72cb67f9ab8d04a3c4b49dd95ae92fb1583929ce65aed94e1f685f"}, + {file = "safetensors-0.4.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91290f83daf80ce6d1a7f629b244443c200060a80f908b29d879021409e5ea94"}, + {file = "safetensors-0.4.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3517d568486ab3508a7acc360b82d7a4a3e26b86efdf210a9ecd9d233c40708a"}, + {file = "safetensors-0.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1f43a77eb38540f782999e5dc5645164fe9027d3f0194f6c9a5126168017efa"}, + {file = "safetensors-0.4.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b684d9818aa5d63fddc65f7d0151968037d255d91adf74eba82125b41c680aaa"}, + {file = "safetensors-0.4.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ab1f5d84185f9fefaf21413efb764e4908057b8a9a0b987ede890c353490fd70"}, + {file = "safetensors-0.4.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2bd979642e6c3a517ef4b84ff36c2fee4015664fea05a61154fc565978347553"}, + {file = "safetensors-0.4.2-cp38-none-win32.whl", hash = "sha256:11be6e7afed29e5a5628f0aa6214e34bc194da73f558dc69fc7d56e07037422a"}, + {file = "safetensors-0.4.2-cp38-none-win_amd64.whl", hash = "sha256:2f7a6e5d29bd2cc340cffaa391fa437b1be9d21a2bd8b8724d2875d13a6ef2a9"}, + {file = "safetensors-0.4.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a5a921b4fe6925f9942adff3ebae8c16e0487908c54586a5a42f35b59fd69794"}, + {file = "safetensors-0.4.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b691727228c28f2d82d8a92b2bc26e7a1f129ee40b2f2a3185b5974e038ed47c"}, + {file = "safetensors-0.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91ca1056decc4e981248786e87b2a202d4841ee5f99d433f1adf3d44d4bcfa0e"}, + {file = "safetensors-0.4.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:55969fd2e6fdb38dc221b0ab380668c21b0efa12a7562db9924759faa3c51757"}, + {file = "safetensors-0.4.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6ae429bfaecc10ab5fe78c93009b3d1656c1581da560041e700eadb497dbe7a4"}, + {file = "safetensors-0.4.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ff88f194fe4ac50b463a4a6f0c03af9ad72eb5d24ec6d6730af59522e37fedb"}, + {file = "safetensors-0.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a80cb48d0a447f8dd18e61813efa7d3f8f8d52edf0f05806abc0c59b83431f57"}, + {file = "safetensors-0.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b286fb7adfee70a4189898ac2342b8a67d5f493e6b21b0af89ca8eac1b967cbf"}, + {file 
= "safetensors-0.4.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0ceeff9ddbab4f78738489eb6682867ae946178776f33699737b2129b5394dc1"}, + {file = "safetensors-0.4.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a26fae748a7488cb3aac381eddfa818c42052c87b5e689fb4c6e82ed58cec209"}, + {file = "safetensors-0.4.2-cp39-none-win32.whl", hash = "sha256:039a42ab33c9d68b39706fd38f1922ace26866eff246bf20271edb619f5f848b"}, + {file = "safetensors-0.4.2-cp39-none-win_amd64.whl", hash = "sha256:b3a3e1f5b85859e398773f064943b62a4059f225008a2a8ee6add1edcf77cacf"}, + {file = "safetensors-0.4.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:4e70d442ad17e8b153ef9095bf48ea64f15a66bf26dc2b6ca94660c154edbc24"}, + {file = "safetensors-0.4.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b90f1d9809caf4ff395951b4703295a68d12907f6945bbc3129e934ff8ae46f6"}, + {file = "safetensors-0.4.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c7ac9ad3728838006598e296b3ae9f27d80b489effd4685b92d97b3fc4c98f6"}, + {file = "safetensors-0.4.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5730d77e6ff7f4c7039e20913661ad0ea2f86c09e71c039e73dfdd1f394f08"}, + {file = "safetensors-0.4.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:44feb8cb156d6803dcd19fc6b81b27235f29b877660605a6ac35e1da7d64f0e4"}, + {file = "safetensors-0.4.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:523a241c33e7c827ab9a3a23760d75c7d062f43dfe55b6b019409f89b0fb52d1"}, + {file = "safetensors-0.4.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fb18300e8eb74291225214f26c9a8ae2110fd61a6c9b5a2ff4c4e0eb1bb9a998"}, + {file = "safetensors-0.4.2-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:fe5437ff9fb116e44f2ab558981249ae63f978392b4576e62fcfe167d353edbc"}, + {file = "safetensors-0.4.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9304a0934ced5a5d272f39de36291dc141dfc152d277f03fb4d65f2fb2ffa7c"}, + {file = "safetensors-0.4.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:160ba1b1e11cf874602c233ab80a14f588571d09556cbc3586900121d622b5ed"}, + {file = "safetensors-0.4.2-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04fcd6fcf7d9c13c7e5dc7e08de5e492ee4daa8f4ad74b4d8299d3eb0224292f"}, + {file = "safetensors-0.4.2-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:906d14c4a677d35834fb0f3a5455ef8305e1bba10a5e0f2e0f357b3d1ad989f2"}, + {file = "safetensors-0.4.2-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:df3fcdec0cd543084610d1f09c65cdb10fb3079f79bceddc092b0d187c6a265b"}, + {file = "safetensors-0.4.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5ca76f13fb1cef242ea3ad2cb37388e7d005994f42af8b44bee56ba48b2d45ce"}, + {file = "safetensors-0.4.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:278a1a3414c020785decdcd741c578725721274d2f9f787fcc930882e83b89cc"}, + {file = "safetensors-0.4.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b5a461cc68ecd42d9d546e5e1268a39d8ede7934a68d1ce17c3c659cb829d6"}, + {file = "safetensors-0.4.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2341411412a41671d25e26bed59ec121e46bf4fadb8132895e610411c4b9681"}, + {file = "safetensors-0.4.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:3497ac3895acf17c5f98197f1fa4769f09c5e7ede07fcb102f1c201e663e052c"}, + {file = "safetensors-0.4.2-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:01b5e71d3754d2201294f1eb7a6d59cce3a5702ff96d83d226571b2ca2183837"}, + {file = "safetensors-0.4.2-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3627dbd1ea488dd8046a0491de5087f3c0d641e7acc80c0189a33c69398f1cd1"}, + {file = "safetensors-0.4.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9d56f0ef53afad26ec54ceede78a43e9a23a076dadbbda7b44d304c591abf4c1"}, + {file = "safetensors-0.4.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b259ca73d42daf658a1bda463f1f83885ae4d93a60869be80d7f7dfcc9d8bbb5"}, + {file = "safetensors-0.4.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ebc3cd401e4eb54e7c0a70346be565e81942d9a41fafd5f4bf7ab3a55d10378"}, + {file = "safetensors-0.4.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5bc384a0309b706aa0425c93abb0390508a61bf029ce99c7d9df4220f25871a5"}, + {file = "safetensors-0.4.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:af2d8f7235d8a08fbccfb8394387890e7fa38942b349a94e6eff13c52ac98087"}, + {file = "safetensors-0.4.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0911315bbcc5289087d063c2c2c7ccd711ea97a7e557a7bce005ac2cf80146aa"}, + {file = "safetensors-0.4.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:1efe31673be91832d73439a2af426743e1395fc9ef7b081914e9e1d567bd7b5f"}, + {file = "safetensors-0.4.2.tar.gz", hash = "sha256:acc85dcb09ec5e8aa787f588d7ad4d55c103f31e4ff060e17d92cc0e8b8cac73"}, ] +[package.extras] +all = ["safetensors[jax]", "safetensors[numpy]", "safetensors[paddlepaddle]", "safetensors[pinned-tf]", "safetensors[quality]", "safetensors[testing]", "safetensors[torch]"] +dev = ["safetensors[all]"] +jax = ["flax (>=0.6.3)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "safetensors[numpy]"] +mlx = ["mlx (>=0.0.9)"] +numpy = ["numpy (>=1.21.6)"] +paddlepaddle = ["paddlepaddle (>=2.4.1)", "safetensors[numpy]"] +pinned-tf = ["safetensors[numpy]", "tensorflow (==2.11.0)"] +quality = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "isort (>=5.5.4)"] +tensorflow = ["safetensors[numpy]", "tensorflow (>=2.11.0)"] +testing = ["h5py (>=3.7.0)", "huggingface_hub (>=0.12.1)", "hypothesis (>=6.70.2)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "safetensors[numpy]", "setuptools_rust (>=1.5.2)"] +torch = ["safetensors[numpy]", "torch (>=1.10)"] + [[package]] name = "setuptools" version = "69.1.1" @@ -4603,7 +4968,7 @@ full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7 name = "sympy" version = "1.12" description = "Computer algebra system (CAS) in Python" -optional = true +optional = false python-versions = ">=3.8" files = [ {file = "sympy-1.12-py3-none-any.whl", hash = "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5"}, @@ -4643,56 +5008,129 @@ doc = ["reno", "sphinx", "tornado (>=4.5)"] [[package]] name = "tokenizers" -version = "0.13.3" -description = "Fast and Customizable Tokenizers" -optional = true -python-versions = "*" +version = "0.15.2" +description = "" +optional = false +python-versions = ">=3.7" files = [ - {file = "tokenizers-0.13.3-cp310-cp310-macosx_10_11_x86_64.whl", hash = "sha256:f3835c5be51de8c0a092058a4d4380cb9244fb34681fd0a295fbf0a52a5fdf33"}, - {file = "tokenizers-0.13.3-cp310-cp310-macosx_12_0_arm64.whl", hash = 
"sha256:4ef4c3e821730f2692489e926b184321e887f34fb8a6b80b8096b966ba663d07"}, - {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5fd1a6a25353e9aa762e2aae5a1e63883cad9f4e997c447ec39d071020459bc"}, - {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee0b1b311d65beab83d7a41c56a1e46ab732a9eed4460648e8eb0bd69fc2d059"}, - {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ef4215284df1277dadbcc5e17d4882bda19f770d02348e73523f7e7d8b8d396"}, - {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4d53976079cff8a033f778fb9adca2d9d69d009c02fa2d71a878b5f3963ed30"}, - {file = "tokenizers-0.13.3-cp310-cp310-win32.whl", hash = "sha256:1f0e3b4c2ea2cd13238ce43548959c118069db7579e5d40ec270ad77da5833ce"}, - {file = "tokenizers-0.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:89649c00d0d7211e8186f7a75dfa1db6996f65edce4b84821817eadcc2d3c79e"}, - {file = "tokenizers-0.13.3-cp311-cp311-macosx_10_11_universal2.whl", hash = "sha256:56b726e0d2bbc9243872b0144515ba684af5b8d8cd112fb83ee1365e26ec74c8"}, - {file = "tokenizers-0.13.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:cc5c022ce692e1f499d745af293ab9ee6f5d92538ed2faf73f9708c89ee59ce6"}, - {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f55c981ac44ba87c93e847c333e58c12abcbb377a0c2f2ef96e1a266e4184ff2"}, - {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f247eae99800ef821a91f47c5280e9e9afaeed9980fc444208d5aa6ba69ff148"}, - {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b3e3215d048e94f40f1c95802e45dcc37c5b05eb46280fc2ccc8cd351bff839"}, - {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ba2b0bf01777c9b9bc94b53764d6684554ce98551fec496f71bc5be3a03e98b"}, - {file = "tokenizers-0.13.3-cp311-cp311-win32.whl", hash = "sha256:cc78d77f597d1c458bf0ea7c2a64b6aa06941c7a99cb135b5969b0278824d808"}, - {file = "tokenizers-0.13.3-cp311-cp311-win_amd64.whl", hash = "sha256:ecf182bf59bd541a8876deccf0360f5ae60496fd50b58510048020751cf1724c"}, - {file = "tokenizers-0.13.3-cp37-cp37m-macosx_10_11_x86_64.whl", hash = "sha256:0527dc5436a1f6bf2c0327da3145687d3bcfbeab91fed8458920093de3901b44"}, - {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:07cbb2c307627dc99b44b22ef05ff4473aa7c7cc1fec8f0a8b37d8a64b1a16d2"}, - {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4560dbdeaae5b7ee0d4e493027e3de6d53c991b5002d7ff95083c99e11dd5ac0"}, - {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64064bd0322405c9374305ab9b4c07152a1474370327499911937fd4a76d004b"}, - {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8c6e2ab0f2e3d939ca66aa1d596602105fe33b505cd2854a4c1717f704c51de"}, - {file = "tokenizers-0.13.3-cp37-cp37m-win32.whl", hash = "sha256:6cc29d410768f960db8677221e497226e545eaaea01aa3613fa0fdf2cc96cff4"}, - {file = "tokenizers-0.13.3-cp37-cp37m-win_amd64.whl", hash = "sha256:fc2a7fdf864554a0dacf09d32e17c0caa9afe72baf9dd7ddedc61973bae352d8"}, - {file = "tokenizers-0.13.3-cp38-cp38-macosx_10_11_x86_64.whl", hash = 
"sha256:8791dedba834c1fc55e5f1521be325ea3dafb381964be20684b92fdac95d79b7"}, - {file = "tokenizers-0.13.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:d607a6a13718aeb20507bdf2b96162ead5145bbbfa26788d6b833f98b31b26e1"}, - {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3791338f809cd1bf8e4fee6b540b36822434d0c6c6bc47162448deee3f77d425"}, - {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c2f35f30e39e6aab8716f07790f646bdc6e4a853816cc49a95ef2a9016bf9ce6"}, - {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:310204dfed5aa797128b65d63538a9837cbdd15da2a29a77d67eefa489edda26"}, - {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0f9b92ea052305166559f38498b3b0cae159caea712646648aaa272f7160963"}, - {file = "tokenizers-0.13.3-cp38-cp38-win32.whl", hash = "sha256:9a3fa134896c3c1f0da6e762d15141fbff30d094067c8f1157b9fdca593b5806"}, - {file = "tokenizers-0.13.3-cp38-cp38-win_amd64.whl", hash = "sha256:8e7b0cdeace87fa9e760e6a605e0ae8fc14b7d72e9fc19c578116f7287bb873d"}, - {file = "tokenizers-0.13.3-cp39-cp39-macosx_10_11_x86_64.whl", hash = "sha256:00cee1e0859d55507e693a48fa4aef07060c4bb6bd93d80120e18fea9371c66d"}, - {file = "tokenizers-0.13.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:a23ff602d0797cea1d0506ce69b27523b07e70f6dda982ab8cf82402de839088"}, - {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70ce07445050b537d2696022dafb115307abdffd2a5c106f029490f84501ef97"}, - {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:280ffe95f50eaaf655b3a1dc7ff1d9cf4777029dbbc3e63a74e65a056594abc3"}, - {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97acfcec592f7e9de8cadcdcda50a7134423ac8455c0166b28c9ff04d227b371"}, - {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd7730c98a3010cd4f523465867ff95cd9d6430db46676ce79358f65ae39797b"}, - {file = "tokenizers-0.13.3-cp39-cp39-win32.whl", hash = "sha256:48625a108029cb1ddf42e17a81b5a3230ba6888a70c9dc14e81bc319e812652d"}, - {file = "tokenizers-0.13.3-cp39-cp39-win_amd64.whl", hash = "sha256:bc0a6f1ba036e482db6453571c9e3e60ecd5489980ffd95d11dc9f960483d783"}, - {file = "tokenizers-0.13.3.tar.gz", hash = "sha256:2e546dbb68b623008a5442353137fbb0123d311a6d7ba52f2667c8862a75af2e"}, + {file = "tokenizers-0.15.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:52f6130c9cbf70544287575a985bf44ae1bda2da7e8c24e97716080593638012"}, + {file = "tokenizers-0.15.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:054c1cc9c6d68f7ffa4e810b3d5131e0ba511b6e4be34157aa08ee54c2f8d9ee"}, + {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a9b9b070fdad06e347563b88c278995735292ded1132f8657084989a4c84a6d5"}, + {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea621a7eef4b70e1f7a4e84dd989ae3f0eeb50fc8690254eacc08acb623e82f1"}, + {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cf7fd9a5141634fa3aa8d6b7be362e6ae1b4cda60da81388fa533e0b552c98fd"}, + {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44f2a832cd0825295f7179eaf173381dc45230f9227ec4b44378322d900447c9"}, + {file = 
"tokenizers-0.15.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8b9ec69247a23747669ec4b0ca10f8e3dfb3545d550258129bd62291aabe8605"}, + {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40b6a4c78da863ff26dbd5ad9a8ecc33d8a8d97b535172601cf00aee9d7ce9ce"}, + {file = "tokenizers-0.15.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5ab2a4d21dcf76af60e05af8063138849eb1d6553a0d059f6534357bce8ba364"}, + {file = "tokenizers-0.15.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a47acfac7e511f6bbfcf2d3fb8c26979c780a91e06fb5b9a43831b2c0153d024"}, + {file = "tokenizers-0.15.2-cp310-none-win32.whl", hash = "sha256:064ff87bb6acdbd693666de9a4b692add41308a2c0ec0770d6385737117215f2"}, + {file = "tokenizers-0.15.2-cp310-none-win_amd64.whl", hash = "sha256:3b919afe4df7eb6ac7cafd2bd14fb507d3f408db7a68c43117f579c984a73843"}, + {file = "tokenizers-0.15.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:89cd1cb93e4b12ff39bb2d626ad77e35209de9309a71e4d3d4672667b4b256e7"}, + {file = "tokenizers-0.15.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cfed5c64e5be23d7ee0f0e98081a25c2a46b0b77ce99a4f0605b1ec43dd481fa"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a907d76dcfda37023ba203ab4ceeb21bc5683436ebefbd895a0841fd52f6f6f2"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20ea60479de6fc7b8ae756b4b097572372d7e4032e2521c1bbf3d90c90a99ff0"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:48e2b9335be2bc0171df9281385c2ed06a15f5cf121c44094338306ab7b33f2c"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:112a1dd436d2cc06e6ffdc0b06d55ac019a35a63afd26475205cb4b1bf0bfbff"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4620cca5c2817177ee8706f860364cc3a8845bc1e291aaf661fb899e5d1c45b0"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ccd73a82751c523b3fc31ff8194702e4af4db21dc20e55b30ecc2079c5d43cb7"}, + {file = "tokenizers-0.15.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:107089f135b4ae7817affe6264f8c7a5c5b4fd9a90f9439ed495f54fcea56fb4"}, + {file = "tokenizers-0.15.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0ff110ecc57b7aa4a594396525a3451ad70988e517237fe91c540997c4e50e29"}, + {file = "tokenizers-0.15.2-cp311-none-win32.whl", hash = "sha256:6d76f00f5c32da36c61f41c58346a4fa7f0a61be02f4301fd30ad59834977cc3"}, + {file = "tokenizers-0.15.2-cp311-none-win_amd64.whl", hash = "sha256:cc90102ed17271cf0a1262babe5939e0134b3890345d11a19c3145184b706055"}, + {file = "tokenizers-0.15.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f86593c18d2e6248e72fb91c77d413a815153b8ea4e31f7cd443bdf28e467670"}, + {file = "tokenizers-0.15.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0774bccc6608eca23eb9d620196687c8b2360624619623cf4ba9dc9bd53e8b51"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d0222c5b7c9b26c0b4822a82f6a7011de0a9d3060e1da176f66274b70f846b98"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3835738be1de66624fff2f4f6f6684775da4e9c00bde053be7564cbf3545cc66"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", 
hash = "sha256:0143e7d9dcd811855c1ce1ab9bf5d96d29bf5e528fd6c7824d0465741e8c10fd"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db35825f6d54215f6b6009a7ff3eedee0848c99a6271c870d2826fbbedf31a38"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3f5e64b0389a2be47091d8cc53c87859783b837ea1a06edd9d8e04004df55a5c"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e0480c452217edd35eca56fafe2029fb4d368b7c0475f8dfa3c5c9c400a7456"}, + {file = "tokenizers-0.15.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a33ab881c8fe70474980577e033d0bc9a27b7ab8272896e500708b212995d834"}, + {file = "tokenizers-0.15.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a308a607ca9de2c64c1b9ba79ec9a403969715a1b8ba5f998a676826f1a7039d"}, + {file = "tokenizers-0.15.2-cp312-none-win32.whl", hash = "sha256:b8fcfa81bcb9447df582c5bc96a031e6df4da2a774b8080d4f02c0c16b42be0b"}, + {file = "tokenizers-0.15.2-cp312-none-win_amd64.whl", hash = "sha256:38d7ab43c6825abfc0b661d95f39c7f8af2449364f01d331f3b51c94dcff7221"}, + {file = "tokenizers-0.15.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:38bfb0204ff3246ca4d5e726e8cc8403bfc931090151e6eede54d0e0cf162ef0"}, + {file = "tokenizers-0.15.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c861d35e8286a53e06e9e28d030b5a05bcbf5ac9d7229e561e53c352a85b1fc"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:936bf3842db5b2048eaa53dade907b1160f318e7c90c74bfab86f1e47720bdd6"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:620beacc3373277700d0e27718aa8b25f7b383eb8001fba94ee00aeea1459d89"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2735ecbbf37e52db4ea970e539fd2d450d213517b77745114f92867f3fc246eb"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:473c83c5e2359bb81b0b6fde870b41b2764fcdd36d997485e07e72cc3a62264a"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:968fa1fb3c27398b28a4eca1cbd1e19355c4d3a6007f7398d48826bbe3a0f728"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:865c60ae6eaebdde7da66191ee9b7db52e542ed8ee9d2c653b6d190a9351b980"}, + {file = "tokenizers-0.15.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7c0d8b52664ab2d4a8d6686eb5effc68b78608a9008f086a122a7b2996befbab"}, + {file = "tokenizers-0.15.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f33dfbdec3784093a9aebb3680d1f91336c56d86cc70ddf88708251da1fe9064"}, + {file = "tokenizers-0.15.2-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:d44ba80988ff9424e33e0a49445072ac7029d8c0e1601ad25a0ca5f41ed0c1d6"}, + {file = "tokenizers-0.15.2-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:dce74266919b892f82b1b86025a613956ea0ea62a4843d4c4237be2c5498ed3a"}, + {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0ef06b9707baeb98b316577acb04f4852239d856b93e9ec3a299622f6084e4be"}, + {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c73e2e74bbb07910da0d37c326869f34113137b23eadad3fc00856e6b3d9930c"}, + {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:4eeb12daf02a59e29f578a865f55d87cd103ce62bd8a3a5874f8fdeaa82e336b"}, + {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ba9f6895af58487ca4f54e8a664a322f16c26bbb442effd01087eba391a719e"}, + {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ccec77aa7150e38eec6878a493bf8c263ff1fa8a62404e16c6203c64c1f16a26"}, + {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3f40604f5042ff210ba82743dda2b6aa3e55aa12df4e9f2378ee01a17e2855e"}, + {file = "tokenizers-0.15.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5645938a42d78c4885086767c70923abad047163d809c16da75d6b290cb30bbe"}, + {file = "tokenizers-0.15.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:05a77cbfebe28a61ab5c3891f9939cc24798b63fa236d84e5f29f3a85a200c00"}, + {file = "tokenizers-0.15.2-cp37-none-win32.whl", hash = "sha256:361abdc068e8afe9c5b818769a48624687fb6aaed49636ee39bec4e95e1a215b"}, + {file = "tokenizers-0.15.2-cp37-none-win_amd64.whl", hash = "sha256:7ef789f83eb0f9baeb4d09a86cd639c0a5518528f9992f38b28e819df397eb06"}, + {file = "tokenizers-0.15.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4fe1f74a902bee74a3b25aff180fbfbf4f8b444ab37c4d496af7afd13a784ed2"}, + {file = "tokenizers-0.15.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4c4b89038a684f40a6b15d6b09f49650ac64d951ad0f2a3ea9169687bbf2a8ba"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d05a1b06f986d41aed5f2de464c003004b2df8aaf66f2b7628254bcbfb72a438"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:508711a108684111ec8af89d3a9e9e08755247eda27d0ba5e3c50e9da1600f6d"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:daa348f02d15160cb35439098ac96e3a53bacf35885072611cd9e5be7d333daa"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:494fdbe5932d3416de2a85fc2470b797e6f3226c12845cadf054dd906afd0442"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c2d60f5246f4da9373f75ff18d64c69cbf60c3bca597290cea01059c336d2470"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93268e788825f52de4c7bdcb6ebc1fcd4a5442c02e730faa9b6b08f23ead0e24"}, + {file = "tokenizers-0.15.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6fc7083ab404019fc9acafe78662c192673c1e696bd598d16dc005bd663a5cf9"}, + {file = "tokenizers-0.15.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:41e39b41e5531d6b2122a77532dbea60e171ef87a3820b5a3888daa847df4153"}, + {file = "tokenizers-0.15.2-cp38-none-win32.whl", hash = "sha256:06cd0487b1cbfabefb2cc52fbd6b1f8d4c37799bd6c6e1641281adaa6b2504a7"}, + {file = "tokenizers-0.15.2-cp38-none-win_amd64.whl", hash = "sha256:5179c271aa5de9c71712e31cb5a79e436ecd0d7532a408fa42a8dbfa4bc23fd9"}, + {file = "tokenizers-0.15.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82f8652a74cc107052328b87ea8b34291c0f55b96d8fb261b3880216a9f9e48e"}, + {file = "tokenizers-0.15.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:02458bee6f5f3139f1ebbb6d042b283af712c0981f5bc50edf771d6b762d5e4f"}, + {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c9a09cd26cca2e1c349f91aa665309ddb48d71636370749414fbf67bc83c5343"}, + {file = 
"tokenizers-0.15.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:158be8ea8554e5ed69acc1ce3fbb23a06060bd4bbb09029431ad6b9a466a7121"}, + {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ddba9a2b0c8c81633eca0bb2e1aa5b3a15362b1277f1ae64176d0f6eba78ab1"}, + {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ef5dd1d39797044642dbe53eb2bc56435308432e9c7907728da74c69ee2adca"}, + {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:454c203164e07a860dbeb3b1f4a733be52b0edbb4dd2e5bd75023ffa8b49403a"}, + {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0cf6b7f1d4dc59af960e6ffdc4faffe6460bbfa8dce27a58bf75755ffdb2526d"}, + {file = "tokenizers-0.15.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2ef09bbc16519f6c25d0c7fc0c6a33a6f62923e263c9d7cca4e58b8c61572afb"}, + {file = "tokenizers-0.15.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c9a2ebdd2ad4ec7a68e7615086e633857c85e2f18025bd05d2a4399e6c5f7169"}, + {file = "tokenizers-0.15.2-cp39-none-win32.whl", hash = "sha256:918fbb0eab96fe08e72a8c2b5461e9cce95585d82a58688e7f01c2bd546c79d0"}, + {file = "tokenizers-0.15.2-cp39-none-win_amd64.whl", hash = "sha256:524e60da0135e106b254bd71f0659be9f89d83f006ea9093ce4d1fab498c6d0d"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6a9b648a58281c4672212fab04e60648fde574877d0139cd4b4f93fe28ca8944"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7c7d18b733be6bbca8a55084027f7be428c947ddf871c500ee603e375013ffba"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:13ca3611de8d9ddfbc4dc39ef54ab1d2d4aaa114ac8727dfdc6a6ec4be017378"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:237d1bf3361cf2e6463e6c140628e6406766e8b27274f5fcc62c747ae3c6f094"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67a0fe1e49e60c664915e9fb6b0cb19bac082ab1f309188230e4b2920230edb3"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4e022fe65e99230b8fd89ebdfea138c24421f91c1a4f4781a8f5016fd5cdfb4d"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d857be2df69763362ac699f8b251a8cd3fac9d21893de129bc788f8baaef2693"}, + {file = "tokenizers-0.15.2-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:708bb3e4283177236309e698da5fcd0879ce8fd37457d7c266d16b550bcbbd18"}, + {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:64c35e09e9899b72a76e762f9854e8750213f67567787d45f37ce06daf57ca78"}, + {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1257f4394be0d3b00de8c9e840ca5601d0a4a8438361ce9c2b05c7d25f6057b"}, + {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02272fe48280e0293a04245ca5d919b2c94a48b408b55e858feae9618138aeda"}, + {file = "tokenizers-0.15.2-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:dc3ad9ebc76eabe8b1d7c04d38be884b8f9d60c0cdc09b0aa4e3bcf746de0388"}, + {file = "tokenizers-0.15.2-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = 
"sha256:32e16bdeffa7c4f46bf2152172ca511808b952701d13e7c18833c0b73cb5c23f"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:fb16ba563d59003028b678d2361a27f7e4ae0ab29c7a80690efa20d829c81fdb"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:2277c36d2d6cdb7876c274547921a42425b6810d38354327dd65a8009acf870c"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1cf75d32e8d250781940d07f7eece253f2fe9ecdb1dc7ba6e3833fa17b82fcbc"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1b3b31884dc8e9b21508bb76da80ebf7308fdb947a17affce815665d5c4d028"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b10122d8d8e30afb43bb1fe21a3619f62c3e2574bff2699cf8af8b0b6c5dc4a3"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d88b96ff0fe8e91f6ef01ba50b0d71db5017fa4e3b1d99681cec89a85faf7bf7"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:37aaec5a52e959892870a7c47cef80c53797c0db9149d458460f4f31e2fb250e"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e2ea752f2b0fe96eb6e2f3adbbf4d72aaa1272079b0dfa1145507bd6a5d537e6"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4b19a808d8799fda23504a5cd31d2f58e6f52f140380082b352f877017d6342b"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:64c86e5e068ac8b19204419ed8ca90f9d25db20578f5881e337d203b314f4104"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de19c4dc503c612847edf833c82e9f73cd79926a384af9d801dcf93f110cea4e"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea09acd2fe3324174063d61ad620dec3bcf042b495515f27f638270a7d466e8b"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cf27fd43472e07b57cf420eee1e814549203d56de00b5af8659cb99885472f1f"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7ca22bd897537a0080521445d91a58886c8c04084a6a19e6c78c586e0cfa92a5"}, + {file = "tokenizers-0.15.2.tar.gz", hash = "sha256:e6e9c6e019dd5484be5beafc775ae6c925f4c69a3487040ed09b45e13df2cb91"}, ] +[package.dependencies] +huggingface_hub = ">=0.16.4,<1.0" + [package.extras] -dev = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] -docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"] +dev = ["tokenizers[testing]"] +docs = ["setuptools_rust", "sphinx", "sphinx_rtd_theme"] testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] [[package]] @@ -4706,6 +5144,75 @@ files = [ {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, ] +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "torch" +version = "2.2.1" +description = "Tensors and Dynamic neural networks in Python with strong GPU 
acceleration" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "torch-2.2.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:8d3bad336dd2c93c6bcb3268e8e9876185bda50ebde325ef211fb565c7d15273"}, + {file = "torch-2.2.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:5297f13370fdaca05959134b26a06a7f232ae254bf2e11a50eddec62525c9006"}, + {file = "torch-2.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:5f5dee8433798888ca1415055f5e3faf28a3bad660e4c29e1014acd3275ab11a"}, + {file = "torch-2.2.1-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:b6d78338acabf1fb2e88bf4559d837d30230cf9c3e4337261f4d83200df1fcbe"}, + {file = "torch-2.2.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:6ab3ea2e29d1aac962e905142bbe50943758f55292f1b4fdfb6f4792aae3323e"}, + {file = "torch-2.2.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:d86664ec85902967d902e78272e97d1aff1d331f7619d398d3ffab1c9b8e9157"}, + {file = "torch-2.2.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:d6227060f268894f92c61af0a44c0d8212e19cb98d05c20141c73312d923bc0a"}, + {file = "torch-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:77e990af75fb1675490deb374d36e726f84732cd5677d16f19124934b2409ce9"}, + {file = "torch-2.2.1-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:46085e328d9b738c261f470231e987930f4cc9472d9ffb7087c7a1343826ac51"}, + {file = "torch-2.2.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:2d9e7e5ecbb002257cf98fae13003abbd620196c35f85c9e34c2adfb961321ec"}, + {file = "torch-2.2.1-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:ada53aebede1c89570e56861b08d12ba4518a1f8b82d467c32665ec4d1f4b3c8"}, + {file = "torch-2.2.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:be21d4c41ecebed9e99430dac87de1439a8c7882faf23bba7fea3fea7b906ac1"}, + {file = "torch-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:79848f46196750367dcdf1d2132b722180b9d889571e14d579ae82d2f50596c5"}, + {file = "torch-2.2.1-cp312-none-macosx_10_9_x86_64.whl", hash = "sha256:7ee804847be6be0032fbd2d1e6742fea2814c92bebccb177f0d3b8e92b2d2b18"}, + {file = "torch-2.2.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:84b2fb322ab091039fdfe74e17442ff046b258eb5e513a28093152c5b07325a7"}, + {file = "torch-2.2.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:5c0c83aa7d94569997f1f474595e808072d80b04d34912ce6f1a0e1c24b0c12a"}, + {file = "torch-2.2.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:91a1b598055ba06b2c386415d2e7f6ac818545e94c5def597a74754940188513"}, + {file = "torch-2.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:8f93ddf3001ecec16568390b507652644a3a103baa72de3ad3b9c530e3277098"}, + {file = "torch-2.2.1-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:0e8bdd4c77ac2584f33ee14c6cd3b12767b4da508ec4eed109520be7212d1069"}, + {file = "torch-2.2.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:6a21bcd7076677c97ca7db7506d683e4e9db137e8420eb4a68fb67c3668232a7"}, + {file = "torch-2.2.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:f1b90ac61f862634039265cd0f746cc9879feee03ff962c803486301b778714b"}, + {file = "torch-2.2.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:ed9e29eb94cd493b36bca9cb0b1fd7f06a0688215ad1e4b3ab4931726e0ec092"}, + {file = "torch-2.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:c47bc25744c743f3835831a20efdcfd60aeb7c3f9804a213f61e45803d16c2a5"}, + {file = "torch-2.2.1-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:0952549bcb43448c8d860d5e3e947dd18cbab491b14638e21750cb3090d5ad3e"}, + {file = "torch-2.2.1-cp39-none-macosx_11_0_arm64.whl", hash = 
"sha256:26bd2272ec46fc62dcf7d24b2fb284d44fcb7be9d529ebf336b9860350d674ed"}, +] + +[package.dependencies] +filelock = "*" +fsspec = "*" +jinja2 = "*" +networkx = "*" +nvidia-cublas-cu12 = {version = "12.1.3.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-cupti-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-nvrtc-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-runtime-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cudnn-cu12 = {version = "8.9.2.26", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cufft-cu12 = {version = "11.0.2.54", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-curand-cu12 = {version = "10.3.2.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusolver-cu12 = {version = "11.4.5.107", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusparse-cu12 = {version = "12.1.0.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nccl-cu12 = {version = "2.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nvtx-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +sympy = "*" +triton = {version = "2.2.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.12\""} +typing-extensions = ">=4.8.0" + +[package.extras] +opt-einsum = ["opt-einsum (>=3.3)"] +optree = ["optree (>=0.9.1)"] + [[package]] name = "tornado" version = "6.4" @@ -4761,6 +5268,97 @@ files = [ docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<7.5)", "pytest-mock", "pytest-mypy-testing"] +[[package]] +name = "transformers" +version = "4.38.2" +description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "transformers-4.38.2-py3-none-any.whl", hash = "sha256:c4029cb9f01b3dd335e52f364c52d2b37c65b4c78e02e6a08b1919c5c928573e"}, + {file = "transformers-4.38.2.tar.gz", hash = "sha256:c5fc7ad682b8a50a48b2a4c05d4ea2de5567adb1bdd00053619dbe5960857dd5"}, +] + +[package.dependencies] +filelock = "*" +huggingface-hub = ">=0.19.3,<1.0" +numpy = ">=1.17" +packaging = ">=20.0" +pyyaml = ">=5.1" +regex = "!=2019.12.17" +requests = "*" +safetensors = ">=0.4.1" +tokenizers = ">=0.14,<0.19" +tqdm = ">=4.27" + +[package.extras] +accelerate = ["accelerate (>=0.21.0)"] +agents = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch"] +all = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm", "tokenizers (>=0.14,<0.19)", "torch", "torchaudio", "torchvision"] +audio 
= ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +codecarbon = ["codecarbon (==1.2.0)"] +deepspeed = ["accelerate (>=0.21.0)", "deepspeed (>=0.9.3)"] +deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.21.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] +dev = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm", "tokenizers (>=0.14,<0.19)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.14,<0.19)", "urllib3 (<2.0.0)"] +dev-torch = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu 
(>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm", "tokenizers (>=0.14,<0.19)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +docs = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "hf-doc-builder", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm", "tokenizers (>=0.14,<0.19)", "torch", "torchaudio", "torchvision"] +docs-specific = ["hf-doc-builder"] +flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)"] +flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +ftfy = ["ftfy"] +integrations = ["optuna", "ray[tune] (>=2.7.0)", "sigopt"] +ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0,<1.3.1)", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"] +modelcreation = ["cookiecutter (==1.7.3)"] +natten = ["natten (>=0.14.6,<0.15.0)"] +onnx = ["onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"] +onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"] +optuna = ["optuna"] +quality = ["GitPython (<3.1.19)", "datasets (!=2.5.0)", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "ruff (==0.1.5)", "urllib3 (<2.0.0)"] +ray = ["ray[tune] (>=2.7.0)"] +retrieval = ["datasets (!=2.5.0)", "faiss-cpu"] +sagemaker = ["sagemaker (>=2.31.0)"] +sentencepiece = ["protobuf", "sentencepiece (>=0.1.91,!=0.1.92)"] +serving = ["fastapi", "pydantic", "starlette", "uvicorn"] +sigopt = ["sigopt"] +sklearn = ["scikit-learn"] +speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] +testing = ["GitPython (<3.1.19)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "tensorboard", "timeout-decorator"] +tf = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] +tf-cpu = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow-cpu (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] +tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +timm = ["timm"] +tokenizers = ["tokenizers (>=0.14,<0.19)"] +torch = ["accelerate (>=0.21.0)", "torch"] +torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] +torch-vision = ["Pillow (>=10.0.1,<=15.0)", "torchvision"] +torchhub = ["filelock", "huggingface-hub (>=0.19.3,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.14,<0.19)", "torch", "tqdm (>=4.27)"] +video = ["av (==9.2.0)", "decord (==0.6.0)"] +vision = 
["Pillow (>=10.0.1,<=15.0)"] + +[[package]] +name = "triton" +version = "2.2.0" +description = "A language and compiler for custom Deep Learning operations" +optional = false +python-versions = "*" +files = [ + {file = "triton-2.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2294514340cfe4e8f4f9e5c66c702744c4a117d25e618bd08469d0bfed1e2e5"}, + {file = "triton-2.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da58a152bddb62cafa9a857dd2bc1f886dbf9f9c90a2b5da82157cd2b34392b0"}, + {file = "triton-2.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af58716e721460a61886668b205963dc4d1e4ac20508cc3f623aef0d70283d5"}, + {file = "triton-2.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8fe46d3ab94a8103e291bd44c741cc294b91d1d81c1a2888254cbf7ff846dab"}, + {file = "triton-2.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8ce26093e539d727e7cf6f6f0d932b1ab0574dc02567e684377630d86723ace"}, + {file = "triton-2.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:227cc6f357c5efcb357f3867ac2a8e7ecea2298cd4606a8ba1e931d1d5a947df"}, +] + +[package.dependencies] +filelock = "*" + +[package.extras] +build = ["cmake (>=3.20)", "lit"] +tests = ["autopep8", "flake8", "isort", "numpy", "pytest", "scipy (>=1.7.1)", "torch"] +tutorials = ["matplotlib", "pandas", "tabulate", "torch"] + [[package]] name = "typer" version = "0.9.0" @@ -5595,4 +6193,4 @@ weaviate = ["weaviate-client"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.12" -content-hash = "233e4dfd38ce1f291ed8e966663cce04ffedfa6b0363f3c92447b23467b983e6" +content-hash = "3d584715c2e1e090a222116cc007e4c6689c475f6ccc281796a221be59b615e2" diff --git a/pyproject.toml b/pyproject.toml index adc6f20368..cb8462de12 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -105,8 +105,11 @@ sphinx-automodapi = { version = "0.16.0", optional = true } [tool.poetry.group.dev.dependencies] pytest = "^6.2.5" +transformers = "^4.38.2" +torch = "^2.2.1" +pytest-mock = "^3.12.0" ruff = "^0.3.0" - +black = "^24.2.0" [tool.poetry.extras] chromadb = ["chromadb"] diff --git a/tests/modules/test_hf_model.py b/tests/modules/test_hf_model.py new file mode 100644 index 0000000000..0b06c80429 --- /dev/null +++ b/tests/modules/test_hf_model.py @@ -0,0 +1,31 @@ +from pytest_mock.plugin import MockerFixture +from transformers import AutoModelForSeq2SeqLM + +import dspy + + +class MockConfig: + def __init__(self, architectures: list[str]): + self.architectures = architectures + + +def test_load_gated_model(mocker: MockerFixture): + conf = MockConfig(architectures=["ConditionalGeneration"]) + mocker.patch("transformers.AutoModelForSeq2SeqLM.from_pretrained") + mocker.patch("transformers.AutoConfig.from_pretrained", return_value=conf) + mocker.patch("transformers.AutoTokenizer.from_pretrained") + + some_token = "asdfasdfasdf" + model = "google/gemma-7b" + _ = dspy.HFModel(model, token=some_token) + AutoModelForSeq2SeqLM.from_pretrained.assert_called_with(model, device_map="auto", token=some_token) + + +def test_load_ungated_model(mocker: MockerFixture): + conf = MockConfig(architectures=["ConditionalGeneration"]) + mocker.patch("transformers.AutoModelForSeq2SeqLM.from_pretrained") + mocker.patch("transformers.AutoConfig.from_pretrained", return_value=conf) + mocker.patch("transformers.AutoTokenizer.from_pretrained") + _ = dspy.HFModel("openai-community/gpt2") + # no token used in automodel + 
AutoModelForSeq2SeqLM.from_pretrained.assert_called_with("openai-community/gpt2", device_map="auto", token=None) From fc62d608f54f3461d9b188fc36b401060945c381 Mon Sep 17 00:00:00 2001 From: Connor Shorten Date: Sat, 9 Mar 2024 12:45:19 -0500 Subject: [PATCH 180/243] Update pyproject.toml --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 878601f558..4f29d0af6d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -20,7 +20,6 @@ classifiers = [ ] # We have both project and tool.poetry.dependencies. Should we remove one? dependencies = [ - "anthropic~=0.18.0", "backoff~=2.2.1", "joblib~=1.3.2", "openai>=0.28.1,<2.0.0", @@ -35,6 +34,7 @@ dependencies = [ ] [project.optional-dependencies] +anthropic = ["anthropic~=0.18.0"], chromadb = ["chromadb~=0.4.14"] qdrant = ["qdrant-client~=1.6.2", "fastembed~=0.1.0"] marqo = ["marqo"] @@ -75,7 +75,6 @@ keywords = ["dspy", "ai", "language models", "llm", "openai"] [tool.poetry.dependencies] python = ">=3.9,<3.12" pydantic = "2.5.0" -anthropic = "^0.18.0" backoff = "^2.2.1" joblib = "^1.3.2" openai = "^0.28.1" @@ -86,6 +85,7 @@ tqdm = "^4.66.1" datasets = "^2.14.6" requests = "^2.31.0" optuna = "^3.4.0" +anthropic = { version = "^0.18.0", optional = true } chromadb = { version = "^0.4.14", optional = true } fastembed = { version = "^0.1.0", optional = true } marqo = { version = "*", optional = true } From e505dcdf518a4f9376cef3d3e007af28824ed0b0 Mon Sep 17 00:00:00 2001 From: Connor Shorten Date: Sat, 9 Mar 2024 12:48:32 -0500 Subject: [PATCH 181/243] Update pyproject.toml (Whoops left a comma) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 4f29d0af6d..d31d44e18e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,7 +34,7 @@ dependencies = [ ] [project.optional-dependencies] -anthropic = ["anthropic~=0.18.0"], +anthropic = ["anthropic~=0.18.0"] chromadb = ["chromadb~=0.4.14"] qdrant = ["qdrant-client~=1.6.2", "fastembed~=0.1.0"] marqo = ["marqo"] From 424ee2b15c14e0c00fb49ba6c880a07b9cdc2e72 Mon Sep 17 00:00:00 2001 From: Connor Shorten Date: Sat, 9 Mar 2024 12:49:24 -0500 Subject: [PATCH 182/243] Match version --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index d31d44e18e..0637ba7faf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "dspy-ai" -version = "2.3.7" +version = "2.4.1" description = "DSPy" readme = "README.md" authors = [{ name = "Omar Khattab", email = "okhattab@stanford.edu" }] From 638d8e5130014a12174e5b83c5b58eb3dd96180b Mon Sep 17 00:00:00 2001 From: Connor Shorten Date: Sat, 9 Mar 2024 13:00:34 -0500 Subject: [PATCH 183/243] update linter --- pyproject.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 0637ba7faf..713466775c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -173,7 +173,6 @@ exclude_lines = [ line-length = 120 indent-width = 4 target-version = "py39" -extend-unsafe-fixes = ["D"] [tool.ruff.lint] # List of rules: https://docs.astral.sh/ruff/rules From 829e573f7f5c97d9bd381ffc2b8115564071b975 Mon Sep 17 00:00:00 2001 From: Connor Shorten Date: Sat, 9 Mar 2024 13:03:17 -0500 Subject: [PATCH 184/243] run ruff linter --- dsp/modules/__init__.py | 2 +- dsp/modules/anthropic.py | 12 +++++------ dsp/modules/aws_lm.py | 11 +++------- dsp/modules/azure_openai.py | 1 - 
dsp/modules/azurecognitivesearch.py | 10 ++++----- dsp/modules/clarifai.py | 1 + dsp/modules/cohere.py | 5 ++--- dsp/modules/finetuning/finetune_hf.py | 6 ++---- dsp/modules/google.py | 5 ++--- dsp/modules/gpt3.py | 1 - dsp/modules/hf.py | 3 +-- dsp/modules/ollama.py | 1 - dsp/modules/pyserini.py | 25 ++++++++++------------- dsp/modules/sentence_vectorizer.py | 25 +++++++++-------------- dsp/primitives/demonstrate.py | 10 +++------ dsp/primitives/predict.py | 1 - dsp/utils/ann_utils.py | 6 ++---- dsp/utils/dpr.py | 15 ++++++-------- dsp/utils/settings.py | 4 +--- dsp/utils/utils.py | 20 ++++++------------ dspy/datasets/dataset.py | 8 +++----- dspy/predict/aggregation.py | 8 +++----- dspy/predict/langchain.py | 3 ++- dspy/primitives/assertions.py | 3 +-- dspy/primitives/module.py | 4 +--- dspy/primitives/program.py | 3 +-- dspy/primitives/python_interpreter.py | 4 ++-- dspy/retrieve/chromadb_rm.py | 7 ++----- dspy/retrieve/clarifai_rm.py | 4 ++-- dspy/retrieve/databricks_rm.py | 3 +-- dspy/retrieve/deeplake_rm.py | 8 ++------ dspy/retrieve/marqo_rm.py | 3 +-- dspy/retrieve/pgvector_rm.py | 6 ++---- dspy/retrieve/pinecone_rm.py | 7 ++----- dspy/retrieve/qdrant_rm.py | 4 ++-- dspy/retrieve/vectara_rm.py | 5 +++-- dspy/retrieve/weaviate_rm.py | 5 ++--- dspy/retrieve/weaviate_rm_test.py | 3 +-- dspy/retrieve/you_rm.py | 1 - dspy/teleprompt/ensemble.py | 1 - dspy/teleprompt/signature_opt.py | 5 +++-- dspy/teleprompt/signature_opt_bayesian.py | 6 ++++-- testing/tasks/biodex.py | 19 +++++------------ 43 files changed, 105 insertions(+), 179 deletions(-) diff --git a/dsp/modules/__init__.py b/dsp/modules/__init__.py index 06e1d6f074..fdf59eabc8 100644 --- a/dsp/modules/__init__.py +++ b/dsp/modules/__init__.py @@ -1,3 +1,4 @@ +from .anthropic import Claude from .azure_openai import AzureOpenAI from .bedrock import * from .cache_utils import * @@ -13,4 +14,3 @@ from .pyserini import * from .sbert import * from .sentence_vectorizer import * -from .anthropic import Claude diff --git a/dsp/modules/anthropic.py b/dsp/modules/anthropic.py index 99c9f225a6..1e26175513 100644 --- a/dsp/modules/anthropic.py +++ b/dsp/modules/anthropic.py @@ -1,12 +1,11 @@ +import logging import os +from typing import Any, Optional + import backoff -import json -from typing import Optional, Any from anthropic import Anthropic, RateLimitError from dsp.modules.lm import LM -import logging - logger = logging.getLogger(__name__) @@ -23,7 +22,7 @@ def backoff_hdlr(details): def giveup_hdlr(details): - """wrapper function that decides when to give up on retry""" + """Wrapper function that decides when to give up on retry""" if "rate limits" in details.message: return False return True @@ -36,7 +35,7 @@ def __init__( model: str = "claude-instant-1.2", api_key: Optional[str] = None, api_base: Optional[str] = None, - **kwargs + **kwargs, ): super().__init__(model) self.provider = "anthropic" @@ -105,7 +104,6 @@ def __call__(self, prompt, only_completed=True, return_sorted=False, **kwargs): Returns: list[str]: list of completion choices """ - assert only_completed, "for now" assert return_sorted is False, "for now" diff --git a/dsp/modules/aws_lm.py b/dsp/modules/aws_lm.py index a44e30a492..ee20e2c9a5 100644 --- a/dsp/modules/aws_lm.py +++ b/dsp/modules/aws_lm.py @@ -1,5 +1,4 @@ -""" -A generalized AWS LLM. +"""A generalized AWS LLM. 
""" from __future__ import annotations @@ -17,8 +16,7 @@ class AWSLM(LM): - """ - This class adds support for an AWS model + """This class adds support for an AWS model """ def __init__( @@ -34,7 +32,6 @@ def __init__( """_summary_ Args: - service_name (str): Used in context of invoking the boto3 API. region_name (str, optional): The AWS region where this LM is hosted. model (str, optional): An LM name, e.g., a bedrock name or an AWS endpoint. @@ -101,7 +98,6 @@ def _simple_api_call(self, formatted_prompt: str, **kwargs) -> str | list[str]: def basic_request(self, prompt, **kwargs) -> str | list[str]: """Query the endpoint.""" - # Remove any texts that are too long formatted_prompt: str if self._truncate_long_prompt_prompts: @@ -166,8 +162,7 @@ def __call__( return_sorted: bool = False, **kwargs, ) -> list[str]: - """ - Query the AWS LLM. + """Query the AWS LLM. There is only support for only_completed=True and return_sorted=False right now. diff --git a/dsp/modules/azure_openai.py b/dsp/modules/azure_openai.py index c90f634e4f..b9c9843954 100644 --- a/dsp/modules/azure_openai.py +++ b/dsp/modules/azure_openai.py @@ -193,7 +193,6 @@ def __call__( Returns: list[dict[str, Any]]: list of completion choices """ - assert only_completed, "for now" assert return_sorted is False, "for now" diff --git a/dsp/modules/azurecognitivesearch.py b/dsp/modules/azurecognitivesearch.py index fedc59b015..232cd24b52 100644 --- a/dsp/modules/azurecognitivesearch.py +++ b/dsp/modules/azurecognitivesearch.py @@ -44,18 +44,16 @@ def __call__(self, query: str, k: int = 10) -> Union[list[str], list[dotdict]]: return [dotdict(psg) for psg in topk] def azure_search_request(key_content: str, key_score: str, client: SearchClient, query: str, top: int =1): - ''' - Search in Azure Cognitive Search Index - ''' + """Search in Azure Cognitive Search Index + """ results = client.search(search_text=query,top=top) results = process_azure_result(results, key_content, key_content) return results def process_azure_result(results:SearchItemPaged, content_key:str, content_score: str): - ''' - process received result from Azure Cognitive Search as dictionary array and map content and score to correct format - ''' + """Process received result from Azure Cognitive Search as dictionary array and map content and score to correct format + """ res = [] for result in results: tmp = {} diff --git a/dsp/modules/clarifai.py b/dsp/modules/clarifai.py index 2d5839c62b..3afc197793 100644 --- a/dsp/modules/clarifai.py +++ b/dsp/modules/clarifai.py @@ -11,6 +11,7 @@ class ClarifaiLLM(LM): model (str, optional): Clarifai URL of the model. Defaults to "Mistral-7B-Instruct". api_key (Optional[str], optional): CLARIFAI_PAT token. Defaults to None. **kwargs: Additional arguments to pass to the API provider. + Example: import dspy dspy.configure(lm=dspy.Clarifai(model=MODEL_URL, diff --git a/dsp/modules/cohere.py b/dsp/modules/cohere.py index 7308d59355..827ece89e8 100644 --- a/dsp/modules/cohere.py +++ b/dsp/modules/cohere.py @@ -23,7 +23,7 @@ def backoff_hdlr(details): def giveup_hdlr(details): - """wrapper function that decides when to give up on retry""" + """Wrapper function that decides when to give up on retry""" if "rate limits" in details.message: return False return True @@ -42,8 +42,7 @@ def __init__( stop_sequences: list[str] = [], **kwargs, ): - """ - Parameters + """Parameters ---------- model : str Which pre-trained model from Cohere to use? 
diff --git a/dsp/modules/finetuning/finetune_hf.py b/dsp/modules/finetuning/finetune_hf.py index a22ab10fcb..3ee2d0c500 100644 --- a/dsp/modules/finetuning/finetune_hf.py +++ b/dsp/modules/finetuning/finetune_hf.py @@ -229,8 +229,7 @@ def _train_seq2seq(model, tokenizer, tokenized_dataset, metric, config): def smart_tokenizer_and_embedding_resize(special_tokens_dict, tokenizer, model): - """ - Resize tokenizer and embedding. + """Resize tokenizer and embedding. Note: This is the unoptimized version that may make your embedding size not be divisible by 64. """ num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict) @@ -249,8 +248,7 @@ def smart_tokenizer_and_embedding_resize(special_tokens_dict, tokenizer, model): @dataclass class DataCollatorForSupervisedDataset: - """ - Collate examples for supervised fine-tuning. + """Collate examples for supervised fine-tuning. """ tokenizer: PreTrainedTokenizer diff --git a/dsp/modules/google.py b/dsp/modules/google.py index 5d97534ea8..902f46d276 100644 --- a/dsp/modules/google.py +++ b/dsp/modules/google.py @@ -25,7 +25,7 @@ def backoff_hdlr(details): def giveup_hdlr(details): - """wrapper function that decides when to give up on retry""" + """Wrapper function that decides when to give up on retry""" if "rate limits" in details.message: return False return True @@ -64,8 +64,7 @@ def __init__( safety_settings: Optional[Iterable] = BLOCK_ONLY_HIGH, **kwargs, ): - """ - Parameters + """Parameters ---------- model : str Which pre-trained model from Google to use? diff --git a/dsp/modules/gpt3.py b/dsp/modules/gpt3.py index d0b343af3c..2055b0756d 100644 --- a/dsp/modules/gpt3.py +++ b/dsp/modules/gpt3.py @@ -173,7 +173,6 @@ def __call__( Returns: list[dict[str, Any]]: list of completion choices """ - assert only_completed, "for now" assert return_sorted is False, "for now" diff --git a/dsp/modules/hf.py b/dsp/modules/hf.py index aad0c0e36c..54a0ad75a5 100644 --- a/dsp/modules/hf.py +++ b/dsp/modules/hf.py @@ -28,7 +28,7 @@ def openai_to_hf(**kwargs): class HFModel(LM): def __init__(self, model: str, checkpoint: Optional[str] = None, is_client: bool = False, hf_device_map: Literal["auto", "balanced", "balanced_low_0", "sequential"] = "auto"): - """wrapper for Hugging Face models + """Wrapper for Hugging Face models Args: model (str): HF model identifier to load and use @@ -37,7 +37,6 @@ def __init__(self, model: str, checkpoint: Optional[str] = None, is_client: bool hf_device_map (str, optional): HF config strategy to load the model. Recommeded to use "auto", which will help loading large models using accelerate. Defaults to "auto". """ - super().__init__(model) self.provider = "hf" self.is_client = is_client diff --git a/dsp/modules/ollama.py b/dsp/modules/ollama.py index 27304d271e..a28c4a6466 100644 --- a/dsp/modules/ollama.py +++ b/dsp/modules/ollama.py @@ -164,7 +164,6 @@ def __call__( Returns: list[dict[str, Any]]: list of completion choices """ - assert only_completed, "for now" assert return_sorted is False, "for now" diff --git a/dsp/modules/pyserini.py b/dsp/modules/pyserini.py index 5523a76e77..fcea328f12 100644 --- a/dsp/modules/pyserini.py +++ b/dsp/modules/pyserini.py @@ -15,21 +15,18 @@ def __init__(self, dataset: Dataset = None, id_field: str = '_id', text_fields: list[str] = ['text']) -> None: + """Args: + query_encoder (`str`): + Huggingface model to encode queries + index (`str`): + Either a prebuilt index from pyserini or a local path to a faiss index + dataset (`Dataset`): + Only required when using a local faiss index. 
The dataset should be the one that has been put into the faiss index. + id_field (`str`): + The name of the id field of the dataset used for retrieval. + text_fields (`list[str]`): + A list of the names of the text fields for the dataset used for retrieval. """ - Args: - - query_encoder (`str`): - Huggingface model to encode queries - index (`str`): - Either a prebuilt index from pyserini or a local path to a faiss index - dataset (`Dataset`): - Only required when using a local faiss index. The dataset should be the one that has been put into the faiss index. - id_field (`str`): - The name of the id field of the dataset used for retrieval. - text_fields (`list[str]`): - A list of the names of the text fields for the dataset used for retrieval. - """ - # Keep pyserini as an optional dependency from pyserini.prebuilt_index_info import FAISS_INDEX_INFO, IMPACT_INDEX_INFO, TF_INDEX_INFO from pyserini.search import FaissSearcher diff --git a/dsp/modules/sentence_vectorizer.py b/dsp/modules/sentence_vectorizer.py index b878420f70..25587e4a0e 100644 --- a/dsp/modules/sentence_vectorizer.py +++ b/dsp/modules/sentence_vectorizer.py @@ -6,12 +6,11 @@ class BaseSentenceVectorizer(abc.ABC): - ''' - Base Class for Vectorizers. The main purpose is to vectorize text (doc/query) + """Base Class for Vectorizers. The main purpose is to vectorize text (doc/query) for ANN/KNN indexes. `__call__` method takes `List[Example]` as a single input, then extracts `field_to_vectorize` from every Example and convert them into embeddings. You can customize extraction logic in the `_extract_text_from_examples` method. - ''' + """ # embeddings will be computed based on the string in this attribute of Example object field_to_vectorize = 'text_to_vectorize' @@ -29,12 +28,11 @@ def _extract_text_from_examples(self, inp_examples: List) -> List[str]: class SentenceTransformersVectorizer(BaseSentenceVectorizer): - ''' - Vectorizer based on `SentenceTransformers` models. You can pick any model from this link: + """Vectorizer based on `SentenceTransformers` models. You can pick any model from this link: https://huggingface.co/models?library=sentence-transformers More details about models: https://www.sbert.net/docs/pretrained_models.html - ''' + """ def __init__( self, model_name_or_path: str = 'all-MiniLM-L6-v2', @@ -93,10 +91,9 @@ def __call__(self, inp_examples: List) -> np.ndarray: class NaiveGetFieldVectorizer(BaseSentenceVectorizer): - ''' - If embeddings were precomputed, then we could just extract them from the proper field + """If embeddings were precomputed, then we could just extract them from the proper field (set by `field_with_embedding`) from each `Example`. - ''' + """ def __init__(self, field_with_embedding: str = 'vectorized'): self.field_with_embedding = field_with_embedding @@ -110,12 +107,11 @@ def __call__(self, inp_examples: List["Example"]) -> np.ndarray: class CohereVectorizer(BaseSentenceVectorizer): - ''' - This vectorizer uses the Cohere API to convert texts to embeddings. + """This vectorizer uses the Cohere API to convert texts to embeddings. More about the available models: https://docs.cohere.com/reference/embed `api_key` should be passed as an argument and can be retrieved from https://dashboard.cohere.com/api-keys - ''' + """ def __init__( self, api_key: str, @@ -160,11 +156,10 @@ def __call__(self, inp_examples: List["Example"]) -> np.ndarray: class OpenAIVectorizer(BaseSentenceVectorizer): - ''' - This vectorizer uses OpenAI API to convert texts to embeddings. 
Changing `model` is not + """This vectorizer uses OpenAI API to convert texts to embeddings. Changing `model` is not recommended. More about the model: https://openai.com/blog/new-and-improved-embedding-model/ `api_key` should be passed as an argument or as env variable (`OPENAI_API_KEY`). - ''' + """ def __init__( self, model: str = 'text-embedding-ada-002', diff --git a/dsp/primitives/demonstrate.py b/dsp/primitives/demonstrate.py index 7dfe126b53..99a25ee35e 100644 --- a/dsp/primitives/demonstrate.py +++ b/dsp/primitives/demonstrate.py @@ -90,7 +90,6 @@ def sample(train: list[Example], k: int): def all_but(train: list[Example], x: Example) -> list[Example]: """Removes the example x from the train set by comparing the question and history.""" - output = [ y for y in train @@ -127,15 +126,13 @@ def passage_has_answers(passage: str, answers: list[str]) -> bool: def cast_naive_get_only_question_text(inp_example: Example) -> Example: - """ - Extracts question as a field to vectorize with Vectorizer object. `question` field is used. + """Extracts question as a field to vectorize with Vectorizer object. `question` field is used. """ return inp_example.copy(text_to_vectorize=inp_example.question) def cast_naive_get_question_and_answer(inp_example: Example) -> Example: - """ - Extracts question and answer as fields to vectorize with Vectorizer object. + """Extracts question and answer as fields to vectorize with Vectorizer object. `question` and `answer` fields are used. They will be concatenated with the word "Answer" between. """ @@ -150,8 +147,7 @@ def knn( cast: Callable[[Example], Example] = cast_naive_get_only_question_text, **knn_args, ) -> Callable[[Example, int], list[Example]]: - """ - A function that vectorizes train data using `dsm.settings.vectorizer`, then build an ANN/KNN + """A function that vectorizes train data using `dsm.settings.vectorizer`, then build an ANN/KNN index to search similar questions among `train` samples. Args: diff --git a/dsp/primitives/predict.py b/dsp/primitives/predict.py index 41edf6c51c..5aa3420f41 100644 --- a/dsp/primitives/predict.py +++ b/dsp/primitives/predict.py @@ -199,7 +199,6 @@ def majority( def majority_vote_(completions: Completions, normalize: bool, prediction_field: str): """Core logic for majority vote.""" - if not dsp.settings.lm: raise AssertionError("No LM is loaded.") diff --git a/dsp/utils/ann_utils.py b/dsp/utils/ann_utils.py index dcd3f09ce1..ea0eff94c1 100644 --- a/dsp/utils/ann_utils.py +++ b/dsp/utils/ann_utils.py @@ -11,8 +11,7 @@ def determine_devices(max_gpu_devices: int = 0) -> Tuple[int, bool]: - """ - Determine which device we should use + """Determine which device we should use Args: max_gpu_devices: an integer value, define how many GPUs we'll use. -1 means all devices. 0 means there are no GPUs. Default is 0. @@ -87,8 +86,7 @@ def create_faiss_index( in_list_dist_type: str = 'L2', centroid_dist_type: str = 'L2', ) -> Index: - """ - Create IVF index (with IP or L2 dist), without adding data and training + """Create IVF index (with IP or L2 dist), without adding data and training Args: emb_dim: size of each embedding n_objects: size of a trainset for index. 
Used to determine optimal type diff --git a/dsp/utils/dpr.py b/dsp/utils/dpr.py index d4d18f84ff..8eaa231f4c 100644 --- a/dsp/utils/dpr.py +++ b/dsp/utils/dpr.py @@ -1,7 +1,6 @@ -""" - Source: DPR Implementation from Facebook Research - https://github.com/facebookresearch/DPR/tree/master/dpr - Original license: https://github.com/facebookresearch/DPR/blob/main/LICENSE +"""Source: DPR Implementation from Facebook Research +https://github.com/facebookresearch/DPR/tree/master/dpr +Original license: https://github.com/facebookresearch/DPR/blob/main/LICENSE """ import unicodedata @@ -146,9 +145,8 @@ class SimpleTokenizer(Tokenizer): NON_WS = r'[^\p{Z}\p{C}]' def __init__(self, **kwargs): - """ - Args: - annotators: None or empty set (only tokenizes). + """Args: + annotators: None or empty set (only tokenizes). """ self._regexp = regex.compile( '(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS), @@ -195,8 +193,7 @@ def has_answer(tokenized_answers, text): def locate_answers(tokenized_answers, text): - """ - Returns each occurrence of an answer as (offset, endpos) in terms of *characters*. + """Returns each occurrence of an answer as (offset, endpos) in terms of *characters*. """ tokenized_text = DPR_tokenize(text) occurrences = [] diff --git a/dsp/utils/settings.py b/dsp/utils/settings.py index 1de0ada7ec..a3e1e91c7b 100644 --- a/dsp/utils/settings.py +++ b/dsp/utils/settings.py @@ -10,10 +10,8 @@ class Settings: _instance = None def __new__(cls): + """Singleton Pattern. See https://python-patterns.guide/gang-of-four/singleton/ """ - Singleton Pattern. See https://python-patterns.guide/gang-of-four/singleton/ - """ - if cls._instance is None: cls._instance = super().__new__(cls) cls._instance.lock = threading.Lock() diff --git a/dsp/utils/utils.py b/dsp/utils/utils.py index d5a30b0e39..69e8d812af 100644 --- a/dsp/utils/utils.py +++ b/dsp/utils/utils.py @@ -47,10 +47,8 @@ def create_directory(path): def deduplicate(seq: list[str]) -> list[str]: + """Source: https://stackoverflow.com/a/480227/1493011 """ - Source: https://stackoverflow.com/a/480227/1493011 - """ - seen = set() return [x for x in seq if not (x in seen or seen.add(x))] @@ -121,11 +119,9 @@ def flatten(L): def zipstar(L, lazy=False): - """ - A much faster A, B, C = zip(*[(a, b, c), (a, b, c), ...]) + """A much faster A, B, C = zip(*[(a, b, c), (a, b, c), ...]) May return lists or tuples. """ - if len(L) == 0: return L @@ -167,10 +163,8 @@ def groupby_first_item(lst): def process_grouped_by_first_item(lst): + """Requires items in list to already be grouped by first item. """ - Requires items in list to already be grouped by first item. 
- """ - groups = defaultdict(list) started = False @@ -194,12 +188,10 @@ def process_grouped_by_first_item(lst): def grouper(iterable, n, fillvalue=None): + """Collect data into fixed-length chunks or blocks + Example: grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx" + Source: https://docs.python.org/3/library/itertools.html#itertools-recipes """ - Collect data into fixed-length chunks or blocks - Example: grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx" - Source: https://docs.python.org/3/library/itertools.html#itertools-recipes - """ - args = [iter(iterable)] * n return itertools.zip_longest(*args, fillvalue=fillvalue) diff --git a/dspy/datasets/dataset.py b/dspy/datasets/dataset.py index c66c5edc23..69225d3b43 100644 --- a/dspy/datasets/dataset.py +++ b/dspy/datasets/dataset.py @@ -56,11 +56,9 @@ def test(self): return self._test_ def _shuffle_and_sample(self, split, data, size, seed=0): - ''' - The setting (seed=s, size=N) is always a subset - of the setting (seed=s, size=M) for N < M. - ''' - + """The setting (seed=s, size=N) is always a subset + of the setting (seed=s, size=M) for N < M. + """ data = list(data) # Shuffle the data irrespective of the requested size. diff --git a/dspy/predict/aggregation.py b/dspy/predict/aggregation.py index 4cb6df3f91..3f73ed020a 100644 --- a/dspy/predict/aggregation.py +++ b/dspy/predict/aggregation.py @@ -5,12 +5,10 @@ def majority(prediction_or_completions, normalize=default_normalize, field=None): + """Returns the most common completion for the target field (or the last field) in the signature. + When normalize returns None, that completion is ignored. + In case of a tie, earlier completion are prioritized. """ - Returns the most common completion for the target field (or the last field) in the signature. - When normalize returns None, that completion is ignored. - In case of a tie, earlier completion are prioritized. - """ - assert any(isinstance(prediction_or_completions, t) for t in [Prediction, Completions, list]) input_type = type(prediction_or_completions) diff --git a/dspy/predict/langchain.py b/dspy/predict/langchain.py index 86d439f50c..9cc00547cd 100644 --- a/dspy/predict/langchain.py +++ b/dspy/predict/langchain.py @@ -17,7 +17,8 @@ class Template2Signature(dspy.Signature): """You are a processor for prompts. I will give you a prompt template (Python f-string) for an arbitrary task for other LMs. -Your job is to prepare three modular pieces: (i) any essential task instructions or guidelines, (ii) a list of variable names for inputs, (iv) the variable name for output.""" + Your job is to prepare three modular pieces: (i) any essential task instructions or guidelines, (ii) a list of variable names for inputs, (iv) the variable name for output. + """ template = dspy.InputField(format=lambda x: f"```\n\n{x.strip()}\n\n```\n\nLet's now prepare three modular pieces.") essential_instructions = dspy.OutputField() diff --git a/dspy/primitives/assertions.py b/dspy/primitives/assertions.py index 19f1aea3a1..aba9c24921 100644 --- a/dspy/primitives/assertions.py +++ b/dspy/primitives/assertions.py @@ -326,8 +326,7 @@ def forward(self, *args, **kwargs): def assert_transform_module( module, assertion_handler=default_assertion_handler, **handler_args, ): - """ - Transform a module to handle assertions. + """Transform a module to handle assertions. 
""" if not getattr(module, "forward", False): raise ValueError( diff --git a/dspy/primitives/module.py b/dspy/primitives/module.py index bee80338f5..9c22c799cb 100644 --- a/dspy/primitives/module.py +++ b/dspy/primitives/module.py @@ -8,10 +8,8 @@ def __init__(self): pass def named_parameters(self): + """Unlike PyTorch, handles (non-recursive) lists of parameters too. """ - Unlike PyTorch, handles (non-recursive) lists of parameters too. - """ - from dspy.predict.parameter import Parameter visited = set() diff --git a/dspy/primitives/program.py b/dspy/primitives/program.py index aa499d9085..de286127fa 100644 --- a/dspy/primitives/program.py +++ b/dspy/primitives/program.py @@ -55,8 +55,7 @@ def map_named_predictors(self, func): return self def activate_assertions(self, handler=backtrack_handler, **handler_args): - """ - Activates assertions for the module. + """Activates assertions for the module. The default handler is the backtrack_handler. """ assert_transform_module(self, handler, **handler_args) diff --git a/dspy/primitives/python_interpreter.py b/dspy/primitives/python_interpreter.py index 1b7456c7b0..8264267f10 100644 --- a/dspy/primitives/python_interpreter.py +++ b/dspy/primitives/python_interpreter.py @@ -107,7 +107,7 @@ def __init__(self, action_space: Dict[str, Any], def execute(self, code: str, state: Optional[Dict[str, Any]] = None, fuzz_state: Optional[Dict[str, Any]] = None, keep_state: bool = True) -> Any: - r""" Execute the input python codes in a security environment. + r"""Execute the input python codes in a security environment. Args: code (str): Generated python code to be executed. @@ -585,7 +585,7 @@ def execute( represents the value of the last statement (excluding "import") in the code. This value could potentially be the desired result of the LLM-generated code. - """ + """ # NOTE: Only supports Python code for now. if not interpreter: interpreter = PythonInterpreter(action_space=globals()) diff --git a/dspy/retrieve/chromadb_rm.py b/dspy/retrieve/chromadb_rm.py index 07ef407d1d..f2f5aba206 100644 --- a/dspy/retrieve/chromadb_rm.py +++ b/dspy/retrieve/chromadb_rm.py @@ -1,5 +1,4 @@ -""" -Retriever model for chromadb +"""Retriever model for chromadb """ from typing import List, Optional, Union @@ -35,8 +34,7 @@ class ChromadbRM(dspy.Retrieve): - """ - A retrieval module that uses chromadb to return the top passages for a given query. + """A retrieval module that uses chromadb to return the top passages for a given query. Assumes that the chromadb index has been created and populated with the following metadata: - documents: The text of the passage @@ -94,7 +92,6 @@ def _init_chromadb( Returns: """ - self._chromadb_client = chromadb.Client( Settings( persist_directory=persist_directory, diff --git a/dspy/retrieve/clarifai_rm.py b/dspy/retrieve/clarifai_rm.py index 654234d2cf..3fc132502e 100644 --- a/dspy/retrieve/clarifai_rm.py +++ b/dspy/retrieve/clarifai_rm.py @@ -17,8 +17,7 @@ class ClarifaiRM(dspy.Retrieve): - """ - Retrieval module uses clarifai to return the Top K relevant pasages for the given query. + """Retrieval module uses clarifai to return the Top K relevant pasages for the given query. Assuming that you have ingested the source documents into clarifai App, where it is indexed and stored. 
Args: @@ -60,6 +59,7 @@ def forward( self, query_or_queries: Union[str, List[str]], k: Optional[int] = None, ) -> dspy.Prediction: """Uses clarifai-python SDK search function and retrieves top_k similar passages for given query, + Args: query_or_queries : single query or list of queries k : Top K relevant documents to return diff --git a/dspy/retrieve/databricks_rm.py b/dspy/retrieve/databricks_rm.py index c275bdddc2..5fea160c78 100644 --- a/dspy/retrieve/databricks_rm.py +++ b/dspy/retrieve/databricks_rm.py @@ -9,8 +9,7 @@ class DatabricksRM(dspy.Retrieve): - """ - A retrieval module that uses Databricks Vector Search Endpoint to return the top-k embeddings for a given query. + """A retrieval module that uses Databricks Vector Search Endpoint to return the top-k embeddings for a given query. Args: databricks_index_name (str): Databricks vector search index to query diff --git a/dspy/retrieve/deeplake_rm.py b/dspy/retrieve/deeplake_rm.py index 235108a912..176cda1fc6 100644 --- a/dspy/retrieve/deeplake_rm.py +++ b/dspy/retrieve/deeplake_rm.py @@ -1,5 +1,4 @@ -""" -Retriever model for deeplake +"""Retriever model for deeplake """ from collections import defaultdict @@ -23,9 +22,7 @@ class DeeplakeRM(dspy.Retrieve): - - """ - A retriever module that uses deeplake to return the top passages for a given query. + """A retriever module that uses deeplake to return the top passages for a given query. Assumes that a Deep Lake Vector Store has been created and populated with the following payload: - text: The text of the passage @@ -81,7 +78,6 @@ def embedding_function(self, texts, model="text-embedding-ada-002"): def forward( self, query_or_queries: Union[str, List[str]], k: Optional[int], ) -> dspy.Prediction: - """Search with DeepLake for self.k top passages for query Args: diff --git a/dspy/retrieve/marqo_rm.py b/dspy/retrieve/marqo_rm.py index 29c52fdb46..5ec932e6c9 100644 --- a/dspy/retrieve/marqo_rm.py +++ b/dspy/retrieve/marqo_rm.py @@ -12,8 +12,7 @@ ) class MarqoRM(dspy.Retrieve): - """ - A retrieval module that uses Marqo to return the top passages for a given query. + """A retrieval module that uses Marqo to return the top passages for a given query. Assumes that a Marqo index has been created and populated with the following payload: - document: The text of the passage diff --git a/dspy/retrieve/pgvector_rm.py b/dspy/retrieve/pgvector_rm.py index cf1773f171..295461b5af 100644 --- a/dspy/retrieve/pgvector_rm.py +++ b/dspy/retrieve/pgvector_rm.py @@ -15,8 +15,7 @@ class PgVectorRM(dspy.Retrieve): - """ - Implements a retriever that (as the name suggests) uses pgvector to retrieve passages, + """Implements a retriever that (as the name suggests) uses pgvector to retrieve passages, using a raw SQL query and a postgresql connection managed by psycopg2. 
It needs to register the pgvector extension with the psycopg2 connection @@ -65,8 +64,7 @@ def __init__( embedding_field: str = "embedding", fields: List[str] = ['text'], ): - """ - k = 20 is the number of paragraphs to retrieve + """K = 20 is the number of paragraphs to retrieve """ self.openai_client = openai_client diff --git a/dspy/retrieve/pinecone_rm.py b/dspy/retrieve/pinecone_rm.py index 0328bc6ba1..70589a0f69 100644 --- a/dspy/retrieve/pinecone_rm.py +++ b/dspy/retrieve/pinecone_rm.py @@ -1,5 +1,4 @@ -""" -Retriever model for Pinecone +"""Retriever model for Pinecone Author: Dhar Rawal (@drawal1) """ @@ -34,8 +33,7 @@ ERRORS = (openai.RateLimitError, openai.APIError) class PineconeRM(dspy.Retrieve): - """ - A retrieval module that uses Pinecone to return the top passages for a given query. + """A retrieval module that uses Pinecone to return the top passages for a given query. Assumes that the Pinecone index has been created and populated with the following metadata: - text: The text of the passage @@ -135,7 +133,6 @@ def _init_pinecone( Returns: pinecone.Index: The loaded index. """ - # Pinecone init overrides default if kwargs are present, so we need to exclude if None kwargs = {} if api_key: diff --git a/dspy/retrieve/qdrant_rm.py b/dspy/retrieve/qdrant_rm.py index 5c2af050b9..7ddaa47b79 100644 --- a/dspy/retrieve/qdrant_rm.py +++ b/dspy/retrieve/qdrant_rm.py @@ -14,8 +14,7 @@ class QdrantRM(dspy.Retrieve): - """ - A retrieval module that uses Qdrant to return the top passages for a given query. + """A retrieval module that uses Qdrant to return the top passages for a given query. Assumes that a Qdrant collection has been created and populated with the following payload: - document: The text of the passage @@ -59,6 +58,7 @@ def forward(self, query_or_queries: Union[str, List[str]], k: Optional[int]) -> Args: query_or_queries (Union[str, List[str]]): The query or queries to search for. k (Optional[int]): The number of top passages to retrieve. Defaults to self.k. + Returns: dspy.Prediction: An object containing the retrieved passages. """ diff --git a/dspy/retrieve/vectara_rm.py b/dspy/retrieve/vectara_rm.py index 047c70d6c9..8899935204 100644 --- a/dspy/retrieve/vectara_rm.py +++ b/dspy/retrieve/vectara_rm.py @@ -15,8 +15,7 @@ def remove_snippet(s: str) -> str: return s.replace(START_SNIPPET, "").replace(END_SNIPPET, "") class VectaraRM(dspy.Retrieve): - """ - A retrieval module that uses Vectara to return the top passages for a given query. + """A retrieval module that uses Vectara to return the top passages for a given query. Assumes that a Vectara corpus has been created and populated with the following payload: - document: The text of the passage @@ -70,6 +69,7 @@ def _vectara_query( limit: int = 3, ) -> List[str]: """Query Vectara index to get for top k matching passages. + Args: query: query string """ @@ -135,6 +135,7 @@ def forward(self, query_or_queries: Union[str, List[str]], k: Optional[int]) -> Args: query_or_queries (Union[str, List[str]]): The query or queries to search for. k (Optional[int]): The number of top passages to retrieve. Defaults to self.k. + Returns: dspy.Prediction: An object containing the retrieved passages. """ diff --git a/dspy/retrieve/weaviate_rm.py b/dspy/retrieve/weaviate_rm.py index 61e8d1ef06..4951032aad 100644 --- a/dspy/retrieve/weaviate_rm.py +++ b/dspy/retrieve/weaviate_rm.py @@ -12,8 +12,7 @@ class WeaviateRM(dspy.Retrieve): - """ - A retrieval module that uses Weaviate to return the top passages for a given query. 
+ """A retrieval module that uses Weaviate to return the top passages for a given query. Assumes that a Weaviate collection has been created and populated with the following payload: - content: The text of the passage @@ -59,10 +58,10 @@ def forward(self, query_or_queries: Union[str, List[str]], k: Optional[int]) -> Args: query_or_queries (Union[str, List[str]]): The query or queries to search for. k (Optional[int]): The number of top passages to retrieve. Defaults to self.k. + Returns: dspy.Prediction: An object containing the retrieved passages. """ - k = k if k is not None else self.k queries = ( [query_or_queries] diff --git a/dspy/retrieve/weaviate_rm_test.py b/dspy/retrieve/weaviate_rm_test.py index fa2c97237c..d638639b27 100644 --- a/dspy/retrieve/weaviate_rm_test.py +++ b/dspy/retrieve/weaviate_rm_test.py @@ -7,8 +7,7 @@ # Connect DSPy # Test this API -""" -from dspy.retrieve.weaviate_rm import WeaviateRM +"""from dspy.retrieve.weaviate_rm import WeaviateRM retriever_model = WeaviateRM("WeaviateBlogChunk", weaviate_client=weaviate_client) dspy.settings.configure(rm=retriever_model) diff --git a/dspy/retrieve/you_rm.py b/dspy/retrieve/you_rm.py index 25a62ccea8..8534a3645f 100644 --- a/dspy/retrieve/you_rm.py +++ b/dspy/retrieve/you_rm.py @@ -27,7 +27,6 @@ def forward(self, query_or_queries: Union[str, List[str]], k: Optional[int] = No Returns: dspy.Prediction: An object containing the retrieved passages. """ - k = k if k is not None else self.k queries = ( diff --git a/dspy/teleprompt/ensemble.py b/dspy/teleprompt/ensemble.py index 5e0db9bcac..7ed13deb49 100644 --- a/dspy/teleprompt/ensemble.py +++ b/dspy/teleprompt/ensemble.py @@ -9,7 +9,6 @@ class Ensemble(Teleprompter): def __init__(self, *, reduce_fn=None, size=None, deterministic=False): """A common reduce_fn is dspy.majority.""" - assert deterministic is False, "TODO: Implement example hashing for deterministic ensemble." self.reduce_fn = reduce_fn diff --git a/dspy/teleprompt/signature_opt.py b/dspy/teleprompt/signature_opt.py index d71aa69a7d..47789ae9f1 100644 --- a/dspy/teleprompt/signature_opt.py +++ b/dspy/teleprompt/signature_opt.py @@ -41,7 +41,8 @@ class BasicGenerateInstruction(Signature): class GenerateInstructionGivenAttempts(dspy.Signature): """You are an instruction optimizer for large language models. I will give some task instructions I've tried, along with their corresponding validation scores. The instructions are arranged in increasing order based on their scores, where higher scores indicate better quality. -Your task is to propose a new instruction that will lead a good language model to perform the task even better. Don't be afraid to be creative.""" + Your task is to propose a new instruction that will lead a good language model to perform the task even better. Don't be afraid to be creative. 
+ """ attempted_instructions = dspy.InputField(format=dsp.passages2text) proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") @@ -101,7 +102,7 @@ def _print_signature(self, predictor): def compile(self, student, *, devset, eval_kwargs): - """student is a program that needs to be optimized, note that it may be zero-shot or already pre-optimized for demos != []""" + """Student is a program that needs to be optimized, note that it may be zero-shot or already pre-optimized for demos != []""" module = student.deepcopy() evaluate = Evaluate(devset=devset, metric=self.metric, **eval_kwargs) total_calls = 0 diff --git a/dspy/teleprompt/signature_opt_bayesian.py b/dspy/teleprompt/signature_opt_bayesian.py index e316462c94..87fc9ad4e3 100644 --- a/dspy/teleprompt/signature_opt_bayesian.py +++ b/dspy/teleprompt/signature_opt_bayesian.py @@ -58,7 +58,8 @@ class BasicGenerateInstructionWithDataObservations(Signature): class BasicGenerateInstructionWithExamples(dspy.Signature): ("""You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. Specifically, I will also provide you with the current ``basic instruction`` that is being used for this task. I will also provide you with some ``examples`` of the expected inputs and outputs. -Your task is to propose an instruction that will lead a good language model to perform the task well. Don't be afraid to be creative.""") + Your task is to propose an instruction that will lead a good language model to perform the task well. Don't be afraid to be creative. + """) # attempted_instructions = dspy.InputField(format=str, desc="Previously attempted task instructions, along with their resulting validation score, and an example of the instruction in use on a sample from our dataset.") basic_instruction = dspy.InputField(desc="The initial instructions before optimization") # examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task") @@ -69,7 +70,8 @@ class BasicGenerateInstructionWithExamples(dspy.Signature): class BasicGenerateInstructionWithExamplesAndDataObservations(dspy.Signature): ("""You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. Specifically, I will also provide you with the current ``basic instruction`` that is being used for this task. I will also provide you with some ``observations`` I have made about the dataset and task, along with some ``examples`` of the expected inputs and outputs. -Your task is to propose a new improved instruction and prefix for the output field that will lead a good language model to perform the task well. Don't be afraid to be creative.""") + Your task is to propose a new improved instruction and prefix for the output field that will lead a good language model to perform the task well. Don't be afraid to be creative. + """) basic_instruction = dspy.InputField(desc="The initial instructions before optimization") observations = dspy.InputField(desc="Observations about the dataset and task") examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task") diff --git a/testing/tasks/biodex.py b/testing/tasks/biodex.py index 6e51745b37..79d4bb84db 100644 --- a/testing/tasks/biodex.py +++ b/testing/tasks/biodex.py @@ -365,10 +365,8 @@ def base_ground(reaction, K): @lru_cache(maxsize=100000) def ground_v1(reaction, K=3): + """Prefers exact matches over fuzzy matches, when available. 
""" - Prefers exact matches over fuzzy matches, when available. - """ - exact_matches, fuzzy_matches = base_ground(reaction, K) matches = exact_matches[:1] or fuzzy_matches @@ -379,10 +377,8 @@ def ground_v1(reaction, K=3): @lru_cache(maxsize=100000) def ground_v2(reaction, K=1): + """When K=1 (default), returns exact matches (if available) or the best fuzzy match. """ - When K=1 (default), returns exact matches (if available) or the best fuzzy match. - """ - exact_matches, fuzzy_matches = base_ground(reaction, K) matches = [(match.score / 100.0, match.node.term) for match in (exact_matches[:1] + fuzzy_matches)] @@ -395,8 +391,7 @@ def ground_v2(reaction, K=1): @lru_cache(maxsize=100000) def ground_v4(reaction, K=3): - """ - Returns the best three matches (including one exact match, if available) and applies a prior. + """Returns the best three matches (including one exact match, if available) and applies a prior. """ exact_matches, fuzzy_matches = base_ground(reaction, K) @@ -408,10 +403,8 @@ def ground_v4(reaction, K=3): @lru_cache(maxsize=100000) def ground_v4b(reaction, K=3): + """Returns the best three matches (including one exact match, if available) and applies a prior. """ - Returns the best three matches (including one exact match, if available) and applies a prior. - """ - exact_matches, fuzzy_matches = base_ground(reaction, K) matches = [((match.score / 100.0) * math.log(match.node.count + 0.1), match.node.term) @@ -422,10 +415,8 @@ def ground_v4b(reaction, K=3): @lru_cache(maxsize=100000) def ground_v4c(reaction, K=3): + """Returns the best three matches (including one exact match, if available) and applies a prior. """ - Returns the best three matches (including one exact match, if available) and applies a prior. - """ - exact_matches, fuzzy_matches = base_ground(reaction, K) matches = [((match.score / 100.0) * (match.node.count + 0.1), match.node.term) From 79e3187a9f9b15f431b49bbb1691127d990a1b3d Mon Sep 17 00:00:00 2001 From: Connor Shorten Date: Sat, 9 Mar 2024 13:05:46 -0500 Subject: [PATCH 185/243] update --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 713466775c..2516f5d987 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,7 +34,7 @@ dependencies = [ ] [project.optional-dependencies] -anthropic = ["anthropic~=0.18.0"] +anthropic = ["anthropic~=0.18.0"], chromadb = ["chromadb~=0.4.14"] qdrant = ["qdrant-client~=1.6.2", "fastembed~=0.1.0"] marqo = ["marqo"] From 275f133dc9b8c30eb327afd584487f0114cbaf13 Mon Sep 17 00:00:00 2001 From: Connor Shorten Date: Sat, 9 Mar 2024 13:06:35 -0500 Subject: [PATCH 186/243] Revert "run ruff linter" This reverts commit 829e573f7f5c97d9bd381ffc2b8115564071b975. 
--- dsp/modules/__init__.py | 2 +- dsp/modules/anthropic.py | 12 ++++++----- dsp/modules/aws_lm.py | 11 +++++++--- dsp/modules/azure_openai.py | 1 + dsp/modules/azurecognitivesearch.py | 10 +++++---- dsp/modules/clarifai.py | 1 - dsp/modules/cohere.py | 5 +++-- dsp/modules/finetuning/finetune_hf.py | 6 ++++-- dsp/modules/google.py | 5 +++-- dsp/modules/gpt3.py | 1 + dsp/modules/hf.py | 3 ++- dsp/modules/ollama.py | 1 + dsp/modules/pyserini.py | 25 +++++++++++++---------- dsp/modules/sentence_vectorizer.py | 25 ++++++++++++++--------- dsp/primitives/demonstrate.py | 10 ++++++--- dsp/primitives/predict.py | 1 + dsp/utils/ann_utils.py | 6 ++++-- dsp/utils/dpr.py | 15 ++++++++------ dsp/utils/settings.py | 4 +++- dsp/utils/utils.py | 20 ++++++++++++------ dspy/datasets/dataset.py | 8 +++++--- dspy/predict/aggregation.py | 8 +++++--- dspy/predict/langchain.py | 3 +-- dspy/primitives/assertions.py | 3 ++- dspy/primitives/module.py | 4 +++- dspy/primitives/program.py | 3 ++- dspy/primitives/python_interpreter.py | 4 ++-- dspy/retrieve/chromadb_rm.py | 7 +++++-- dspy/retrieve/clarifai_rm.py | 4 ++-- dspy/retrieve/databricks_rm.py | 3 ++- dspy/retrieve/deeplake_rm.py | 8 ++++++-- dspy/retrieve/marqo_rm.py | 3 ++- dspy/retrieve/pgvector_rm.py | 6 ++++-- dspy/retrieve/pinecone_rm.py | 7 +++++-- dspy/retrieve/qdrant_rm.py | 4 ++-- dspy/retrieve/vectara_rm.py | 5 ++--- dspy/retrieve/weaviate_rm.py | 5 +++-- dspy/retrieve/weaviate_rm_test.py | 3 ++- dspy/retrieve/you_rm.py | 1 + dspy/teleprompt/ensemble.py | 1 + dspy/teleprompt/signature_opt.py | 5 ++--- dspy/teleprompt/signature_opt_bayesian.py | 6 ++---- testing/tasks/biodex.py | 19 ++++++++++++----- 43 files changed, 179 insertions(+), 105 deletions(-) diff --git a/dsp/modules/__init__.py b/dsp/modules/__init__.py index fdf59eabc8..06e1d6f074 100644 --- a/dsp/modules/__init__.py +++ b/dsp/modules/__init__.py @@ -1,4 +1,3 @@ -from .anthropic import Claude from .azure_openai import AzureOpenAI from .bedrock import * from .cache_utils import * @@ -14,3 +13,4 @@ from .pyserini import * from .sbert import * from .sentence_vectorizer import * +from .anthropic import Claude diff --git a/dsp/modules/anthropic.py b/dsp/modules/anthropic.py index 1e26175513..99c9f225a6 100644 --- a/dsp/modules/anthropic.py +++ b/dsp/modules/anthropic.py @@ -1,11 +1,12 @@ -import logging import os -from typing import Any, Optional - import backoff +import json +from typing import Optional, Any from anthropic import Anthropic, RateLimitError from dsp.modules.lm import LM +import logging + logger = logging.getLogger(__name__) @@ -22,7 +23,7 @@ def backoff_hdlr(details): def giveup_hdlr(details): - """Wrapper function that decides when to give up on retry""" + """wrapper function that decides when to give up on retry""" if "rate limits" in details.message: return False return True @@ -35,7 +36,7 @@ def __init__( model: str = "claude-instant-1.2", api_key: Optional[str] = None, api_base: Optional[str] = None, - **kwargs, + **kwargs ): super().__init__(model) self.provider = "anthropic" @@ -104,6 +105,7 @@ def __call__(self, prompt, only_completed=True, return_sorted=False, **kwargs): Returns: list[str]: list of completion choices """ + assert only_completed, "for now" assert return_sorted is False, "for now" diff --git a/dsp/modules/aws_lm.py b/dsp/modules/aws_lm.py index ee20e2c9a5..a44e30a492 100644 --- a/dsp/modules/aws_lm.py +++ b/dsp/modules/aws_lm.py @@ -1,4 +1,5 @@ -"""A generalized AWS LLM. +""" +A generalized AWS LLM. 
""" from __future__ import annotations @@ -16,7 +17,8 @@ class AWSLM(LM): - """This class adds support for an AWS model + """ + This class adds support for an AWS model """ def __init__( @@ -32,6 +34,7 @@ def __init__( """_summary_ Args: + service_name (str): Used in context of invoking the boto3 API. region_name (str, optional): The AWS region where this LM is hosted. model (str, optional): An LM name, e.g., a bedrock name or an AWS endpoint. @@ -98,6 +101,7 @@ def _simple_api_call(self, formatted_prompt: str, **kwargs) -> str | list[str]: def basic_request(self, prompt, **kwargs) -> str | list[str]: """Query the endpoint.""" + # Remove any texts that are too long formatted_prompt: str if self._truncate_long_prompt_prompts: @@ -162,7 +166,8 @@ def __call__( return_sorted: bool = False, **kwargs, ) -> list[str]: - """Query the AWS LLM. + """ + Query the AWS LLM. There is only support for only_completed=True and return_sorted=False right now. diff --git a/dsp/modules/azure_openai.py b/dsp/modules/azure_openai.py index b9c9843954..c90f634e4f 100644 --- a/dsp/modules/azure_openai.py +++ b/dsp/modules/azure_openai.py @@ -193,6 +193,7 @@ def __call__( Returns: list[dict[str, Any]]: list of completion choices """ + assert only_completed, "for now" assert return_sorted is False, "for now" diff --git a/dsp/modules/azurecognitivesearch.py b/dsp/modules/azurecognitivesearch.py index 232cd24b52..fedc59b015 100644 --- a/dsp/modules/azurecognitivesearch.py +++ b/dsp/modules/azurecognitivesearch.py @@ -44,16 +44,18 @@ def __call__(self, query: str, k: int = 10) -> Union[list[str], list[dotdict]]: return [dotdict(psg) for psg in topk] def azure_search_request(key_content: str, key_score: str, client: SearchClient, query: str, top: int =1): - """Search in Azure Cognitive Search Index - """ + ''' + Search in Azure Cognitive Search Index + ''' results = client.search(search_text=query,top=top) results = process_azure_result(results, key_content, key_content) return results def process_azure_result(results:SearchItemPaged, content_key:str, content_score: str): - """Process received result from Azure Cognitive Search as dictionary array and map content and score to correct format - """ + ''' + process received result from Azure Cognitive Search as dictionary array and map content and score to correct format + ''' res = [] for result in results: tmp = {} diff --git a/dsp/modules/clarifai.py b/dsp/modules/clarifai.py index 3afc197793..2d5839c62b 100644 --- a/dsp/modules/clarifai.py +++ b/dsp/modules/clarifai.py @@ -11,7 +11,6 @@ class ClarifaiLLM(LM): model (str, optional): Clarifai URL of the model. Defaults to "Mistral-7B-Instruct". api_key (Optional[str], optional): CLARIFAI_PAT token. Defaults to None. **kwargs: Additional arguments to pass to the API provider. - Example: import dspy dspy.configure(lm=dspy.Clarifai(model=MODEL_URL, diff --git a/dsp/modules/cohere.py b/dsp/modules/cohere.py index 827ece89e8..7308d59355 100644 --- a/dsp/modules/cohere.py +++ b/dsp/modules/cohere.py @@ -23,7 +23,7 @@ def backoff_hdlr(details): def giveup_hdlr(details): - """Wrapper function that decides when to give up on retry""" + """wrapper function that decides when to give up on retry""" if "rate limits" in details.message: return False return True @@ -42,7 +42,8 @@ def __init__( stop_sequences: list[str] = [], **kwargs, ): - """Parameters + """ + Parameters ---------- model : str Which pre-trained model from Cohere to use? 
diff --git a/dsp/modules/finetuning/finetune_hf.py b/dsp/modules/finetuning/finetune_hf.py index 3ee2d0c500..a22ab10fcb 100644 --- a/dsp/modules/finetuning/finetune_hf.py +++ b/dsp/modules/finetuning/finetune_hf.py @@ -229,7 +229,8 @@ def _train_seq2seq(model, tokenizer, tokenized_dataset, metric, config): def smart_tokenizer_and_embedding_resize(special_tokens_dict, tokenizer, model): - """Resize tokenizer and embedding. + """ + Resize tokenizer and embedding. Note: This is the unoptimized version that may make your embedding size not be divisible by 64. """ num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict) @@ -248,7 +249,8 @@ def smart_tokenizer_and_embedding_resize(special_tokens_dict, tokenizer, model): @dataclass class DataCollatorForSupervisedDataset: - """Collate examples for supervised fine-tuning. + """ + Collate examples for supervised fine-tuning. """ tokenizer: PreTrainedTokenizer diff --git a/dsp/modules/google.py b/dsp/modules/google.py index 902f46d276..5d97534ea8 100644 --- a/dsp/modules/google.py +++ b/dsp/modules/google.py @@ -25,7 +25,7 @@ def backoff_hdlr(details): def giveup_hdlr(details): - """Wrapper function that decides when to give up on retry""" + """wrapper function that decides when to give up on retry""" if "rate limits" in details.message: return False return True @@ -64,7 +64,8 @@ def __init__( safety_settings: Optional[Iterable] = BLOCK_ONLY_HIGH, **kwargs, ): - """Parameters + """ + Parameters ---------- model : str Which pre-trained model from Google to use? diff --git a/dsp/modules/gpt3.py b/dsp/modules/gpt3.py index 2055b0756d..d0b343af3c 100644 --- a/dsp/modules/gpt3.py +++ b/dsp/modules/gpt3.py @@ -173,6 +173,7 @@ def __call__( Returns: list[dict[str, Any]]: list of completion choices """ + assert only_completed, "for now" assert return_sorted is False, "for now" diff --git a/dsp/modules/hf.py b/dsp/modules/hf.py index 54a0ad75a5..aad0c0e36c 100644 --- a/dsp/modules/hf.py +++ b/dsp/modules/hf.py @@ -28,7 +28,7 @@ def openai_to_hf(**kwargs): class HFModel(LM): def __init__(self, model: str, checkpoint: Optional[str] = None, is_client: bool = False, hf_device_map: Literal["auto", "balanced", "balanced_low_0", "sequential"] = "auto"): - """Wrapper for Hugging Face models + """wrapper for Hugging Face models Args: model (str): HF model identifier to load and use @@ -37,6 +37,7 @@ def __init__(self, model: str, checkpoint: Optional[str] = None, is_client: bool hf_device_map (str, optional): HF config strategy to load the model. Recommeded to use "auto", which will help loading large models using accelerate. Defaults to "auto". """ + super().__init__(model) self.provider = "hf" self.is_client = is_client diff --git a/dsp/modules/ollama.py b/dsp/modules/ollama.py index a28c4a6466..27304d271e 100644 --- a/dsp/modules/ollama.py +++ b/dsp/modules/ollama.py @@ -164,6 +164,7 @@ def __call__( Returns: list[dict[str, Any]]: list of completion choices """ + assert only_completed, "for now" assert return_sorted is False, "for now" diff --git a/dsp/modules/pyserini.py b/dsp/modules/pyserini.py index fcea328f12..5523a76e77 100644 --- a/dsp/modules/pyserini.py +++ b/dsp/modules/pyserini.py @@ -15,18 +15,21 @@ def __init__(self, dataset: Dataset = None, id_field: str = '_id', text_fields: list[str] = ['text']) -> None: - """Args: - query_encoder (`str`): - Huggingface model to encode queries - index (`str`): - Either a prebuilt index from pyserini or a local path to a faiss index - dataset (`Dataset`): - Only required when using a local faiss index. 
The dataset should be the one that has been put into the faiss index. - id_field (`str`): - The name of the id field of the dataset used for retrieval. - text_fields (`list[str]`): - A list of the names of the text fields for the dataset used for retrieval. """ + Args: + + query_encoder (`str`): + Huggingface model to encode queries + index (`str`): + Either a prebuilt index from pyserini or a local path to a faiss index + dataset (`Dataset`): + Only required when using a local faiss index. The dataset should be the one that has been put into the faiss index. + id_field (`str`): + The name of the id field of the dataset used for retrieval. + text_fields (`list[str]`): + A list of the names of the text fields for the dataset used for retrieval. + """ + # Keep pyserini as an optional dependency from pyserini.prebuilt_index_info import FAISS_INDEX_INFO, IMPACT_INDEX_INFO, TF_INDEX_INFO from pyserini.search import FaissSearcher diff --git a/dsp/modules/sentence_vectorizer.py b/dsp/modules/sentence_vectorizer.py index 25587e4a0e..b878420f70 100644 --- a/dsp/modules/sentence_vectorizer.py +++ b/dsp/modules/sentence_vectorizer.py @@ -6,11 +6,12 @@ class BaseSentenceVectorizer(abc.ABC): - """Base Class for Vectorizers. The main purpose is to vectorize text (doc/query) + ''' + Base Class for Vectorizers. The main purpose is to vectorize text (doc/query) for ANN/KNN indexes. `__call__` method takes `List[Example]` as a single input, then extracts `field_to_vectorize` from every Example and convert them into embeddings. You can customize extraction logic in the `_extract_text_from_examples` method. - """ + ''' # embeddings will be computed based on the string in this attribute of Example object field_to_vectorize = 'text_to_vectorize' @@ -28,11 +29,12 @@ def _extract_text_from_examples(self, inp_examples: List) -> List[str]: class SentenceTransformersVectorizer(BaseSentenceVectorizer): - """Vectorizer based on `SentenceTransformers` models. You can pick any model from this link: + ''' + Vectorizer based on `SentenceTransformers` models. You can pick any model from this link: https://huggingface.co/models?library=sentence-transformers More details about models: https://www.sbert.net/docs/pretrained_models.html - """ + ''' def __init__( self, model_name_or_path: str = 'all-MiniLM-L6-v2', @@ -91,9 +93,10 @@ def __call__(self, inp_examples: List) -> np.ndarray: class NaiveGetFieldVectorizer(BaseSentenceVectorizer): - """If embeddings were precomputed, then we could just extract them from the proper field + ''' + If embeddings were precomputed, then we could just extract them from the proper field (set by `field_with_embedding`) from each `Example`. - """ + ''' def __init__(self, field_with_embedding: str = 'vectorized'): self.field_with_embedding = field_with_embedding @@ -107,11 +110,12 @@ def __call__(self, inp_examples: List["Example"]) -> np.ndarray: class CohereVectorizer(BaseSentenceVectorizer): - """This vectorizer uses the Cohere API to convert texts to embeddings. + ''' + This vectorizer uses the Cohere API to convert texts to embeddings. More about the available models: https://docs.cohere.com/reference/embed `api_key` should be passed as an argument and can be retrieved from https://dashboard.cohere.com/api-keys - """ + ''' def __init__( self, api_key: str, @@ -156,10 +160,11 @@ def __call__(self, inp_examples: List["Example"]) -> np.ndarray: class OpenAIVectorizer(BaseSentenceVectorizer): - """This vectorizer uses OpenAI API to convert texts to embeddings. 
Changing `model` is not + ''' + This vectorizer uses OpenAI API to convert texts to embeddings. Changing `model` is not recommended. More about the model: https://openai.com/blog/new-and-improved-embedding-model/ `api_key` should be passed as an argument or as env variable (`OPENAI_API_KEY`). - """ + ''' def __init__( self, model: str = 'text-embedding-ada-002', diff --git a/dsp/primitives/demonstrate.py b/dsp/primitives/demonstrate.py index 99a25ee35e..7dfe126b53 100644 --- a/dsp/primitives/demonstrate.py +++ b/dsp/primitives/demonstrate.py @@ -90,6 +90,7 @@ def sample(train: list[Example], k: int): def all_but(train: list[Example], x: Example) -> list[Example]: """Removes the example x from the train set by comparing the question and history.""" + output = [ y for y in train @@ -126,13 +127,15 @@ def passage_has_answers(passage: str, answers: list[str]) -> bool: def cast_naive_get_only_question_text(inp_example: Example) -> Example: - """Extracts question as a field to vectorize with Vectorizer object. `question` field is used. + """ + Extracts question as a field to vectorize with Vectorizer object. `question` field is used. """ return inp_example.copy(text_to_vectorize=inp_example.question) def cast_naive_get_question_and_answer(inp_example: Example) -> Example: - """Extracts question and answer as fields to vectorize with Vectorizer object. + """ + Extracts question and answer as fields to vectorize with Vectorizer object. `question` and `answer` fields are used. They will be concatenated with the word "Answer" between. """ @@ -147,7 +150,8 @@ def knn( cast: Callable[[Example], Example] = cast_naive_get_only_question_text, **knn_args, ) -> Callable[[Example, int], list[Example]]: - """A function that vectorizes train data using `dsm.settings.vectorizer`, then build an ANN/KNN + """ + A function that vectorizes train data using `dsm.settings.vectorizer`, then build an ANN/KNN index to search similar questions among `train` samples. Args: diff --git a/dsp/primitives/predict.py b/dsp/primitives/predict.py index 5aa3420f41..41edf6c51c 100644 --- a/dsp/primitives/predict.py +++ b/dsp/primitives/predict.py @@ -199,6 +199,7 @@ def majority( def majority_vote_(completions: Completions, normalize: bool, prediction_field: str): """Core logic for majority vote.""" + if not dsp.settings.lm: raise AssertionError("No LM is loaded.") diff --git a/dsp/utils/ann_utils.py b/dsp/utils/ann_utils.py index ea0eff94c1..dcd3f09ce1 100644 --- a/dsp/utils/ann_utils.py +++ b/dsp/utils/ann_utils.py @@ -11,7 +11,8 @@ def determine_devices(max_gpu_devices: int = 0) -> Tuple[int, bool]: - """Determine which device we should use + """ + Determine which device we should use Args: max_gpu_devices: an integer value, define how many GPUs we'll use. -1 means all devices. 0 means there are no GPUs. Default is 0. @@ -86,7 +87,8 @@ def create_faiss_index( in_list_dist_type: str = 'L2', centroid_dist_type: str = 'L2', ) -> Index: - """Create IVF index (with IP or L2 dist), without adding data and training + """ + Create IVF index (with IP or L2 dist), without adding data and training Args: emb_dim: size of each embedding n_objects: size of a trainset for index. 
Used to determine optimal type diff --git a/dsp/utils/dpr.py b/dsp/utils/dpr.py index 8eaa231f4c..d4d18f84ff 100644 --- a/dsp/utils/dpr.py +++ b/dsp/utils/dpr.py @@ -1,6 +1,7 @@ -"""Source: DPR Implementation from Facebook Research -https://github.com/facebookresearch/DPR/tree/master/dpr -Original license: https://github.com/facebookresearch/DPR/blob/main/LICENSE +""" + Source: DPR Implementation from Facebook Research + https://github.com/facebookresearch/DPR/tree/master/dpr + Original license: https://github.com/facebookresearch/DPR/blob/main/LICENSE """ import unicodedata @@ -145,8 +146,9 @@ class SimpleTokenizer(Tokenizer): NON_WS = r'[^\p{Z}\p{C}]' def __init__(self, **kwargs): - """Args: - annotators: None or empty set (only tokenizes). + """ + Args: + annotators: None or empty set (only tokenizes). """ self._regexp = regex.compile( '(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS), @@ -193,7 +195,8 @@ def has_answer(tokenized_answers, text): def locate_answers(tokenized_answers, text): - """Returns each occurrence of an answer as (offset, endpos) in terms of *characters*. + """ + Returns each occurrence of an answer as (offset, endpos) in terms of *characters*. """ tokenized_text = DPR_tokenize(text) occurrences = [] diff --git a/dsp/utils/settings.py b/dsp/utils/settings.py index a3e1e91c7b..1de0ada7ec 100644 --- a/dsp/utils/settings.py +++ b/dsp/utils/settings.py @@ -10,8 +10,10 @@ class Settings: _instance = None def __new__(cls): - """Singleton Pattern. See https://python-patterns.guide/gang-of-four/singleton/ """ + Singleton Pattern. See https://python-patterns.guide/gang-of-four/singleton/ + """ + if cls._instance is None: cls._instance = super().__new__(cls) cls._instance.lock = threading.Lock() diff --git a/dsp/utils/utils.py b/dsp/utils/utils.py index 69e8d812af..d5a30b0e39 100644 --- a/dsp/utils/utils.py +++ b/dsp/utils/utils.py @@ -47,8 +47,10 @@ def create_directory(path): def deduplicate(seq: list[str]) -> list[str]: - """Source: https://stackoverflow.com/a/480227/1493011 """ + Source: https://stackoverflow.com/a/480227/1493011 + """ + seen = set() return [x for x in seq if not (x in seen or seen.add(x))] @@ -119,9 +121,11 @@ def flatten(L): def zipstar(L, lazy=False): - """A much faster A, B, C = zip(*[(a, b, c), (a, b, c), ...]) + """ + A much faster A, B, C = zip(*[(a, b, c), (a, b, c), ...]) May return lists or tuples. """ + if len(L) == 0: return L @@ -163,8 +167,10 @@ def groupby_first_item(lst): def process_grouped_by_first_item(lst): - """Requires items in list to already be grouped by first item. """ + Requires items in list to already be grouped by first item. 
+ """ + groups = defaultdict(list) started = False @@ -188,10 +194,12 @@ def process_grouped_by_first_item(lst): def grouper(iterable, n, fillvalue=None): - """Collect data into fixed-length chunks or blocks - Example: grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx" - Source: https://docs.python.org/3/library/itertools.html#itertools-recipes """ + Collect data into fixed-length chunks or blocks + Example: grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx" + Source: https://docs.python.org/3/library/itertools.html#itertools-recipes + """ + args = [iter(iterable)] * n return itertools.zip_longest(*args, fillvalue=fillvalue) diff --git a/dspy/datasets/dataset.py b/dspy/datasets/dataset.py index 69225d3b43..c66c5edc23 100644 --- a/dspy/datasets/dataset.py +++ b/dspy/datasets/dataset.py @@ -56,9 +56,11 @@ def test(self): return self._test_ def _shuffle_and_sample(self, split, data, size, seed=0): - """The setting (seed=s, size=N) is always a subset - of the setting (seed=s, size=M) for N < M. - """ + ''' + The setting (seed=s, size=N) is always a subset + of the setting (seed=s, size=M) for N < M. + ''' + data = list(data) # Shuffle the data irrespective of the requested size. diff --git a/dspy/predict/aggregation.py b/dspy/predict/aggregation.py index 3f73ed020a..4cb6df3f91 100644 --- a/dspy/predict/aggregation.py +++ b/dspy/predict/aggregation.py @@ -5,10 +5,12 @@ def majority(prediction_or_completions, normalize=default_normalize, field=None): - """Returns the most common completion for the target field (or the last field) in the signature. - When normalize returns None, that completion is ignored. - In case of a tie, earlier completion are prioritized. """ + Returns the most common completion for the target field (or the last field) in the signature. + When normalize returns None, that completion is ignored. + In case of a tie, earlier completion are prioritized. + """ + assert any(isinstance(prediction_or_completions, t) for t in [Prediction, Completions, list]) input_type = type(prediction_or_completions) diff --git a/dspy/predict/langchain.py b/dspy/predict/langchain.py index 9cc00547cd..86d439f50c 100644 --- a/dspy/predict/langchain.py +++ b/dspy/predict/langchain.py @@ -17,8 +17,7 @@ class Template2Signature(dspy.Signature): """You are a processor for prompts. I will give you a prompt template (Python f-string) for an arbitrary task for other LMs. - Your job is to prepare three modular pieces: (i) any essential task instructions or guidelines, (ii) a list of variable names for inputs, (iv) the variable name for output. - """ +Your job is to prepare three modular pieces: (i) any essential task instructions or guidelines, (ii) a list of variable names for inputs, (iv) the variable name for output.""" template = dspy.InputField(format=lambda x: f"```\n\n{x.strip()}\n\n```\n\nLet's now prepare three modular pieces.") essential_instructions = dspy.OutputField() diff --git a/dspy/primitives/assertions.py b/dspy/primitives/assertions.py index aba9c24921..19f1aea3a1 100644 --- a/dspy/primitives/assertions.py +++ b/dspy/primitives/assertions.py @@ -326,7 +326,8 @@ def forward(self, *args, **kwargs): def assert_transform_module( module, assertion_handler=default_assertion_handler, **handler_args, ): - """Transform a module to handle assertions. + """ + Transform a module to handle assertions. 
""" if not getattr(module, "forward", False): raise ValueError( diff --git a/dspy/primitives/module.py b/dspy/primitives/module.py index 9c22c799cb..bee80338f5 100644 --- a/dspy/primitives/module.py +++ b/dspy/primitives/module.py @@ -8,8 +8,10 @@ def __init__(self): pass def named_parameters(self): - """Unlike PyTorch, handles (non-recursive) lists of parameters too. """ + Unlike PyTorch, handles (non-recursive) lists of parameters too. + """ + from dspy.predict.parameter import Parameter visited = set() diff --git a/dspy/primitives/program.py b/dspy/primitives/program.py index de286127fa..aa499d9085 100644 --- a/dspy/primitives/program.py +++ b/dspy/primitives/program.py @@ -55,7 +55,8 @@ def map_named_predictors(self, func): return self def activate_assertions(self, handler=backtrack_handler, **handler_args): - """Activates assertions for the module. + """ + Activates assertions for the module. The default handler is the backtrack_handler. """ assert_transform_module(self, handler, **handler_args) diff --git a/dspy/primitives/python_interpreter.py b/dspy/primitives/python_interpreter.py index 8264267f10..1b7456c7b0 100644 --- a/dspy/primitives/python_interpreter.py +++ b/dspy/primitives/python_interpreter.py @@ -107,7 +107,7 @@ def __init__(self, action_space: Dict[str, Any], def execute(self, code: str, state: Optional[Dict[str, Any]] = None, fuzz_state: Optional[Dict[str, Any]] = None, keep_state: bool = True) -> Any: - r"""Execute the input python codes in a security environment. + r""" Execute the input python codes in a security environment. Args: code (str): Generated python code to be executed. @@ -585,7 +585,7 @@ def execute( represents the value of the last statement (excluding "import") in the code. This value could potentially be the desired result of the LLM-generated code. - """ + """ # NOTE: Only supports Python code for now. if not interpreter: interpreter = PythonInterpreter(action_space=globals()) diff --git a/dspy/retrieve/chromadb_rm.py b/dspy/retrieve/chromadb_rm.py index f2f5aba206..07ef407d1d 100644 --- a/dspy/retrieve/chromadb_rm.py +++ b/dspy/retrieve/chromadb_rm.py @@ -1,4 +1,5 @@ -"""Retriever model for chromadb +""" +Retriever model for chromadb """ from typing import List, Optional, Union @@ -34,7 +35,8 @@ class ChromadbRM(dspy.Retrieve): - """A retrieval module that uses chromadb to return the top passages for a given query. + """ + A retrieval module that uses chromadb to return the top passages for a given query. Assumes that the chromadb index has been created and populated with the following metadata: - documents: The text of the passage @@ -92,6 +94,7 @@ def _init_chromadb( Returns: """ + self._chromadb_client = chromadb.Client( Settings( persist_directory=persist_directory, diff --git a/dspy/retrieve/clarifai_rm.py b/dspy/retrieve/clarifai_rm.py index 3fc132502e..654234d2cf 100644 --- a/dspy/retrieve/clarifai_rm.py +++ b/dspy/retrieve/clarifai_rm.py @@ -17,7 +17,8 @@ class ClarifaiRM(dspy.Retrieve): - """Retrieval module uses clarifai to return the Top K relevant pasages for the given query. + """ + Retrieval module uses clarifai to return the Top K relevant pasages for the given query. Assuming that you have ingested the source documents into clarifai App, where it is indexed and stored. 
Args: @@ -59,7 +60,6 @@ def forward( self, query_or_queries: Union[str, List[str]], k: Optional[int] = None, ) -> dspy.Prediction: """Uses clarifai-python SDK search function and retrieves top_k similar passages for given query, - Args: query_or_queries : single query or list of queries k : Top K relevant documents to return diff --git a/dspy/retrieve/databricks_rm.py b/dspy/retrieve/databricks_rm.py index 5fea160c78..c275bdddc2 100644 --- a/dspy/retrieve/databricks_rm.py +++ b/dspy/retrieve/databricks_rm.py @@ -9,7 +9,8 @@ class DatabricksRM(dspy.Retrieve): - """A retrieval module that uses Databricks Vector Search Endpoint to return the top-k embeddings for a given query. + """ + A retrieval module that uses Databricks Vector Search Endpoint to return the top-k embeddings for a given query. Args: databricks_index_name (str): Databricks vector search index to query diff --git a/dspy/retrieve/deeplake_rm.py b/dspy/retrieve/deeplake_rm.py index 176cda1fc6..235108a912 100644 --- a/dspy/retrieve/deeplake_rm.py +++ b/dspy/retrieve/deeplake_rm.py @@ -1,4 +1,5 @@ -"""Retriever model for deeplake +""" +Retriever model for deeplake """ from collections import defaultdict @@ -22,7 +23,9 @@ class DeeplakeRM(dspy.Retrieve): - """A retriever module that uses deeplake to return the top passages for a given query. + + """ + A retriever module that uses deeplake to return the top passages for a given query. Assumes that a Deep Lake Vector Store has been created and populated with the following payload: - text: The text of the passage @@ -78,6 +81,7 @@ def embedding_function(self, texts, model="text-embedding-ada-002"): def forward( self, query_or_queries: Union[str, List[str]], k: Optional[int], ) -> dspy.Prediction: + """Search with DeepLake for self.k top passages for query Args: diff --git a/dspy/retrieve/marqo_rm.py b/dspy/retrieve/marqo_rm.py index 5ec932e6c9..29c52fdb46 100644 --- a/dspy/retrieve/marqo_rm.py +++ b/dspy/retrieve/marqo_rm.py @@ -12,7 +12,8 @@ ) class MarqoRM(dspy.Retrieve): - """A retrieval module that uses Marqo to return the top passages for a given query. + """ + A retrieval module that uses Marqo to return the top passages for a given query. Assumes that a Marqo index has been created and populated with the following payload: - document: The text of the passage diff --git a/dspy/retrieve/pgvector_rm.py b/dspy/retrieve/pgvector_rm.py index 295461b5af..cf1773f171 100644 --- a/dspy/retrieve/pgvector_rm.py +++ b/dspy/retrieve/pgvector_rm.py @@ -15,7 +15,8 @@ class PgVectorRM(dspy.Retrieve): - """Implements a retriever that (as the name suggests) uses pgvector to retrieve passages, + """ + Implements a retriever that (as the name suggests) uses pgvector to retrieve passages, using a raw SQL query and a postgresql connection managed by psycopg2. 
It needs to register the pgvector extension with the psycopg2 connection @@ -64,7 +65,8 @@ def __init__( embedding_field: str = "embedding", fields: List[str] = ['text'], ): - """K = 20 is the number of paragraphs to retrieve + """ + k = 20 is the number of paragraphs to retrieve """ self.openai_client = openai_client diff --git a/dspy/retrieve/pinecone_rm.py b/dspy/retrieve/pinecone_rm.py index 70589a0f69..0328bc6ba1 100644 --- a/dspy/retrieve/pinecone_rm.py +++ b/dspy/retrieve/pinecone_rm.py @@ -1,4 +1,5 @@ -"""Retriever model for Pinecone +""" +Retriever model for Pinecone Author: Dhar Rawal (@drawal1) """ @@ -33,7 +34,8 @@ ERRORS = (openai.RateLimitError, openai.APIError) class PineconeRM(dspy.Retrieve): - """A retrieval module that uses Pinecone to return the top passages for a given query. + """ + A retrieval module that uses Pinecone to return the top passages for a given query. Assumes that the Pinecone index has been created and populated with the following metadata: - text: The text of the passage @@ -133,6 +135,7 @@ def _init_pinecone( Returns: pinecone.Index: The loaded index. """ + # Pinecone init overrides default if kwargs are present, so we need to exclude if None kwargs = {} if api_key: diff --git a/dspy/retrieve/qdrant_rm.py b/dspy/retrieve/qdrant_rm.py index 7ddaa47b79..5c2af050b9 100644 --- a/dspy/retrieve/qdrant_rm.py +++ b/dspy/retrieve/qdrant_rm.py @@ -14,7 +14,8 @@ class QdrantRM(dspy.Retrieve): - """A retrieval module that uses Qdrant to return the top passages for a given query. + """ + A retrieval module that uses Qdrant to return the top passages for a given query. Assumes that a Qdrant collection has been created and populated with the following payload: - document: The text of the passage @@ -58,7 +59,6 @@ def forward(self, query_or_queries: Union[str, List[str]], k: Optional[int]) -> Args: query_or_queries (Union[str, List[str]]): The query or queries to search for. k (Optional[int]): The number of top passages to retrieve. Defaults to self.k. - Returns: dspy.Prediction: An object containing the retrieved passages. """ diff --git a/dspy/retrieve/vectara_rm.py b/dspy/retrieve/vectara_rm.py index 8899935204..047c70d6c9 100644 --- a/dspy/retrieve/vectara_rm.py +++ b/dspy/retrieve/vectara_rm.py @@ -15,7 +15,8 @@ def remove_snippet(s: str) -> str: return s.replace(START_SNIPPET, "").replace(END_SNIPPET, "") class VectaraRM(dspy.Retrieve): - """A retrieval module that uses Vectara to return the top passages for a given query. + """ + A retrieval module that uses Vectara to return the top passages for a given query. Assumes that a Vectara corpus has been created and populated with the following payload: - document: The text of the passage @@ -69,7 +70,6 @@ def _vectara_query( limit: int = 3, ) -> List[str]: """Query Vectara index to get for top k matching passages. - Args: query: query string """ @@ -135,7 +135,6 @@ def forward(self, query_or_queries: Union[str, List[str]], k: Optional[int]) -> Args: query_or_queries (Union[str, List[str]]): The query or queries to search for. k (Optional[int]): The number of top passages to retrieve. Defaults to self.k. - Returns: dspy.Prediction: An object containing the retrieved passages. """ diff --git a/dspy/retrieve/weaviate_rm.py b/dspy/retrieve/weaviate_rm.py index 4951032aad..61e8d1ef06 100644 --- a/dspy/retrieve/weaviate_rm.py +++ b/dspy/retrieve/weaviate_rm.py @@ -12,7 +12,8 @@ class WeaviateRM(dspy.Retrieve): - """A retrieval module that uses Weaviate to return the top passages for a given query. 
+ """ + A retrieval module that uses Weaviate to return the top passages for a given query. Assumes that a Weaviate collection has been created and populated with the following payload: - content: The text of the passage @@ -58,10 +59,10 @@ def forward(self, query_or_queries: Union[str, List[str]], k: Optional[int]) -> Args: query_or_queries (Union[str, List[str]]): The query or queries to search for. k (Optional[int]): The number of top passages to retrieve. Defaults to self.k. - Returns: dspy.Prediction: An object containing the retrieved passages. """ + k = k if k is not None else self.k queries = ( [query_or_queries] diff --git a/dspy/retrieve/weaviate_rm_test.py b/dspy/retrieve/weaviate_rm_test.py index d638639b27..fa2c97237c 100644 --- a/dspy/retrieve/weaviate_rm_test.py +++ b/dspy/retrieve/weaviate_rm_test.py @@ -7,7 +7,8 @@ # Connect DSPy # Test this API -"""from dspy.retrieve.weaviate_rm import WeaviateRM +""" +from dspy.retrieve.weaviate_rm import WeaviateRM retriever_model = WeaviateRM("WeaviateBlogChunk", weaviate_client=weaviate_client) dspy.settings.configure(rm=retriever_model) diff --git a/dspy/retrieve/you_rm.py b/dspy/retrieve/you_rm.py index 8534a3645f..25a62ccea8 100644 --- a/dspy/retrieve/you_rm.py +++ b/dspy/retrieve/you_rm.py @@ -27,6 +27,7 @@ def forward(self, query_or_queries: Union[str, List[str]], k: Optional[int] = No Returns: dspy.Prediction: An object containing the retrieved passages. """ + k = k if k is not None else self.k queries = ( diff --git a/dspy/teleprompt/ensemble.py b/dspy/teleprompt/ensemble.py index 7ed13deb49..5e0db9bcac 100644 --- a/dspy/teleprompt/ensemble.py +++ b/dspy/teleprompt/ensemble.py @@ -9,6 +9,7 @@ class Ensemble(Teleprompter): def __init__(self, *, reduce_fn=None, size=None, deterministic=False): """A common reduce_fn is dspy.majority.""" + assert deterministic is False, "TODO: Implement example hashing for deterministic ensemble." self.reduce_fn = reduce_fn diff --git a/dspy/teleprompt/signature_opt.py b/dspy/teleprompt/signature_opt.py index 47789ae9f1..d71aa69a7d 100644 --- a/dspy/teleprompt/signature_opt.py +++ b/dspy/teleprompt/signature_opt.py @@ -41,8 +41,7 @@ class BasicGenerateInstruction(Signature): class GenerateInstructionGivenAttempts(dspy.Signature): """You are an instruction optimizer for large language models. I will give some task instructions I've tried, along with their corresponding validation scores. The instructions are arranged in increasing order based on their scores, where higher scores indicate better quality. - Your task is to propose a new instruction that will lead a good language model to perform the task even better. Don't be afraid to be creative. - """ +Your task is to propose a new instruction that will lead a good language model to perform the task even better. 
Don't be afraid to be creative.""" attempted_instructions = dspy.InputField(format=dsp.passages2text) proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") @@ -102,7 +101,7 @@ def _print_signature(self, predictor): def compile(self, student, *, devset, eval_kwargs): - """Student is a program that needs to be optimized, note that it may be zero-shot or already pre-optimized for demos != []""" + """student is a program that needs to be optimized, note that it may be zero-shot or already pre-optimized for demos != []""" module = student.deepcopy() evaluate = Evaluate(devset=devset, metric=self.metric, **eval_kwargs) total_calls = 0 diff --git a/dspy/teleprompt/signature_opt_bayesian.py b/dspy/teleprompt/signature_opt_bayesian.py index 87fc9ad4e3..e316462c94 100644 --- a/dspy/teleprompt/signature_opt_bayesian.py +++ b/dspy/teleprompt/signature_opt_bayesian.py @@ -58,8 +58,7 @@ class BasicGenerateInstructionWithDataObservations(Signature): class BasicGenerateInstructionWithExamples(dspy.Signature): ("""You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. Specifically, I will also provide you with the current ``basic instruction`` that is being used for this task. I will also provide you with some ``examples`` of the expected inputs and outputs. - Your task is to propose an instruction that will lead a good language model to perform the task well. Don't be afraid to be creative. - """) +Your task is to propose an instruction that will lead a good language model to perform the task well. Don't be afraid to be creative.""") # attempted_instructions = dspy.InputField(format=str, desc="Previously attempted task instructions, along with their resulting validation score, and an example of the instruction in use on a sample from our dataset.") basic_instruction = dspy.InputField(desc="The initial instructions before optimization") # examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task") @@ -70,8 +69,7 @@ class BasicGenerateInstructionWithExamples(dspy.Signature): class BasicGenerateInstructionWithExamplesAndDataObservations(dspy.Signature): ("""You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. Specifically, I will also provide you with the current ``basic instruction`` that is being used for this task. I will also provide you with some ``observations`` I have made about the dataset and task, along with some ``examples`` of the expected inputs and outputs. - Your task is to propose a new improved instruction and prefix for the output field that will lead a good language model to perform the task well. Don't be afraid to be creative. - """) +Your task is to propose a new improved instruction and prefix for the output field that will lead a good language model to perform the task well. 
Don't be afraid to be creative.""") basic_instruction = dspy.InputField(desc="The initial instructions before optimization") observations = dspy.InputField(desc="Observations about the dataset and task") examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task") diff --git a/testing/tasks/biodex.py b/testing/tasks/biodex.py index 79d4bb84db..6e51745b37 100644 --- a/testing/tasks/biodex.py +++ b/testing/tasks/biodex.py @@ -365,8 +365,10 @@ def base_ground(reaction, K): @lru_cache(maxsize=100000) def ground_v1(reaction, K=3): - """Prefers exact matches over fuzzy matches, when available. """ + Prefers exact matches over fuzzy matches, when available. + """ + exact_matches, fuzzy_matches = base_ground(reaction, K) matches = exact_matches[:1] or fuzzy_matches @@ -377,8 +379,10 @@ def ground_v1(reaction, K=3): @lru_cache(maxsize=100000) def ground_v2(reaction, K=1): - """When K=1 (default), returns exact matches (if available) or the best fuzzy match. """ + When K=1 (default), returns exact matches (if available) or the best fuzzy match. + """ + exact_matches, fuzzy_matches = base_ground(reaction, K) matches = [(match.score / 100.0, match.node.term) for match in (exact_matches[:1] + fuzzy_matches)] @@ -391,7 +395,8 @@ def ground_v2(reaction, K=1): @lru_cache(maxsize=100000) def ground_v4(reaction, K=3): - """Returns the best three matches (including one exact match, if available) and applies a prior. + """ + Returns the best three matches (including one exact match, if available) and applies a prior. """ exact_matches, fuzzy_matches = base_ground(reaction, K) @@ -403,8 +408,10 @@ def ground_v4(reaction, K=3): @lru_cache(maxsize=100000) def ground_v4b(reaction, K=3): - """Returns the best three matches (including one exact match, if available) and applies a prior. """ + Returns the best three matches (including one exact match, if available) and applies a prior. + """ + exact_matches, fuzzy_matches = base_ground(reaction, K) matches = [((match.score / 100.0) * math.log(match.node.count + 0.1), match.node.term) @@ -415,8 +422,10 @@ def ground_v4b(reaction, K=3): @lru_cache(maxsize=100000) def ground_v4c(reaction, K=3): - """Returns the best three matches (including one exact match, if available) and applies a prior. """ + Returns the best three matches (including one exact match, if available) and applies a prior. 
+ """ + exact_matches, fuzzy_matches = base_ground(reaction, K) matches = [((match.score / 100.0) * (match.node.count + 0.1), match.node.term) From 472b166420fccc8adff053a33c7deaf75ae5cae8 Mon Sep 17 00:00:00 2001 From: Connor Shorten Date: Sat, 9 Mar 2024 13:10:08 -0500 Subject: [PATCH 187/243] Update pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 2516f5d987..713466775c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,7 +34,7 @@ dependencies = [ ] [project.optional-dependencies] -anthropic = ["anthropic~=0.18.0"], +anthropic = ["anthropic~=0.18.0"] chromadb = ["chromadb~=0.4.14"] qdrant = ["qdrant-client~=1.6.2", "fastembed~=0.1.0"] marqo = ["marqo"] From 226a0d94672f03a5d33215c5efce473a1a3419be Mon Sep 17 00:00:00 2001 From: arnavsinghvi11 <54859892+arnavsinghvi11@users.noreply.github.com> Date: Sat, 9 Mar 2024 11:47:35 -0800 Subject: [PATCH 188/243] Update databricks_rm.py -added support for doc_ids --- dspy/retrieve/databricks_rm.py | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/dspy/retrieve/databricks_rm.py b/dspy/retrieve/databricks_rm.py index c275bdddc2..721a3553f0 100644 --- a/dspy/retrieve/databricks_rm.py +++ b/dspy/retrieve/databricks_rm.py @@ -1,7 +1,7 @@ import os from collections import defaultdict -from typing import List, Union - +from typing import List, Union, Optional +import json import requests import dspy @@ -19,6 +19,8 @@ class DatabricksRM(dspy.Retrieve): columns (list[str]): Column names to include in response filters_json (str, optional): JSON string for query filters k (int, optional): Number of top embeddings to retrieve. Defaults to 3. + docs_id_column_name (str, optional): Column name for retrieved doc_ids to return. + text_column_name (str, optional): Column name for retrieved text to return. 
Examples: Below is a code snippet that shows how to configure Databricks Vector Search endpoints: @@ -39,7 +41,7 @@ class DatabricksRM(dspy.Retrieve): ) #Creating Vector Search Index using Python SDK - #Example for Direct Vector Acces Index + #Example for Direct Vector Access Index index = client.create_direct_access_index( endpoint_name="your_databricks_host_url", @@ -65,7 +67,7 @@ class DatabricksRM(dspy.Retrieve): self.retrieve = DatabricksRM(query=[1, 2, 3], query_type = 'vector') ``` """ - def __init__(self, databricks_index_name = None, databricks_endpoint = None, databricks_token = None, columns = None, filters_json = None, k = 3): + def __init__(self, databricks_index_name = None, databricks_endpoint = None, databricks_token = None, columns = None, filters_json = None, k = 3, docs_id_column_name = 'id', text_column_name = 'text'): super().__init__(k=k) if not databricks_token and not os.environ.get("DATABRICKS_TOKEN"): raise ValueError("You must supply databricks_token or set environment variable DATABRICKS_TOKEN") @@ -81,6 +83,8 @@ def __init__(self, databricks_index_name = None, databricks_endpoint = None, dat self.columns = columns self.filters_json = filters_json self.k = k + self.docs_id_column_name = docs_id_column_name + self.text_column_name = text_column_name def forward(self, query: Union[str, List[float]], query_type: str = 'vector') -> dspy.Prediction: """Search with Databricks Vector Search Client for self.k top results for query @@ -120,14 +124,20 @@ def forward(self, query: Union[str, List[float]], query_type: str = 'vector') -> results = response.json() docs = defaultdict(float) + doc_ids = [] text, score = None, None for data_row in results["result"]["data_array"]: for col, val in zip(results["manifest"]["columns"], data_row): - if col["name"] == 'text': + if col["name"] == self.docs_id_column_name: + if self.docs_id_column_name == 'metadata': + docs_dict = json.loads(val) + doc_ids.append(str(docs_dict["document_id"])) + else: + doc_ids.append(str(val)) text = val if col["name"] == 'score': score = val docs[text] += score sorted_docs = sorted(docs.items(), key=lambda x: x[1], reverse=True)[:self.k] - return Prediction(docs=[doc for doc, _ in sorted_docs]) + return Prediction(docs=[doc for doc, _ in sorted_docs], doc_ids = doc_ids) From e7549fbf4daac1b352b112f26c77237bb9ad361d Mon Sep 17 00:00:00 2001 From: Arnav Singhvi Date: Sat, 9 Mar 2024 11:50:25 -0800 Subject: [PATCH 189/243] Update databricks_rm.py --- dspy/retrieve/databricks_rm.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/dspy/retrieve/databricks_rm.py b/dspy/retrieve/databricks_rm.py index 721a3553f0..ecddddb855 100644 --- a/dspy/retrieve/databricks_rm.py +++ b/dspy/retrieve/databricks_rm.py @@ -1,7 +1,8 @@ +import json import os from collections import defaultdict -from typing import List, Union, Optional -import json +from typing import List, Union + import requests import dspy From 61ac2f7950b08f97dee215e112e1bd840759b3ee Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Sat, 9 Mar 2024 14:12:56 -0600 Subject: [PATCH 190/243] Add ruff rule back --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 713466775c..0637ba7faf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -173,6 +173,7 @@ exclude_lines = [ line-length = 120 indent-width = 4 target-version = "py39" +extend-unsafe-fixes = ["D"] [tool.ruff.lint] # List of rules: https://docs.astral.sh/ruff/rules From b8fe312863849140d4aad2b497d99a9adc321efc Mon Sep 17 
00:00:00 2001 From: Isaac Miller Date: Sat, 9 Mar 2024 13:51:27 -0600 Subject: [PATCH 191/243] Make anthropic import optional for tests --- dsp/modules/anthropic.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/dsp/modules/anthropic.py b/dsp/modules/anthropic.py index 99c9f225a6..859e43665b 100644 --- a/dsp/modules/anthropic.py +++ b/dsp/modules/anthropic.py @@ -2,11 +2,16 @@ import backoff import json from typing import Optional, Any -from anthropic import Anthropic, RateLimitError from dsp.modules.lm import LM import logging +try: + import anthropic + anthropic_rate_limit = anthropic.RateLimitError +except ImportError: + anthropic_rate_limit = Exception + logger = logging.getLogger(__name__) @@ -39,6 +44,12 @@ def __init__( **kwargs ): super().__init__(model) + + try: + from anthropic import Anthropic, RateLimitError + except ImportError as err: + raise ImportError("Claude requires `pip install anthropic`.") from err + self.provider = "anthropic" self.api_key = api_key = os.environ.get("ANTHROPIC_API_KEY") if api_key is None else api_key self.api_base = BASE_URL if api_base is None else api_base @@ -84,7 +95,7 @@ def basic_request(self, prompt: str, **kwargs): @backoff.on_exception( backoff.expo, - (RateLimitError), + (anthropic_rate_limit), max_time=1000, max_tries=8, on_backoff=backoff_hdlr, From 89036644d4570ca033590796b8c094febd993fe6 Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Sat, 9 Mar 2024 14:14:12 -0600 Subject: [PATCH 192/243] Apply ruff fix --- dsp/modules/__init__.py | 2 +- dsp/modules/anthropic.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/dsp/modules/__init__.py b/dsp/modules/__init__.py index 06e1d6f074..fdf59eabc8 100644 --- a/dsp/modules/__init__.py +++ b/dsp/modules/__init__.py @@ -1,3 +1,4 @@ +from .anthropic import Claude from .azure_openai import AzureOpenAI from .bedrock import * from .cache_utils import * @@ -13,4 +14,3 @@ from .pyserini import * from .sbert import * from .sentence_vectorizer import * -from .anthropic import Claude diff --git a/dsp/modules/anthropic.py b/dsp/modules/anthropic.py index 859e43665b..68f30ce7a1 100644 --- a/dsp/modules/anthropic.py +++ b/dsp/modules/anthropic.py @@ -1,10 +1,10 @@ +import logging import os +from typing import Any, Optional + import backoff -import json -from typing import Optional, Any from dsp.modules.lm import LM -import logging try: import anthropic @@ -41,7 +41,7 @@ def __init__( model: str = "claude-instant-1.2", api_key: Optional[str] = None, api_base: Optional[str] = None, - **kwargs + **kwargs, ): super().__init__(model) From e7443d65f634dbd834e4f38e9980a7967237c0ac Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Sat, 9 Mar 2024 14:15:24 -0600 Subject: [PATCH 193/243] Update poetry.lock --- poetry.lock | 572 ++++++++++++++++++++++++++++------------------------ 1 file changed, 304 insertions(+), 268 deletions(-) diff --git a/poetry.lock b/poetry.lock index 30ec769817..670e11592d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -151,6 +151,30 @@ files = [ {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, ] +[[package]] +name = "anthropic" +version = "0.18.1" +description = "The official Python library for the anthropic API" +optional = true +python-versions = ">=3.7" +files = [ + {file = "anthropic-0.18.1-py3-none-any.whl", hash = "sha256:b85aee64f619ce1b1964ba733a09adc4053e7bc4e6d4186001229ec191099dcf"}, + {file = "anthropic-0.18.1.tar.gz", hash = 
"sha256:f5d1caafd43f6cc933a79753a93531605095f040a384f6a900c3de9c3fb6694e"}, +] + +[package.dependencies] +anyio = ">=3.5.0,<5" +distro = ">=1.7.0,<2" +httpx = ">=0.23.0,<1" +pydantic = ">=1.9.0,<3" +sniffio = "*" +tokenizers = ">=0.13.0" +typing-extensions = ">=4.7,<5" + +[package.extras] +bedrock = ["boto3 (>=1.28.57)", "botocore (>=1.31.57)"] +vertex = ["google-auth (>=2,<3)"] + [[package]] name = "anyio" version = "4.3.0" @@ -366,13 +390,13 @@ lxml = ["lxml"] [[package]] name = "cachetools" -version = "5.3.2" +version = "5.3.3" description = "Extensible memoizing collections and decorators" optional = true python-versions = ">=3.7" files = [ - {file = "cachetools-5.3.2-py3-none-any.whl", hash = "sha256:861f35a13a451f94e301ce2bec7cac63e881232ccce7ed67fab9b5df4d3beaa1"}, - {file = "cachetools-5.3.2.tar.gz", hash = "sha256:086ee420196f7b2ab9ca2db2520aca326318b68fe5ba8bc4d49cca91add450f2"}, + {file = "cachetools-5.3.3-py3-none-any.whl", hash = "sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945"}, + {file = "cachetools-5.3.3.tar.gz", hash = "sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105"}, ] [[package]] @@ -755,20 +779,20 @@ test-randomorder = ["pytest-randomly"] [[package]] name = "datasets" -version = "2.17.1" +version = "2.18.0" description = "HuggingFace community-driven open-source library of datasets" optional = false python-versions = ">=3.8.0" files = [ - {file = "datasets-2.17.1-py3-none-any.whl", hash = "sha256:346974daf2fe9c14ddb35646896b2308b95e7dc27709d1a6e25273573b140cf8"}, - {file = "datasets-2.17.1.tar.gz", hash = "sha256:66ec24077807f374f379b62ab0256c4dcb7c38a57ff1529a22993e8d95f2f9f1"}, + {file = "datasets-2.18.0-py3-none-any.whl", hash = "sha256:f1bbf0e2896917a914de01cbd37075b14deea3837af87ad0d9f697388ccaeb50"}, + {file = "datasets-2.18.0.tar.gz", hash = "sha256:cdf8b8c6abf7316377ba4f49f9589a4c74556d6b481afd0abd2284f3d69185cb"}, ] [package.dependencies] aiohttp = "*" dill = ">=0.3.0,<0.3.9" filelock = "*" -fsspec = {version = ">=2023.1.0,<=2023.10.0", extras = ["http"]} +fsspec = {version = ">=2023.1.0,<=2024.2.0", extras = ["http"]} huggingface-hub = ">=0.19.4" multiprocess = "*" numpy = ">=1.17" @@ -785,11 +809,11 @@ xxhash = "*" apache-beam = ["apache-beam (>=2.26.0)"] audio = ["librosa", "soundfile (>=0.12.1)"] benchmarks = ["tensorflow (==2.12.0)", "torch (==2.0.1)", "transformers (==4.30.1)"] -dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "jax (>=0.3.14)", "jaxlib (>=0.3.14)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "ruff (>=0.1.5)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy", "tensorflow (>=2.2.0,!=2.6.0,!=2.6.1)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "torch (>=2.0.0)", "transformers", "typing-extensions (>=4.6.1)", "zstandard"] +dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "jax (>=0.3.14)", "jaxlib (>=0.3.14)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "ruff (>=0.3.0)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy", "tensorflow (>=2.2.0,!=2.6.0,!=2.6.1)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "torch (>=2.0.0)", "transformers", "typing-extensions (>=4.6.1)", 
"zstandard"] docs = ["s3fs", "tensorflow (>=2.2.0,!=2.6.0,!=2.6.1)", "tensorflow-macos", "torch", "transformers"] jax = ["jax (>=0.3.14)", "jaxlib (>=0.3.14)"] metrics-tests = ["Werkzeug (>=1.0.1)", "accelerate", "bert-score (>=0.3.6)", "jiwer", "langdetect", "mauve-text", "nltk", "requests-file (>=1.5.1)", "rouge-score", "sacrebleu", "sacremoses", "scikit-learn", "scipy", "sentencepiece", "seqeval", "six (>=1.15.0,<1.16.0)", "spacy (>=3.0.0)", "texttable (>=1.6.3)", "tldextract", "tldextract (>=3.1.0)", "toml (>=0.10.1)", "typer (<0.5.0)"] -quality = ["ruff (>=0.1.5)"] +quality = ["ruff (>=0.3.0)"] s3 = ["s3fs"] tensorflow = ["tensorflow (>=2.2.0,!=2.6.0,!=2.6.1)", "tensorflow-macos"] tensorflow-gpu = ["tensorflow-gpu (>=2.2.0,!=2.6.0,!=2.6.1)"] @@ -871,6 +895,17 @@ files = [ graph = ["objgraph (>=1.7.2)"] profile = ["gprof2dot (>=2022.7.29)"] +[[package]] +name = "distro" +version = "1.9.0" +description = "Distro - an OS platform information API" +optional = true +python-versions = ">=3.6" +files = [ + {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, + {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, +] + [[package]] name = "dnspython" version = "2.6.1" @@ -999,13 +1034,13 @@ typing = ["typing-extensions (>=4.8)"] [[package]] name = "flatbuffers" -version = "23.5.26" +version = "24.3.7" description = "The FlatBuffers serialization format for Python" optional = true python-versions = "*" files = [ - {file = "flatbuffers-23.5.26-py2.py3-none-any.whl", hash = "sha256:c0ff356da363087b915fde4b8b45bdda73432fc17cddb3c8157472eab1422ad1"}, - {file = "flatbuffers-23.5.26.tar.gz", hash = "sha256:9ea1144cac05ce5d86e2859f431c6cd5e66cd9c78c558317c7955fb8d4c78d89"}, + {file = "flatbuffers-24.3.7-py2.py3-none-any.whl", hash = "sha256:80c4f5dcad0ee76b7e349671a0d657f2fbba927a0244f88dd3f5ed6a3694e1fc"}, + {file = "flatbuffers-24.3.7.tar.gz", hash = "sha256:0895c22b9a6019ff2f4de2e5e2f7cd15914043e6e7033a94c0c6369422690f22"}, ] [[package]] @@ -1096,18 +1131,17 @@ files = [ [[package]] name = "fsspec" -version = "2023.10.0" +version = "2024.2.0" description = "File-system specification" optional = false python-versions = ">=3.8" files = [ - {file = "fsspec-2023.10.0-py3-none-any.whl", hash = "sha256:346a8f024efeb749d2a5fca7ba8854474b1ff9af7c3faaf636a4548781136529"}, - {file = "fsspec-2023.10.0.tar.gz", hash = "sha256:330c66757591df346ad3091a53bd907e15348c2ba17d63fd54f5c39c4457d2a5"}, + {file = "fsspec-2024.2.0-py3-none-any.whl", hash = "sha256:817f969556fa5916bc682e02ca2045f96ff7f586d45110fcb76022063ad2c7d8"}, + {file = "fsspec-2024.2.0.tar.gz", hash = "sha256:b6ad1a679f760dda52b1168c859d01b7b80648ea6f7f7c7f5a8a91dc3f3ecb84"}, ] [package.dependencies] aiohttp = {version = "<4.0.0a0 || >4.0.0a0,<4.0.0a1 || >4.0.0a1", optional = true, markers = "extra == \"http\""} -requests = {version = "*", optional = true, markers = "extra == \"http\""} [package.extras] abfs = ["adlfs"] @@ -1124,7 +1158,7 @@ github = ["requests"] gs = ["gcsfs"] gui = ["panel"] hdfs = ["pyarrow (>=1)"] -http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "requests"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"] libarchive = ["libarchive-c"] oci = ["ocifs"] s3 = ["s3fs"] @@ -1169,13 +1203,13 @@ dev = ["flake8", "markdown", "twine", "wheel"] [[package]] name = "google-auth" -version = "2.28.1" +version = "2.28.2" description = "Google Authentication Library" optional = true python-versions = ">=3.7" files = [ - {file = 
"google-auth-2.28.1.tar.gz", hash = "sha256:34fc3046c257cedcf1622fc4b31fc2be7923d9b4d44973d481125ecc50d83885"}, - {file = "google_auth-2.28.1-py2.py3-none-any.whl", hash = "sha256:25141e2d7a14bfcba945f5e9827f98092716e99482562f15306e5b026e21aa72"}, + {file = "google-auth-2.28.2.tar.gz", hash = "sha256:80b8b4969aa9ed5938c7828308f20f035bc79f9d8fb8120bf9dc8db20b41ba30"}, + {file = "google_auth-2.28.2-py2.py3-none-any.whl", hash = "sha256:9fd67bbcd40f16d9d42f950228e9cf02a2ded4ae49198b27432d0cded5a74c38"}, ] [package.dependencies] @@ -1280,13 +1314,13 @@ test = ["objgraph", "psutil"] [[package]] name = "griffe" -version = "0.41.0" +version = "0.41.3" description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." optional = false python-versions = ">=3.8" files = [ - {file = "griffe-0.41.0-py3-none-any.whl", hash = "sha256:8aa7fc6eb00cb80af9c0198178c6b7110cb59fa2c5187bb13ea25eebbe4dd928"}, - {file = "griffe-0.41.0.tar.gz", hash = "sha256:850128c3198c18713eaf0a6cc8572e590a16b1965f72a4e871e66cf84740903f"}, + {file = "griffe-0.41.3-py3-none-any.whl", hash = "sha256:27b4610f1ba6e5d039e9f0a2c97232e13463df75e53cb1833e0679f3377b9de2"}, + {file = "griffe-0.41.3.tar.gz", hash = "sha256:9edcfa9f57f4d9c5fcc6d5ce067c67a685b7101a21a7d11848ce0437368e474c"}, ] [package.dependencies] @@ -1294,135 +1328,135 @@ colorama = ">=0.4" [[package]] name = "grpcio" -version = "1.62.0" +version = "1.62.1" description = "HTTP/2-based RPC framework" optional = true python-versions = ">=3.7" files = [ - {file = "grpcio-1.62.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:136ffd79791b1eddda8d827b607a6285474ff8a1a5735c4947b58c481e5e4271"}, - {file = "grpcio-1.62.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:d6a56ba703be6b6267bf19423d888600c3f574ac7c2cc5e6220af90662a4d6b0"}, - {file = "grpcio-1.62.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:4cd356211579043fce9f52acc861e519316fff93980a212c8109cca8f47366b6"}, - {file = "grpcio-1.62.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e803e9b58d8f9b4ff0ea991611a8d51b31c68d2e24572cd1fe85e99e8cc1b4f8"}, - {file = "grpcio-1.62.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4c04fe33039b35b97c02d2901a164bbbb2f21fb9c4e2a45a959f0b044c3512c"}, - {file = "grpcio-1.62.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:95370c71b8c9062f9ea033a0867c4c73d6f0ff35113ebd2618171ec1f1e903e0"}, - {file = "grpcio-1.62.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c912688acc05e4ff012c8891803659d6a8a8b5106f0f66e0aed3fb7e77898fa6"}, - {file = "grpcio-1.62.0-cp310-cp310-win32.whl", hash = "sha256:821a44bd63d0f04e33cf4ddf33c14cae176346486b0df08b41a6132b976de5fc"}, - {file = "grpcio-1.62.0-cp310-cp310-win_amd64.whl", hash = "sha256:81531632f93fece32b2762247c4c169021177e58e725494f9a746ca62c83acaa"}, - {file = "grpcio-1.62.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:3fa15850a6aba230eed06b236287c50d65a98f05054a0f01ccedf8e1cc89d57f"}, - {file = "grpcio-1.62.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:36df33080cd7897623feff57831eb83c98b84640b016ce443305977fac7566fb"}, - {file = "grpcio-1.62.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:7a195531828b46ea9c4623c47e1dc45650fc7206f8a71825898dd4c9004b0928"}, - {file = "grpcio-1.62.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ab140a3542bbcea37162bdfc12ce0d47a3cda3f2d91b752a124cc9fe6776a9e2"}, - 
{file = "grpcio-1.62.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f9d6c3223914abb51ac564dc9c3782d23ca445d2864321b9059d62d47144021"}, - {file = "grpcio-1.62.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:fbe0c20ce9a1cff75cfb828b21f08d0a1ca527b67f2443174af6626798a754a4"}, - {file = "grpcio-1.62.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:38f69de9c28c1e7a8fd24e4af4264726637b72f27c2099eaea6e513e7142b47e"}, - {file = "grpcio-1.62.0-cp311-cp311-win32.whl", hash = "sha256:ce1aafdf8d3f58cb67664f42a617af0e34555fe955450d42c19e4a6ad41c84bd"}, - {file = "grpcio-1.62.0-cp311-cp311-win_amd64.whl", hash = "sha256:eef1d16ac26c5325e7d39f5452ea98d6988c700c427c52cbc7ce3201e6d93334"}, - {file = "grpcio-1.62.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:8aab8f90b2a41208c0a071ec39a6e5dbba16fd827455aaa070fec241624ccef8"}, - {file = "grpcio-1.62.0-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:62aa1659d8b6aad7329ede5d5b077e3d71bf488d85795db517118c390358d5f6"}, - {file = "grpcio-1.62.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:0d7ae7fc7dbbf2d78d6323641ded767d9ec6d121aaf931ec4a5c50797b886532"}, - {file = "grpcio-1.62.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f359d635ee9428f0294bea062bb60c478a8ddc44b0b6f8e1f42997e5dc12e2ee"}, - {file = "grpcio-1.62.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d48e5b1f8f4204889f1acf30bb57c30378e17c8d20df5acbe8029e985f735c"}, - {file = "grpcio-1.62.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:662d3df5314ecde3184cf87ddd2c3a66095b3acbb2d57a8cada571747af03873"}, - {file = "grpcio-1.62.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:92cdb616be44c8ac23a57cce0243af0137a10aa82234f23cd46e69e115071388"}, - {file = "grpcio-1.62.0-cp312-cp312-win32.whl", hash = "sha256:0b9179478b09ee22f4a36b40ca87ad43376acdccc816ce7c2193a9061bf35701"}, - {file = "grpcio-1.62.0-cp312-cp312-win_amd64.whl", hash = "sha256:614c3ed234208e76991992342bab725f379cc81c7dd5035ee1de2f7e3f7a9842"}, - {file = "grpcio-1.62.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:7e1f51e2a460b7394670fdb615e26d31d3260015154ea4f1501a45047abe06c9"}, - {file = "grpcio-1.62.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:bcff647e7fe25495e7719f779cc219bbb90b9e79fbd1ce5bda6aae2567f469f2"}, - {file = "grpcio-1.62.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:56ca7ba0b51ed0de1646f1735154143dcbdf9ec2dbe8cc6645def299bb527ca1"}, - {file = "grpcio-1.62.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e84bfb2a734e4a234b116be208d6f0214e68dcf7804306f97962f93c22a1839"}, - {file = "grpcio-1.62.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c1488b31a521fbba50ae86423f5306668d6f3a46d124f7819c603979fc538c4"}, - {file = "grpcio-1.62.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:98d8f4eb91f1ce0735bf0b67c3b2a4fea68b52b2fd13dc4318583181f9219b4b"}, - {file = "grpcio-1.62.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b3d3d755cfa331d6090e13aac276d4a3fb828bf935449dc16c3d554bf366136b"}, - {file = "grpcio-1.62.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a33f2bfd8a58a02aab93f94f6c61279be0f48f99fcca20ebaee67576cd57307b"}, - {file = "grpcio-1.62.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:5e709f7c8028ce0443bddc290fb9c967c1e0e9159ef7a030e8c21cac1feabd35"}, - {file = "grpcio-1.62.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:2f3d9a4d0abb57e5f49ed5039d3ed375826c2635751ab89dcc25932ff683bbb6"}, - {file = 
"grpcio-1.62.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:62ccb92f594d3d9fcd00064b149a0187c246b11e46ff1b7935191f169227f04c"}, - {file = "grpcio-1.62.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:921148f57c2e4b076af59a815467d399b7447f6e0ee10ef6d2601eb1e9c7f402"}, - {file = "grpcio-1.62.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f897b16190b46bc4d4aaf0a32a4b819d559a37a756d7c6b571e9562c360eed72"}, - {file = "grpcio-1.62.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1bc8449084fe395575ed24809752e1dc4592bb70900a03ca42bf236ed5bf008f"}, - {file = "grpcio-1.62.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:81d444e5e182be4c7856cd33a610154fe9ea1726bd071d07e7ba13fafd202e38"}, - {file = "grpcio-1.62.0-cp38-cp38-win32.whl", hash = "sha256:88f41f33da3840b4a9bbec68079096d4caf629e2c6ed3a72112159d570d98ebe"}, - {file = "grpcio-1.62.0-cp38-cp38-win_amd64.whl", hash = "sha256:fc2836cb829895ee190813446dce63df67e6ed7b9bf76060262c55fcd097d270"}, - {file = "grpcio-1.62.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:fcc98cff4084467839d0a20d16abc2a76005f3d1b38062464d088c07f500d170"}, - {file = "grpcio-1.62.0-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:0d3dee701e48ee76b7d6fbbba18ba8bc142e5b231ef7d3d97065204702224e0e"}, - {file = "grpcio-1.62.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:b7a6be562dd18e5d5bec146ae9537f20ae1253beb971c0164f1e8a2f5a27e829"}, - {file = "grpcio-1.62.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:29cb592c4ce64a023712875368bcae13938c7f03e99f080407e20ffe0a9aa33b"}, - {file = "grpcio-1.62.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1eda79574aec8ec4d00768dcb07daba60ed08ef32583b62b90bbf274b3c279f7"}, - {file = "grpcio-1.62.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7eea57444a354ee217fda23f4b479a4cdfea35fb918ca0d8a0e73c271e52c09c"}, - {file = "grpcio-1.62.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0e97f37a3b7c89f9125b92d22e9c8323f4e76e7993ba7049b9f4ccbe8bae958a"}, - {file = "grpcio-1.62.0-cp39-cp39-win32.whl", hash = "sha256:39cd45bd82a2e510e591ca2ddbe22352e8413378852ae814549c162cf3992a93"}, - {file = "grpcio-1.62.0-cp39-cp39-win_amd64.whl", hash = "sha256:b71c65427bf0ec6a8b48c68c17356cb9fbfc96b1130d20a07cb462f4e4dcdcd5"}, - {file = "grpcio-1.62.0.tar.gz", hash = "sha256:748496af9238ac78dcd98cce65421f1adce28c3979393e3609683fcd7f3880d7"}, + {file = "grpcio-1.62.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:179bee6f5ed7b5f618844f760b6acf7e910988de77a4f75b95bbfaa8106f3c1e"}, + {file = "grpcio-1.62.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:48611e4fa010e823ba2de8fd3f77c1322dd60cb0d180dc6630a7e157b205f7ea"}, + {file = "grpcio-1.62.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:b2a0e71b0a2158aa4bce48be9f8f9eb45cbd17c78c7443616d00abbe2a509f6d"}, + {file = "grpcio-1.62.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fbe80577c7880911d3ad65e5ecc997416c98f354efeba2f8d0f9112a67ed65a5"}, + {file = "grpcio-1.62.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58f6c693d446964e3292425e1d16e21a97a48ba9172f2d0df9d7b640acb99243"}, + {file = "grpcio-1.62.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:77c339403db5a20ef4fed02e4d1a9a3d9866bf9c0afc77a42234677313ea22f3"}, + {file = "grpcio-1.62.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b5a4ea906db7dec694098435d84bf2854fe158eb3cd51e1107e571246d4d1d70"}, + {file = 
"grpcio-1.62.1-cp310-cp310-win32.whl", hash = "sha256:4187201a53f8561c015bc745b81a1b2d278967b8de35f3399b84b0695e281d5f"}, + {file = "grpcio-1.62.1-cp310-cp310-win_amd64.whl", hash = "sha256:844d1f3fb11bd1ed362d3fdc495d0770cfab75761836193af166fee113421d66"}, + {file = "grpcio-1.62.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:833379943d1728a005e44103f17ecd73d058d37d95783eb8f0b28ddc1f54d7b2"}, + {file = "grpcio-1.62.1-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:c7fcc6a32e7b7b58f5a7d27530669337a5d587d4066060bcb9dee7a8c833dfb7"}, + {file = "grpcio-1.62.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:fa7d28eb4d50b7cbe75bb8b45ed0da9a1dc5b219a0af59449676a29c2eed9698"}, + {file = "grpcio-1.62.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48f7135c3de2f298b833be8b4ae20cafe37091634e91f61f5a7eb3d61ec6f660"}, + {file = "grpcio-1.62.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71f11fd63365ade276c9d4a7b7df5c136f9030e3457107e1791b3737a9b9ed6a"}, + {file = "grpcio-1.62.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4b49fd8fe9f9ac23b78437da94c54aa7e9996fbb220bac024a67469ce5d0825f"}, + {file = "grpcio-1.62.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:482ae2ae78679ba9ed5752099b32e5fe580443b4f798e1b71df412abf43375db"}, + {file = "grpcio-1.62.1-cp311-cp311-win32.whl", hash = "sha256:1faa02530b6c7426404372515fe5ddf66e199c2ee613f88f025c6f3bd816450c"}, + {file = "grpcio-1.62.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bd90b8c395f39bc82a5fb32a0173e220e3f401ff697840f4003e15b96d1befc"}, + {file = "grpcio-1.62.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:b134d5d71b4e0837fff574c00e49176051a1c532d26c052a1e43231f252d813b"}, + {file = "grpcio-1.62.1-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:d1f6c96573dc09d50dbcbd91dbf71d5cf97640c9427c32584010fbbd4c0e0037"}, + {file = "grpcio-1.62.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:359f821d4578f80f41909b9ee9b76fb249a21035a061a327f91c953493782c31"}, + {file = "grpcio-1.62.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a485f0c2010c696be269184bdb5ae72781344cb4e60db976c59d84dd6354fac9"}, + {file = "grpcio-1.62.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b50b09b4dc01767163d67e1532f948264167cd27f49e9377e3556c3cba1268e1"}, + {file = "grpcio-1.62.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3227c667dccbe38f2c4d943238b887bac588d97c104815aecc62d2fd976e014b"}, + {file = "grpcio-1.62.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3952b581eb121324853ce2b191dae08badb75cd493cb4e0243368aa9e61cfd41"}, + {file = "grpcio-1.62.1-cp312-cp312-win32.whl", hash = "sha256:83a17b303425104d6329c10eb34bba186ffa67161e63fa6cdae7776ff76df73f"}, + {file = "grpcio-1.62.1-cp312-cp312-win_amd64.whl", hash = "sha256:6696ffe440333a19d8d128e88d440f91fb92c75a80ce4b44d55800e656a3ef1d"}, + {file = "grpcio-1.62.1-cp37-cp37m-linux_armv7l.whl", hash = "sha256:e3393b0823f938253370ebef033c9fd23d27f3eae8eb9a8f6264900c7ea3fb5a"}, + {file = "grpcio-1.62.1-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:83e7ccb85a74beaeae2634f10eb858a0ed1a63081172649ff4261f929bacfd22"}, + {file = "grpcio-1.62.1-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:882020c87999d54667a284c7ddf065b359bd00251fcd70279ac486776dbf84ec"}, + {file = "grpcio-1.62.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a10383035e864f386fe096fed5c47d27a2bf7173c56a6e26cffaaa5a361addb1"}, + {file = 
"grpcio-1.62.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:960edebedc6b9ada1ef58e1c71156f28689978188cd8cff3b646b57288a927d9"}, + {file = "grpcio-1.62.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:23e2e04b83f347d0aadde0c9b616f4726c3d76db04b438fd3904b289a725267f"}, + {file = "grpcio-1.62.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:978121758711916d34fe57c1f75b79cdfc73952f1481bb9583399331682d36f7"}, + {file = "grpcio-1.62.1-cp37-cp37m-win_amd64.whl", hash = "sha256:9084086190cc6d628f282e5615f987288b95457292e969b9205e45b442276407"}, + {file = "grpcio-1.62.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:22bccdd7b23c420a27fd28540fb5dcbc97dc6be105f7698cb0e7d7a420d0e362"}, + {file = "grpcio-1.62.1-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:8999bf1b57172dbc7c3e4bb3c732658e918f5c333b2942243f10d0d653953ba9"}, + {file = "grpcio-1.62.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:d9e52558b8b8c2f4ac05ac86344a7417ccdd2b460a59616de49eb6933b07a0bd"}, + {file = "grpcio-1.62.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1714e7bc935780bc3de1b3fcbc7674209adf5208ff825799d579ffd6cd0bd505"}, + {file = "grpcio-1.62.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8842ccbd8c0e253c1f189088228f9b433f7a93b7196b9e5b6f87dba393f5d5d"}, + {file = "grpcio-1.62.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1f1e7b36bdff50103af95a80923bf1853f6823dd62f2d2a2524b66ed74103e49"}, + {file = "grpcio-1.62.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bba97b8e8883a8038606480d6b6772289f4c907f6ba780fa1f7b7da7dfd76f06"}, + {file = "grpcio-1.62.1-cp38-cp38-win32.whl", hash = "sha256:a7f615270fe534548112a74e790cd9d4f5509d744dd718cd442bf016626c22e4"}, + {file = "grpcio-1.62.1-cp38-cp38-win_amd64.whl", hash = "sha256:e6c8c8693df718c5ecbc7babb12c69a4e3677fd11de8886f05ab22d4e6b1c43b"}, + {file = "grpcio-1.62.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:73db2dc1b201d20ab7083e7041946910bb991e7e9761a0394bbc3c2632326483"}, + {file = "grpcio-1.62.1-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:407b26b7f7bbd4f4751dbc9767a1f0716f9fe72d3d7e96bb3ccfc4aace07c8de"}, + {file = "grpcio-1.62.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:f8de7c8cef9261a2d0a62edf2ccea3d741a523c6b8a6477a340a1f2e417658de"}, + {file = "grpcio-1.62.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bd5c8a1af40ec305d001c60236308a67e25419003e9bb3ebfab5695a8d0b369"}, + {file = "grpcio-1.62.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be0477cb31da67846a33b1a75c611f88bfbcd427fe17701b6317aefceee1b96f"}, + {file = "grpcio-1.62.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:60dcd824df166ba266ee0cfaf35a31406cd16ef602b49f5d4dfb21f014b0dedd"}, + {file = "grpcio-1.62.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:973c49086cabab773525f6077f95e5a993bfc03ba8fc32e32f2c279497780585"}, + {file = "grpcio-1.62.1-cp39-cp39-win32.whl", hash = "sha256:12859468e8918d3bd243d213cd6fd6ab07208195dc140763c00dfe901ce1e1b4"}, + {file = "grpcio-1.62.1-cp39-cp39-win_amd64.whl", hash = "sha256:b7209117bbeebdfa5d898205cc55153a51285757902dd73c47de498ad4d11332"}, + {file = "grpcio-1.62.1.tar.gz", hash = "sha256:6c455e008fa86d9e9a9d85bb76da4277c0d7d9668a3bfa70dbe86e9f3c759947"}, ] [package.extras] -protobuf = ["grpcio-tools (>=1.62.0)"] +protobuf = ["grpcio-tools (>=1.62.1)"] [[package]] name = "grpcio-tools" -version = "1.62.0" +version = "1.62.1" description = "Protobuf code generator for gRPC" 
optional = true python-versions = ">=3.7" files = [ - {file = "grpcio-tools-1.62.0.tar.gz", hash = "sha256:7fca6ecfbbf0549058bb29dcc6e435d885b878d07701e77ac58e1e1f591736dc"}, - {file = "grpcio_tools-1.62.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:465c51ebaa184ee3bb619cd5bfaf562bbdde166f2822a6935461e6a741f5ac19"}, - {file = "grpcio_tools-1.62.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:0d9c9a4832f52c4597d6dc12d9ab3109c3bd0ee1686b8bf6d64f9eab4145e3cb"}, - {file = "grpcio_tools-1.62.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:5a482d9625209023481e631c29a6df1392bfc49f9accfa880dabbacff642559a"}, - {file = "grpcio_tools-1.62.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:74196beed18383d53ff3e2412a6c1eefa3ff109e987be240368496bc3dcabc8b"}, - {file = "grpcio_tools-1.62.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75aca28cbeb605c59b5689a7e000fbc2bd659d2f322c58461f3912f00069f6da"}, - {file = "grpcio_tools-1.62.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:523adf731fa4c5af0bf7ee2edb65e8c7ef4d9df9951461d6a18fe096688efd2d"}, - {file = "grpcio_tools-1.62.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:791aa220f8f1936e65bc079e9eb954fa0202a1f16e28b83956e59d17dface127"}, - {file = "grpcio_tools-1.62.0-cp310-cp310-win32.whl", hash = "sha256:5dacc691b18d2c294ea971720ff980a1e2d68a3f7ddcd2f0670b3204e81c4b18"}, - {file = "grpcio_tools-1.62.0-cp310-cp310-win_amd64.whl", hash = "sha256:6999a4e705b03aacad46e625feb7610e47ec88dbd51220c2282b6334f90721fc"}, - {file = "grpcio_tools-1.62.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:19b74e141937c885c9e56b6a7dfa190ca7d583bd48bce9171dd65bbf108b9271"}, - {file = "grpcio_tools-1.62.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:17c16e9a89c0b9f4ff2b143f232c5256383453ce7b55fe981598f9517adc8252"}, - {file = "grpcio_tools-1.62.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:3730b1cd998a0cffc817602cc55e51f268fa97b3e38fa4bee578e3741474547a"}, - {file = "grpcio_tools-1.62.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:14201950513636f515dd455a06890e3a21d115b943cf6a8f5af67ad1413cfa1f"}, - {file = "grpcio_tools-1.62.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f74e0053360e0eadd75193c0c379b6d7f51d074ebbff856bd41780e1a028b38d"}, - {file = "grpcio_tools-1.62.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d5959e3df126931d28cd94dd5f0a708b7dd96019de80ab715fb922fd0c8a838d"}, - {file = "grpcio_tools-1.62.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1927934dfba4658a97c2dab267e53ed239264d40fdd5b295fc317693543db85b"}, - {file = "grpcio_tools-1.62.0-cp311-cp311-win32.whl", hash = "sha256:2f5bd22203e64e1732e149bfdd3083716d038abca294e4e2852159b3d893f9ec"}, - {file = "grpcio_tools-1.62.0-cp311-cp311-win_amd64.whl", hash = "sha256:cd1f4caeca614b04db803566473f7db0971e7a88268f95e4a529b0ace699b949"}, - {file = "grpcio_tools-1.62.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:f0884eaf6a2bbd7b03fea456e808909ee48dd4f7f455519d67defda791116368"}, - {file = "grpcio_tools-1.62.0-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:6b900ae319b6f9ac1be0ca572dfb41c23a7ff6fcbf36e3be6d3054e1e4c60de6"}, - {file = "grpcio_tools-1.62.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:3bbe79b134dfb7c98cf60e4962e31039bef824834cc7034bdf1886a2ed1097f9"}, - {file = "grpcio_tools-1.62.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:77196c7ac8741d4a2aebb023bcc2964ac65ca44180fd791640889ab2afed3e47"}, - {file = "grpcio_tools-1.62.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b65288ebe12e38dd3650fea65d82fcce0d35df1ae4a770b525c10119ee71962f"}, - {file = "grpcio_tools-1.62.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:52b216c458458f6c292e12428916e80974c5113abc505a61e7b0b9f8932a785d"}, - {file = "grpcio_tools-1.62.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:88aa62303278aec45bbb26bf679269c7890346c37140ae30e39da1070c341e11"}, - {file = "grpcio_tools-1.62.0-cp312-cp312-win32.whl", hash = "sha256:bb6802d63e42734d2baf02e1343377fe18590ed6a1f5ffbdebbbe0f8331f176b"}, - {file = "grpcio_tools-1.62.0-cp312-cp312-win_amd64.whl", hash = "sha256:d5652d3a52a2e8e1d9bdf28fbd15e21b166e31b968cd7c8c604bf31611c0bb5b"}, - {file = "grpcio_tools-1.62.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:84e27206bd884be83a7fdcef8be3c90eb1591341c0ba9b0d25ec9db1043ba2f2"}, - {file = "grpcio_tools-1.62.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:5eb63d9207b02a0fa30216907e1e7705cc2670f933e77236c6e0eb966ad3b4bf"}, - {file = "grpcio_tools-1.62.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:95e49839d49e79187c43cd63af5c206dc5743a01d7d3d2f039772fa743cbb30c"}, - {file = "grpcio_tools-1.62.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9ae5cd2f89e33a529790bf8aa59a459484edb05e4f58d4cf78836b9dfa1fab43"}, - {file = "grpcio_tools-1.62.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e1fd7301d762bf5984b7e7fb62fce82cff864d75f0a57e15cfd07ae1bd79133"}, - {file = "grpcio_tools-1.62.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e38d5800151e6804d500e329f7ddfb615c50eee0c1607593e3147a4b21037e40"}, - {file = "grpcio_tools-1.62.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:563a75924109e75809b2919e68d7e6ae7872e63d20258aae7899b14f6ff9e18b"}, - {file = "grpcio_tools-1.62.0-cp37-cp37m-win_amd64.whl", hash = "sha256:5f8934715577c9cc0c792b8a77f7d0dd2bb60e951161b10c5f46b60856673240"}, - {file = "grpcio_tools-1.62.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:ed6cf7ff4a10c46f85340f9c68982f9efb29f51ee4b66828310fcdf3c2d7ffd1"}, - {file = "grpcio_tools-1.62.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:1faa5006fe9e7b9e65c47bc23f7cd333fdcdd4ba35d44080303848266db5ab05"}, - {file = "grpcio_tools-1.62.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:3b526dc5566161a3a17599753838b9cfbdd4cb15b6ad419aae8a5d12053fa8ae"}, - {file = "grpcio_tools-1.62.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09db3688efd3499ce3c0b02c0bac0656abdab4cb99716f81ad879c08b92c56e"}, - {file = "grpcio_tools-1.62.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:006ea0cc16e8bf8f307326e0556e1384f24abb402cc4e6a720aa1dfe8f268647"}, - {file = "grpcio_tools-1.62.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b46ba0b6552b4375ede65e0c89491af532635347f78d52a72f8a027529e713ed"}, - {file = "grpcio_tools-1.62.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ec6f561c86fe13cff3be16f297cc05e1aa1274294524743a4cf91d971866fbb0"}, - {file = "grpcio_tools-1.62.0-cp38-cp38-win32.whl", hash = "sha256:c85391e06620d6e16a56341caae5007d0c6219beba065e1e288f2523fba6a335"}, - {file = "grpcio_tools-1.62.0-cp38-cp38-win_amd64.whl", hash = "sha256:679cf2507090e010da73e5001665c76de2a5927b2e2110e459222b1c81cb10c2"}, - {file = "grpcio_tools-1.62.0-cp39-cp39-linux_armv7l.whl", hash = 
"sha256:0e87f105f1d152934759f8975ed002d5ce057b3cdf1cc6cb63fe6008671a27b9"}, - {file = "grpcio_tools-1.62.0-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:bf9f281f528e0220558d57e09b4518dec148dcb250d78bd9cbb27e09edabb3f9"}, - {file = "grpcio_tools-1.62.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:711314cb4c6c8b3d51bafaee380ffa5012bd0567ed68f1b0b1fc07492b27acab"}, - {file = "grpcio_tools-1.62.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54bb570bd963905de3bda596b35e06026552705edebbb2cb737b57aa5252b9e5"}, - {file = "grpcio_tools-1.62.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dce5f04676cf94e6e2d13d7f91ac2de79097d86675bc4d404a3c24dcc0332c88"}, - {file = "grpcio_tools-1.62.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:98ddf871c614cc0ed331c7159ebbbf5040be562279677d3bb97c2e6083539f72"}, - {file = "grpcio_tools-1.62.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f3aaf3b20c0f7063856b2432335af8f76cf580f898e04085548cde28332d6833"}, - {file = "grpcio_tools-1.62.0-cp39-cp39-win32.whl", hash = "sha256:3dee3be61d9032f777a9b4e2696ea3d0748a582cb99c672b5d41ca66821e8c87"}, - {file = "grpcio_tools-1.62.0-cp39-cp39-win_amd64.whl", hash = "sha256:f54b5181784464bd3573ae7dbcf053da18a4b7a75fe19960791f383be3d035ca"}, + {file = "grpcio-tools-1.62.1.tar.gz", hash = "sha256:a4991e5ee8a97ab791296d3bf7e8700b1445635cc1828cc98df945ca1802d7f2"}, + {file = "grpcio_tools-1.62.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:f2b404bcae7e2ef9b0b9803b2a95119eb7507e6dc80ea4a64a78be052c30cebc"}, + {file = "grpcio_tools-1.62.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:fdd987a580b4474769adfd40144486f54bcc73838d5ec5d3647a17883ea78e76"}, + {file = "grpcio_tools-1.62.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:07af1a6442e2313cff22af93c2c4dd37ae32b5239b38e0d99e2cbf93de65429f"}, + {file = "grpcio_tools-1.62.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:41384c9ee18e61ef20cad2774ef71bd8854b63efce263b5177aa06fccb84df1f"}, + {file = "grpcio_tools-1.62.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c38006f7702d2ff52122e4c77a47348709374050c76216e84b30a9f06e45afa"}, + {file = "grpcio_tools-1.62.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:08fecc3c5b4e6dd3278f2b9d12837e423c7dcff551ca1e587018b4a0fc5f8019"}, + {file = "grpcio_tools-1.62.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a01e8dcd0f041f6fa6d815c54a2017d032950e310c41d514a8bc041e872c4d12"}, + {file = "grpcio_tools-1.62.1-cp310-cp310-win32.whl", hash = "sha256:dd933b8e0b3c13fe3543d58f849a6a5e0d7987688cb6801834278378c724f695"}, + {file = "grpcio_tools-1.62.1-cp310-cp310-win_amd64.whl", hash = "sha256:2b04844a9382f1bde4b4174e476e654ab3976168d2469cb4b29e352f4f35a5aa"}, + {file = "grpcio_tools-1.62.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:024380536ba71a96cdf736f0954f6ad03f5da609c09edbcc2ca02fdd639e0eed"}, + {file = "grpcio_tools-1.62.1-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:21f14b99e0cd38ad56754cc0b62b2bf3cf75f9f7fc40647da54669e0da0726fe"}, + {file = "grpcio_tools-1.62.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:975ac5fb482c23f3608c16e06a43c8bab4d79c2e2564cdbc25cf753c6e998775"}, + {file = "grpcio_tools-1.62.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50739aaab0c8076ad5957204e71f2e0c9876e11fd8338f7f09de12c2d75163c5"}, + {file = "grpcio_tools-1.62.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:598c54318f0326cf5020aa43fc95a15e933aba4a71943d3bff2677d2d21ddfa1"}, + {file = "grpcio_tools-1.62.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:f309bdb33a61f8e049480d41498ee2e525cfb5e959958b326abfdf552bf9b9cb"}, + {file = "grpcio_tools-1.62.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f358effd3c11d66c150e0227f983d54a5cd30e14038566dadcf25f9f6844e6e8"}, + {file = "grpcio_tools-1.62.1-cp311-cp311-win32.whl", hash = "sha256:b76aead9b73f1650a091870fe4e9ed15ac4d8ed136f962042367255199c23594"}, + {file = "grpcio_tools-1.62.1-cp311-cp311-win_amd64.whl", hash = "sha256:d66a5d47eaa427039752fa0a83a425ff2a487b6a0ac30556fd3be2f3a27a0130"}, + {file = "grpcio_tools-1.62.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:575535d039b97d63e6a9abee626d6c7cd47bd8cb73dd00a5c84a98254a2164a4"}, + {file = "grpcio_tools-1.62.1-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:22644c90e43d1a888477899af917979e17364fdd6e9bbb92679cd6a54c4d36c3"}, + {file = "grpcio_tools-1.62.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:156d3e1b227c16e903003a56881dbe60e40f2b4bd66f0bc3b27c53e466e6384d"}, + {file = "grpcio_tools-1.62.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ad7c5691625a85327e5b683443baf73ae790fd5afc938252041ed5cd665e377"}, + {file = "grpcio_tools-1.62.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e140bbc08eea8abf51c0274f45fb1e8350220e64758998d7f3c7f985a0b2496"}, + {file = "grpcio_tools-1.62.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:7444fcab861911525470d398e5638b70d5cbea3b4674a3de92b5c58c5c515d4d"}, + {file = "grpcio_tools-1.62.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e643cd14a5d1e59865cba68a5a6f0175d987f36c5f4cb0db80dee9ed60b4c174"}, + {file = "grpcio_tools-1.62.1-cp312-cp312-win32.whl", hash = "sha256:1344a773d2caa9bb7fbea7e879b84f33740c808c34a5bd2a2768e526117a6b44"}, + {file = "grpcio_tools-1.62.1-cp312-cp312-win_amd64.whl", hash = "sha256:2eea1db3748b2f37b4dce84d8e0c15d9bc811094807cabafe7b0ea47f424dfd5"}, + {file = "grpcio_tools-1.62.1-cp37-cp37m-linux_armv7l.whl", hash = "sha256:45d2e6cf04d27286b6f73e6e20ba3f0a1f6d8f5535e5dcb1356200419bb457f4"}, + {file = "grpcio_tools-1.62.1-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:46ae58e6926773e7315e9005f0f17aacedbc0895a8752bec087d24efa2f1fb21"}, + {file = "grpcio_tools-1.62.1-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:4c28086df31478023a36f45e50767872ab3aed2419afff09814cb61c88b77db4"}, + {file = "grpcio_tools-1.62.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4fba5b339f4797548591036c9481e6895bf920fab7d3dc664d2697f8fb7c0bf"}, + {file = "grpcio_tools-1.62.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23eb3d47f78f509fcd201749b1f1e44b76f447913f7fbb3b8bae20f109086295"}, + {file = "grpcio_tools-1.62.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:fd5d47707bd6bc2b707ece765c362d2a1d2e8f6cd92b04c99fab49a929f3610c"}, + {file = "grpcio_tools-1.62.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d1924a6a943df7c73b9ef0048302327c75962b567451479710da729ead241228"}, + {file = "grpcio_tools-1.62.1-cp37-cp37m-win_amd64.whl", hash = "sha256:fe71ca30aabe42591e84ecb9694c0297dc699cc20c5b24d2cb267fb0fc01f947"}, + {file = "grpcio_tools-1.62.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:1819fd055c1ae672d1d725ec75eefd1f700c18acba0ed9332202be31d69c401d"}, + {file = "grpcio_tools-1.62.1-cp38-cp38-macosx_10_10_universal2.whl", hash = 
"sha256:5dbe1f7481dd14b6d477b4bace96d275090bc7636b9883975a08b802c94e7b78"}, + {file = "grpcio_tools-1.62.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:771c051c5ece27ad03e4f2e33624a925f0ad636c01757ab7dbb04a37964af4ba"}, + {file = "grpcio_tools-1.62.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:98209c438b38b6f1276dbc27b1c04e346a75bfaafe72a25a548f2dc5ce71d226"}, + {file = "grpcio_tools-1.62.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2152308e5321cb90fb45aaa84d03d6dedb19735a8779aaf36c624f97b831842d"}, + {file = "grpcio_tools-1.62.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ed1f27dc2b2262c8b8d9036276619c1bb18791311c16ccbf1f31b660f2aad7cf"}, + {file = "grpcio_tools-1.62.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2744947b6c5e907af21133431809ccca535a037356864e32c122efed8cb9de1f"}, + {file = "grpcio_tools-1.62.1-cp38-cp38-win32.whl", hash = "sha256:13b20e269d14ad629ff9a2c9a2450f3dbb119d5948de63b27ffe624fa7aea85a"}, + {file = "grpcio_tools-1.62.1-cp38-cp38-win_amd64.whl", hash = "sha256:999823758e9eacd0095863d06cd6d388be769f80c9abb65cdb11c4f2cfce3fea"}, + {file = "grpcio_tools-1.62.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:941f8a5c31986053e75fa466bcfa743c2bf1b513b7978cf1f4ab4e96a8219d27"}, + {file = "grpcio_tools-1.62.1-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:b9c02c88c77ef6057c6cbeea8922d7c2424aabf46bfc40ddf42a32765ba91061"}, + {file = "grpcio_tools-1.62.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:6abd4eb3ccb444383a40156139acc3aaa73745d395139cb6bc8e2a3429e1e627"}, + {file = "grpcio_tools-1.62.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:449503213d142f8470b331a1c2f346f8457f16c7fe20f531bc2500e271f7c14c"}, + {file = "grpcio_tools-1.62.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a11bcf609d00cfc9baed77ab308223cabc1f0b22a05774a26dd4c94c0c80f1f"}, + {file = "grpcio_tools-1.62.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:5d7bdea33354b55acf40bb4dd3ba7324d6f1ef6b4a1a4da0807591f8c7e87b9a"}, + {file = "grpcio_tools-1.62.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d03b645852d605f43003020e78fe6d573cae6ee6b944193e36b8b317e7549a20"}, + {file = "grpcio_tools-1.62.1-cp39-cp39-win32.whl", hash = "sha256:52b185dfc3bf32e70929310367dbc66185afba60492a6a75a9b1141d407e160c"}, + {file = "grpcio_tools-1.62.1-cp39-cp39-win_amd64.whl", hash = "sha256:63a273b70896d3640b7a883eb4a080c3c263d91662d870a2e9c84b7bbd978e7b"}, ] [package.dependencies] -grpcio = ">=1.62.0" +grpcio = ">=1.62.1" protobuf = ">=4.21.6,<5.0dev" setuptools = "*" @@ -1559,13 +1593,13 @@ socks = ["socksio (==1.*)"] [[package]] name = "huggingface-hub" -version = "0.20.3" +version = "0.21.4" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.20.3-py3-none-any.whl", hash = "sha256:d988ae4f00d3e307b0c80c6a05ca6dbb7edba8bba3079f74cda7d9c2e562a7b6"}, - {file = "huggingface_hub-0.20.3.tar.gz", hash = "sha256:94e7f8e074475fbc67d6a71957b678e1b4a74ff1b64a644fd6cbb83da962d05d"}, + {file = "huggingface_hub-0.21.4-py3-none-any.whl", hash = "sha256:df37c2c37fc6c82163cdd8a67ede261687d80d1e262526d6c0ce73b6b3630a7b"}, + {file = "huggingface_hub-0.21.4.tar.gz", hash = "sha256:e1f4968c93726565a80edf6dc309763c7b546d0cfe79aa221206034d50155531"}, ] [package.dependencies] @@ -1582,11 +1616,12 @@ all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", 
"aiohttp", "gradio", "jedi", cli = ["InquirerPy (==0.3.4)"] dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] +hf-transfer = ["hf-transfer (>=0.1.4)"] inference = ["aiohttp", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)"] quality = ["mypy (==1.5.1)", "ruff (>=0.1.3)"] tensorflow = ["graphviz", "pydot", "tensorflow"] testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] -torch = ["torch"] +torch = ["safetensors", "torch"] typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] [[package]] @@ -1638,32 +1673,32 @@ files = [ [[package]] name = "importlib-metadata" -version = "7.0.1" +version = "7.0.2" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "importlib_metadata-7.0.1-py3-none-any.whl", hash = "sha256:4805911c3a4ec7c3966410053e9ec6a1fecd629117df5adee56dfc9432a1081e"}, - {file = "importlib_metadata-7.0.1.tar.gz", hash = "sha256:f238736bb06590ae52ac1fab06a3a9ef1d8dce2b7a35b5ab329371d6c8f5d2cc"}, + {file = "importlib_metadata-7.0.2-py3-none-any.whl", hash = "sha256:f4bc4c0c070c490abf4ce96d715f68e95923320370efb66143df00199bb6c100"}, + {file = "importlib_metadata-7.0.2.tar.gz", hash = "sha256:198f568f3230878cb1b44fbd7975f87906c22336dba2e4a7f05278c281fbd792"}, ] [package.dependencies] zipp = ">=0.5" [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] perf = ["ipython"] -testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] +testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"] [[package]] name = "importlib-resources" -version = "6.1.2" +version = "6.1.3" description = "Read resources from Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "importlib_resources-6.1.2-py3-none-any.whl", hash = "sha256:9a0a862501dc38b68adebc82970140c9e4209fc99601782925178f8386339938"}, - {file = "importlib_resources-6.1.2.tar.gz", hash = "sha256:308abf8474e2dba5f867d279237cd4076482c3de7104a40b41426370e891549b"}, + {file = "importlib_resources-6.1.3-py3-none-any.whl", hash = "sha256:4c0269e3580fe2634d364b39b38b961540a7738c02cb984e98add8b4221d793d"}, + {file = "importlib_resources-6.1.3.tar.gz", hash = 
"sha256:56fb4525197b78544a3354ea27793952ab93f935bb4bf746b846bb1015020f2b"}, ] [package.dependencies] @@ -1671,7 +1706,7 @@ zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} [package.extras] docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)", "zipp (>=3.17)"] +testing = ["jaraco.collections", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)", "zipp (>=3.17)"] [[package]] name = "iniconfig" @@ -1686,13 +1721,13 @@ files = [ [[package]] name = "ipykernel" -version = "6.29.2" +version = "6.29.3" description = "IPython Kernel for Jupyter" optional = true python-versions = ">=3.8" files = [ - {file = "ipykernel-6.29.2-py3-none-any.whl", hash = "sha256:50384f5c577a260a1d53f1f59a828c7266d321c9b7d00d345693783f66616055"}, - {file = "ipykernel-6.29.2.tar.gz", hash = "sha256:3bade28004e3ff624ed57974948116670604ac5f676d12339693f3142176d3f0"}, + {file = "ipykernel-6.29.3-py3-none-any.whl", hash = "sha256:5aa086a4175b0229d4eca211e181fb473ea78ffd9869af36ba7694c947302a21"}, + {file = "ipykernel-6.29.3.tar.gz", hash = "sha256:e14c250d1f9ea3989490225cc1a542781b095a18a19447fcf2b5eaf7d0ac5bd2"}, ] [package.dependencies] @@ -1715,7 +1750,7 @@ cov = ["coverage[toml]", "curio", "matplotlib", "pytest-cov", "trio"] docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "trio"] pyqt5 = ["pyqt5"] pyside6 = ["pyside6"] -test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (==0.23.4)", "pytest-cov", "pytest-timeout"] +test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.23.5)", "pytest-cov", "pytest-timeout"] [[package]] name = "ipython" @@ -2249,17 +2284,18 @@ min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-imp [[package]] name = "mkdocs-autorefs" -version = "0.5.0" +version = "1.0.1" description = "Automatically link across pages in MkDocs." 
optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_autorefs-0.5.0-py3-none-any.whl", hash = "sha256:7930fcb8ac1249f10e683967aeaddc0af49d90702af111a5e390e8b20b3d97ff"}, - {file = "mkdocs_autorefs-0.5.0.tar.gz", hash = "sha256:9a5054a94c08d28855cfab967ada10ed5be76e2bfad642302a610b252c3274c0"}, + {file = "mkdocs_autorefs-1.0.1-py3-none-any.whl", hash = "sha256:aacdfae1ab197780fb7a2dac92ad8a3d8f7ca8049a9cbe56a4218cd52e8da570"}, + {file = "mkdocs_autorefs-1.0.1.tar.gz", hash = "sha256:f684edf847eced40b570b57846b15f0bf57fb93ac2c510450775dcf16accb971"}, ] [package.dependencies] Markdown = ">=3.3" +markupsafe = ">=2.0.1" mkdocs = ">=1.1" [[package]] @@ -2278,13 +2314,13 @@ mkdocs = ">=1.0.3" [[package]] name = "mkdocs-material" -version = "9.5.11" +version = "9.5.13" description = "Documentation that simply works" optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_material-9.5.11-py3-none-any.whl", hash = "sha256:788ee0f3e036dca2dc20298d65e480297d348a44c9d7b2ee05c5262983e66072"}, - {file = "mkdocs_material-9.5.11.tar.gz", hash = "sha256:7af7f8af0dea16175558f3fb9245d26c83a17199baa5f157755e63d7437bf971"}, + {file = "mkdocs_material-9.5.13-py3-none-any.whl", hash = "sha256:5cbe17fee4e3b4980c8420a04cc762d8dc052ef1e10532abd4fce88e5ea9ce6a"}, + {file = "mkdocs_material-9.5.13.tar.gz", hash = "sha256:d8e4caae576312a88fd2609b81cf43d233cdbe36860d67a68702b018b425bd87"}, ] [package.dependencies] @@ -2318,13 +2354,13 @@ files = [ [[package]] name = "mkdocstrings" -version = "0.24.0" +version = "0.24.1" description = "Automatic documentation from sources, for MkDocs." optional = false python-versions = ">=3.8" files = [ - {file = "mkdocstrings-0.24.0-py3-none-any.whl", hash = "sha256:f4908560c10f587326d8f5165d1908817b2e280bbf707607f601c996366a2264"}, - {file = "mkdocstrings-0.24.0.tar.gz", hash = "sha256:222b1165be41257b494a9d29b14135d2b7ca43f38161d5b10caae03b87bd4f7e"}, + {file = "mkdocstrings-0.24.1-py3-none-any.whl", hash = "sha256:b4206f9a2ca8a648e222d5a0ca1d36ba7dee53c88732818de183b536f9042b5d"}, + {file = "mkdocstrings-0.24.1.tar.gz", hash = "sha256:cc83f9a1c8724fc1be3c2fa071dd73d91ce902ef6a79710249ec8d0ee1064401"}, ] [package.dependencies] @@ -3112,13 +3148,13 @@ tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "p [[package]] name = "posthog" -version = "3.4.2" +version = "3.5.0" description = "Integrate PostHog into any python application." 
optional = true python-versions = "*" files = [ - {file = "posthog-3.4.2-py2.py3-none-any.whl", hash = "sha256:c7e79b2e585d16e93749874bcbcdad78d857037398ce0d8d6c474a04d0bd3bbe"}, - {file = "posthog-3.4.2.tar.gz", hash = "sha256:f0eafa663fbc4a942b49b6168a62a890635407044bbc7593051dcb9cc1208873"}, + {file = "posthog-3.5.0-py2.py3-none-any.whl", hash = "sha256:3c672be7ba6f95d555ea207d4486c171d06657eb34b3ce25eb043bfe7b6b5b76"}, + {file = "posthog-3.5.0.tar.gz", hash = "sha256:8f7e3b2c6e8714d0c0c542a2109b83a7549f63b7113a133ab2763a89245ef2ef"}, ] [package.dependencies] @@ -3280,47 +3316,47 @@ files = [ [[package]] name = "pyarrow" -version = "15.0.0" +version = "15.0.1" description = "Python library for Apache Arrow" optional = false python-versions = ">=3.8" files = [ - {file = "pyarrow-15.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:0a524532fd6dd482edaa563b686d754c70417c2f72742a8c990b322d4c03a15d"}, - {file = "pyarrow-15.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:60a6bdb314affa9c2e0d5dddf3d9cbb9ef4a8dddaa68669975287d47ece67642"}, - {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:66958fd1771a4d4b754cd385835e66a3ef6b12611e001d4e5edfcef5f30391e2"}, - {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f500956a49aadd907eaa21d4fff75f73954605eaa41f61cb94fb008cf2e00c6"}, - {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6f87d9c4f09e049c2cade559643424da84c43a35068f2a1c4653dc5b1408a929"}, - {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:85239b9f93278e130d86c0e6bb455dcb66fc3fd891398b9d45ace8799a871a1e"}, - {file = "pyarrow-15.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:5b8d43e31ca16aa6e12402fcb1e14352d0d809de70edd185c7650fe80e0769e3"}, - {file = "pyarrow-15.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:fa7cd198280dbd0c988df525e50e35b5d16873e2cdae2aaaa6363cdb64e3eec5"}, - {file = "pyarrow-15.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8780b1a29d3c8b21ba6b191305a2a607de2e30dab399776ff0aa09131e266340"}, - {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe0ec198ccc680f6c92723fadcb97b74f07c45ff3fdec9dd765deb04955ccf19"}, - {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:036a7209c235588c2f07477fe75c07e6caced9b7b61bb897c8d4e52c4b5f9555"}, - {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2bd8a0e5296797faf9a3294e9fa2dc67aa7f10ae2207920dbebb785c77e9dbe5"}, - {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:e8ebed6053dbe76883a822d4e8da36860f479d55a762bd9e70d8494aed87113e"}, - {file = "pyarrow-15.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:17d53a9d1b2b5bd7d5e4cd84d018e2a45bc9baaa68f7e6e3ebed45649900ba99"}, - {file = "pyarrow-15.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9950a9c9df24090d3d558b43b97753b8f5867fb8e521f29876aa021c52fda351"}, - {file = "pyarrow-15.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:003d680b5e422d0204e7287bb3fa775b332b3fce2996aa69e9adea23f5c8f970"}, - {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f75fce89dad10c95f4bf590b765e3ae98bcc5ba9f6ce75adb828a334e26a3d40"}, - {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca9cb0039923bec49b4fe23803807e4ef39576a2bec59c32b11296464623dc2"}, - {file 
= "pyarrow-15.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:9ed5a78ed29d171d0acc26a305a4b7f83c122d54ff5270810ac23c75813585e4"}, - {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:6eda9e117f0402dfcd3cd6ec9bfee89ac5071c48fc83a84f3075b60efa96747f"}, - {file = "pyarrow-15.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:9a3a6180c0e8f2727e6f1b1c87c72d3254cac909e609f35f22532e4115461177"}, - {file = "pyarrow-15.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:19a8918045993349b207de72d4576af0191beef03ea655d8bdb13762f0cd6eac"}, - {file = "pyarrow-15.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d0ec076b32bacb6666e8813a22e6e5a7ef1314c8069d4ff345efa6246bc38593"}, - {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5db1769e5d0a77eb92344c7382d6543bea1164cca3704f84aa44e26c67e320fb"}, - {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2617e3bf9df2a00020dd1c1c6dce5cc343d979efe10bc401c0632b0eef6ef5b"}, - {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:d31c1d45060180131caf10f0f698e3a782db333a422038bf7fe01dace18b3a31"}, - {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:c8c287d1d479de8269398b34282e206844abb3208224dbdd7166d580804674b7"}, - {file = "pyarrow-15.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:07eb7f07dc9ecbb8dace0f58f009d3a29ee58682fcdc91337dfeb51ea618a75b"}, - {file = "pyarrow-15.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:47af7036f64fce990bb8a5948c04722e4e3ea3e13b1007ef52dfe0aa8f23cf7f"}, - {file = "pyarrow-15.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93768ccfff85cf044c418bfeeafce9a8bb0cee091bd8fd19011aff91e58de540"}, - {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6ee87fd6892700960d90abb7b17a72a5abb3b64ee0fe8db6c782bcc2d0dc0b4"}, - {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:001fca027738c5f6be0b7a3159cc7ba16a5c52486db18160909a0831b063c4e4"}, - {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:d1c48648f64aec09accf44140dccb92f4f94394b8d79976c426a5b79b11d4fa7"}, - {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:972a0141be402bb18e3201448c8ae62958c9c7923dfaa3b3d4530c835ac81aed"}, - {file = "pyarrow-15.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:f01fc5cf49081426429127aa2d427d9d98e1cb94a32cb961d583a70b7c4504e6"}, - {file = "pyarrow-15.0.0.tar.gz", hash = "sha256:876858f549d540898f927eba4ef77cd549ad8d24baa3207cf1b72e5788b50e83"}, + {file = "pyarrow-15.0.1-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:c2ddb3be5ea938c329a84171694fc230b241ce1b6b0ff1a0280509af51c375fa"}, + {file = "pyarrow-15.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7543ea88a0ff72f8e6baaf9bfdbec2c62aeabdbede9e4a571c71cc3bc43b6302"}, + {file = "pyarrow-15.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1519e218a6941fc074e4501088d891afcb2adf77c236e03c34babcf3d6a0d1c7"}, + {file = "pyarrow-15.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28cafa86e1944761970d3b3fc0411b14ff9b5c2b73cd22aaf470d7a3976335f5"}, + {file = "pyarrow-15.0.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:be5c3d463e33d03eab496e1af7916b1d44001c08f0f458ad27dc16093a020638"}, + {file = "pyarrow-15.0.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = 
"sha256:47b1eda15d3aa3f49a07b1808648e1397e5dc6a80a30bf87faa8e2d02dad7ac3"}, + {file = "pyarrow-15.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:e524a31be7db22deebbbcf242b189063ab9a7652c62471d296b31bc6e3cae77b"}, + {file = "pyarrow-15.0.1-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:a476fefe8bdd56122fb0d4881b785413e025858803cc1302d0d788d3522b374d"}, + {file = "pyarrow-15.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:309e6191be385f2e220586bfdb643f9bb21d7e1bc6dd0a6963dc538e347b2431"}, + {file = "pyarrow-15.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83bc586903dbeb4365cbc72b602f99f70b96c5882e5dfac5278813c7d624ca3c"}, + {file = "pyarrow-15.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07e652daac6d8b05280cd2af31c0fb61a4490ec6a53dc01588014d9fa3fdbee9"}, + {file = "pyarrow-15.0.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:abad2e08652df153a72177ce20c897d083b0c4ebeec051239e2654ddf4d3c996"}, + {file = "pyarrow-15.0.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cde663352bc83ad75ba7b3206e049ca1a69809223942362a8649e37bd22f9e3b"}, + {file = "pyarrow-15.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:1b6e237dd7a08482a8b8f3f6512d258d2460f182931832a8c6ef3953203d31e1"}, + {file = "pyarrow-15.0.1-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:7bd167536ee23192760b8c731d39b7cfd37914c27fd4582335ffd08450ff799d"}, + {file = "pyarrow-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7c08bb31eb2984ba5c3747d375bb522e7e536b8b25b149c9cb5e1c49b0ccb736"}, + {file = "pyarrow-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0f9c1d630ed2524bd1ddf28ec92780a7b599fd54704cd653519f7ff5aec177a"}, + {file = "pyarrow-15.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5186048493395220550bca7b524420471aac2d77af831f584ce132680f55c3df"}, + {file = "pyarrow-15.0.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:31dc30c7ec8958da3a3d9f31d6c3630429b2091ede0ecd0d989fd6bec129f0e4"}, + {file = "pyarrow-15.0.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:3f111a014fb8ac2297b43a74bf4495cc479a332908f7ee49cb7cbd50714cb0c1"}, + {file = "pyarrow-15.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:a6d1f7c15d7f68f08490d0cb34611497c74285b8a6bbeab4ef3fc20117310983"}, + {file = "pyarrow-15.0.1-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:9ad931b996f51c2f978ed517b55cb3c6078272fb4ec579e3da5a8c14873b698d"}, + {file = "pyarrow-15.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:738f6b53ab1c2f66b2bde8a1d77e186aeaab702d849e0dfa1158c9e2c030add3"}, + {file = "pyarrow-15.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c1c3fc16bc74e33bf8f1e5a212938ed8d88e902f372c4dac6b5bad328567d2f"}, + {file = "pyarrow-15.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1fa92512128f6c1b8dde0468c1454dd70f3bff623970e370d52efd4d24fd0be"}, + {file = "pyarrow-15.0.1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:b4157f307c202cbbdac147d9b07447a281fa8e63494f7fc85081da351ec6ace9"}, + {file = "pyarrow-15.0.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:b75e7da26f383787f80ad76143b44844ffa28648fcc7099a83df1538c078d2f2"}, + {file = "pyarrow-15.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:3a99eac76ae14096c209850935057b9e8ce97a78397c5cde8724674774f34e5d"}, + {file = "pyarrow-15.0.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:dd532d3177e031e9b2d2df19fd003d0cc0520d1747659fcabbd4d9bb87de508c"}, 
+ {file = "pyarrow-15.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ce8c89848fd37e5313fc2ce601483038ee5566db96ba0808d5883b2e2e55dc53"}, + {file = "pyarrow-15.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:862eac5e5f3b6477f7a92b2f27e560e1f4e5e9edfca9ea9da8a7478bb4abd5ce"}, + {file = "pyarrow-15.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f0ea3a29cd5cb99bf14c1c4533eceaa00ea8fb580950fb5a89a5c771a994a4e"}, + {file = "pyarrow-15.0.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:bb902f780cfd624b2e8fd8501fadab17618fdb548532620ef3d91312aaf0888a"}, + {file = "pyarrow-15.0.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:4f87757f02735a6bb4ad2e1b98279ac45d53b748d5baf52401516413007c6999"}, + {file = "pyarrow-15.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:efd3816c7fbfcbd406ac0f69873cebb052effd7cdc153ae5836d1b00845845d7"}, + {file = "pyarrow-15.0.1.tar.gz", hash = "sha256:21d812548d39d490e0c6928a7c663f37b96bf764034123d4b4ab4530ecc757a9"}, ] [package.dependencies] @@ -3541,13 +3577,13 @@ windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pymdown-extensions" -version = "10.7" +version = "10.7.1" description = "Extension pack for Python Markdown." optional = false python-versions = ">=3.8" files = [ - {file = "pymdown_extensions-10.7-py3-none-any.whl", hash = "sha256:6ca215bc57bc12bf32b414887a68b810637d039124ed9b2e5bd3325cbb2c050c"}, - {file = "pymdown_extensions-10.7.tar.gz", hash = "sha256:c0d64d5cf62566f59e6b2b690a4095c931107c250a8c8e1351c1de5f6b036deb"}, + {file = "pymdown_extensions-10.7.1-py3-none-any.whl", hash = "sha256:f5cc7000d7ff0d1ce9395d216017fa4df3dde800afb1fb72d1c7d3fd35e710f4"}, + {file = "pymdown_extensions-10.7.1.tar.gz", hash = "sha256:c70e146bdd83c744ffc766b4671999796aba18842b268510a329f7f64700d584"}, ] [package.dependencies] @@ -3559,13 +3595,13 @@ extra = ["pygments (>=2.12)"] [[package]] name = "pyparsing" -version = "3.1.1" +version = "3.1.2" description = "pyparsing module - Classes and methods to define and execute parsing grammars" optional = false python-versions = ">=3.6.8" files = [ - {file = "pyparsing-3.1.1-py3-none-any.whl", hash = "sha256:32c7c0b711493c72ff18a981d24f28aaf9c1fb7ed5e9667c9e84e3db623bdbfb"}, - {file = "pyparsing-3.1.1.tar.gz", hash = "sha256:ede28a1a32462f5a9705e07aea48001a08f7cf81a021585011deba701581a0db"}, + {file = "pyparsing-3.1.2-py3-none-any.whl", hash = "sha256:f9db75911801ed778fe61bb643079ff86601aca99fcae6345aa67292038fb742"}, + {file = "pyparsing-3.1.2.tar.gz", hash = "sha256:a1bac0ce561155ecc3ed78ca94d3c9378656ad4c94c1270de543f621420f94ad"}, ] [package.extras] @@ -3618,13 +3654,13 @@ testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xm [[package]] name = "python-dateutil" -version = "2.8.2" +version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, ] [package.dependencies] @@ -3859,13 
+3895,13 @@ cffi = {version = "*", markers = "implementation_name == \"pypy\""} [[package]] name = "qdrant-client" -version = "1.7.3" +version = "1.8.0" description = "Client library for the Qdrant vector search engine" optional = true python-versions = ">=3.8" files = [ - {file = "qdrant_client-1.7.3-py3-none-any.whl", hash = "sha256:b062420ba55eb847652c7d2a26404fb1986bea13aa785763024013f96a7a915c"}, - {file = "qdrant_client-1.7.3.tar.gz", hash = "sha256:7b809be892cdc5137ae80ea3335da40c06499ad0b0072b5abc6bad79da1d29fc"}, + {file = "qdrant_client-1.8.0-py3-none-any.whl", hash = "sha256:fa28d3eb64c0c57ec029c7c85c71f6c72c197f92502022655741f3632c518e29"}, + {file = "qdrant_client-1.8.0.tar.gz", hash = "sha256:2a1a3f2cbacc7adba85644cf6cfdee20401cf25764b32da479c81fb63e178d15"}, ] [package.dependencies] @@ -3878,7 +3914,7 @@ pydantic = ">=1.10.8" urllib3 = ">=1.26.14,<3" [package.extras] -fastembed = ["fastembed (==0.1.1)"] +fastembed = ["fastembed (==0.2.2)"] [[package]] name = "referencing" @@ -4450,60 +4486,60 @@ test = ["pytest"] [[package]] name = "sqlalchemy" -version = "2.0.27" +version = "2.0.28" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.27-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d04e579e911562f1055d26dab1868d3e0bb905db3bccf664ee8ad109f035618a"}, - {file = "SQLAlchemy-2.0.27-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fa67d821c1fd268a5a87922ef4940442513b4e6c377553506b9db3b83beebbd8"}, - {file = "SQLAlchemy-2.0.27-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c7a596d0be71b7baa037f4ac10d5e057d276f65a9a611c46970f012752ebf2d"}, - {file = "SQLAlchemy-2.0.27-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:954d9735ee9c3fa74874c830d089a815b7b48df6f6b6e357a74130e478dbd951"}, - {file = "SQLAlchemy-2.0.27-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5cd20f58c29bbf2680039ff9f569fa6d21453fbd2fa84dbdb4092f006424c2e6"}, - {file = "SQLAlchemy-2.0.27-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:03f448ffb731b48323bda68bcc93152f751436ad6037f18a42b7e16af9e91c07"}, - {file = "SQLAlchemy-2.0.27-cp310-cp310-win32.whl", hash = "sha256:d997c5938a08b5e172c30583ba6b8aad657ed9901fc24caf3a7152eeccb2f1b4"}, - {file = "SQLAlchemy-2.0.27-cp310-cp310-win_amd64.whl", hash = "sha256:eb15ef40b833f5b2f19eeae65d65e191f039e71790dd565c2af2a3783f72262f"}, - {file = "SQLAlchemy-2.0.27-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6c5bad7c60a392850d2f0fee8f355953abaec878c483dd7c3836e0089f046bf6"}, - {file = "SQLAlchemy-2.0.27-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a3012ab65ea42de1be81fff5fb28d6db893ef978950afc8130ba707179b4284a"}, - {file = "SQLAlchemy-2.0.27-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbcd77c4d94b23e0753c5ed8deba8c69f331d4fd83f68bfc9db58bc8983f49cd"}, - {file = "SQLAlchemy-2.0.27-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d177b7e82f6dd5e1aebd24d9c3297c70ce09cd1d5d37b43e53f39514379c029c"}, - {file = "SQLAlchemy-2.0.27-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:680b9a36029b30cf063698755d277885d4a0eab70a2c7c6e71aab601323cba45"}, - {file = "SQLAlchemy-2.0.27-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1306102f6d9e625cebaca3d4c9c8f10588735ef877f0360b5cdb4fdfd3fd7131"}, - {file = "SQLAlchemy-2.0.27-cp311-cp311-win32.whl", hash = "sha256:5b78aa9f4f68212248aaf8943d84c0ff0f74efc65a661c2fc68b82d498311fd5"}, - {file = 
"SQLAlchemy-2.0.27-cp311-cp311-win_amd64.whl", hash = "sha256:15e19a84b84528f52a68143439d0c7a3a69befcd4f50b8ef9b7b69d2628ae7c4"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0de1263aac858f288a80b2071990f02082c51d88335a1db0d589237a3435fe71"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce850db091bf7d2a1f2fdb615220b968aeff3849007b1204bf6e3e50a57b3d32"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8dfc936870507da96aebb43e664ae3a71a7b96278382bcfe84d277b88e379b18"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4fbe6a766301f2e8a4519f4500fe74ef0a8509a59e07a4085458f26228cd7cc"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4535c49d961fe9a77392e3a630a626af5baa967172d42732b7a43496c8b28876"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0fb3bffc0ced37e5aa4ac2416f56d6d858f46d4da70c09bb731a246e70bff4d5"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-win32.whl", hash = "sha256:7f470327d06400a0aa7926b375b8e8c3c31d335e0884f509fe272b3c700a7254"}, - {file = "SQLAlchemy-2.0.27-cp312-cp312-win_amd64.whl", hash = "sha256:f9374e270e2553653d710ece397df67db9d19c60d2647bcd35bfc616f1622dcd"}, - {file = "SQLAlchemy-2.0.27-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e97cf143d74a7a5a0f143aa34039b4fecf11343eed66538610debc438685db4a"}, - {file = "SQLAlchemy-2.0.27-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7b5a3e2120982b8b6bd1d5d99e3025339f7fb8b8267551c679afb39e9c7c7f1"}, - {file = "SQLAlchemy-2.0.27-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e36aa62b765cf9f43a003233a8c2d7ffdeb55bc62eaa0a0380475b228663a38f"}, - {file = "SQLAlchemy-2.0.27-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5ada0438f5b74c3952d916c199367c29ee4d6858edff18eab783b3978d0db16d"}, - {file = "SQLAlchemy-2.0.27-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b1d9d1bfd96eef3c3faedb73f486c89e44e64e40e5bfec304ee163de01cf996f"}, - {file = "SQLAlchemy-2.0.27-cp37-cp37m-win32.whl", hash = "sha256:ca891af9f3289d24a490a5fde664ea04fe2f4984cd97e26de7442a4251bd4b7c"}, - {file = "SQLAlchemy-2.0.27-cp37-cp37m-win_amd64.whl", hash = "sha256:fd8aafda7cdff03b905d4426b714601c0978725a19efc39f5f207b86d188ba01"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ec1f5a328464daf7a1e4e385e4f5652dd9b1d12405075ccba1df842f7774b4fc"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ad862295ad3f644e3c2c0d8b10a988e1600d3123ecb48702d2c0f26771f1c396"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48217be1de7d29a5600b5c513f3f7664b21d32e596d69582be0a94e36b8309cb"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e56afce6431450442f3ab5973156289bd5ec33dd618941283847c9fd5ff06bf"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:611068511b5531304137bcd7fe8117c985d1b828eb86043bd944cebb7fae3910"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b86abba762ecfeea359112b2bb4490802b340850bbee1948f785141a5e020de8"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-win32.whl", hash = "sha256:30d81cc1192dc693d49d5671cd40cdec596b885b0ce3b72f323888ab1c3863d5"}, - {file = "SQLAlchemy-2.0.27-cp38-cp38-win_amd64.whl", hash = 
"sha256:120af1e49d614d2525ac247f6123841589b029c318b9afbfc9e2b70e22e1827d"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d07ee7793f2aeb9b80ec8ceb96bc8cc08a2aec8a1b152da1955d64e4825fcbac"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cb0845e934647232b6ff5150df37ceffd0b67b754b9fdbb095233deebcddbd4a"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fc19ae2e07a067663dd24fca55f8ed06a288384f0e6e3910420bf4b1270cc51"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b90053be91973a6fb6020a6e44382c97739736a5a9d74e08cc29b196639eb979"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2f5c9dfb0b9ab5e3a8a00249534bdd838d943ec4cfb9abe176a6c33408430230"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:33e8bde8fff203de50399b9039c4e14e42d4d227759155c21f8da4a47fc8053c"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-win32.whl", hash = "sha256:d873c21b356bfaf1589b89090a4011e6532582b3a8ea568a00e0c3aab09399dd"}, - {file = "SQLAlchemy-2.0.27-cp39-cp39-win_amd64.whl", hash = "sha256:ff2f1b7c963961d41403b650842dc2039175b906ab2093635d8319bef0b7d620"}, - {file = "SQLAlchemy-2.0.27-py3-none-any.whl", hash = "sha256:1ab4e0448018d01b142c916cc7119ca573803a4745cfe341b8f95657812700ac"}, - {file = "SQLAlchemy-2.0.27.tar.gz", hash = "sha256:86a6ed69a71fe6b88bf9331594fa390a2adda4a49b5c06f98e47bf0d392534f8"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0b148ab0438f72ad21cb004ce3bdaafd28465c4276af66df3b9ecd2037bf252"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bbda76961eb8f27e6ad3c84d1dc56d5bc61ba8f02bd20fcf3450bd421c2fcc9c"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feea693c452d85ea0015ebe3bb9cd15b6f49acc1a31c28b3c50f4db0f8fb1e71"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5da98815f82dce0cb31fd1e873a0cb30934971d15b74e0d78cf21f9e1b05953f"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4a5adf383c73f2d49ad15ff363a8748319ff84c371eed59ffd0127355d6ea1da"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56856b871146bfead25fbcaed098269d90b744eea5cb32a952df00d542cdd368"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-win32.whl", hash = "sha256:943aa74a11f5806ab68278284a4ddd282d3fb348a0e96db9b42cb81bf731acdc"}, + {file = "SQLAlchemy-2.0.28-cp310-cp310-win_amd64.whl", hash = "sha256:c6c4da4843e0dabde41b8f2e8147438330924114f541949e6318358a56d1875a"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:46a3d4e7a472bfff2d28db838669fc437964e8af8df8ee1e4548e92710929adc"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0d3dd67b5d69794cfe82862c002512683b3db038b99002171f624712fa71aeaa"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61e2e41656a673b777e2f0cbbe545323dbe0d32312f590b1bc09da1de6c2a02"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0315d9125a38026227f559488fe7f7cee1bd2fbc19f9fd637739dc50bb6380b2"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:af8ce2d31679006e7b747d30a89cd3ac1ec304c3d4c20973f0f4ad58e2d1c4c9"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:81ba314a08c7ab701e621b7ad079c0c933c58cdef88593c59b90b996e8b58fa5"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-win32.whl", hash = "sha256:1ee8bd6d68578e517943f5ebff3afbd93fc65f7ef8f23becab9fa8fb315afb1d"}, + {file = "SQLAlchemy-2.0.28-cp311-cp311-win_amd64.whl", hash = "sha256:ad7acbe95bac70e4e687a4dc9ae3f7a2f467aa6597049eeb6d4a662ecd990bb6"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d3499008ddec83127ab286c6f6ec82a34f39c9817f020f75eca96155f9765097"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9b66fcd38659cab5d29e8de5409cdf91e9986817703e1078b2fdaad731ea66f5"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bea30da1e76cb1acc5b72e204a920a3a7678d9d52f688f087dc08e54e2754c67"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:124202b4e0edea7f08a4db8c81cc7859012f90a0d14ba2bf07c099aff6e96462"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e23b88c69497a6322b5796c0781400692eca1ae5532821b39ce81a48c395aae9"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b6303bfd78fb3221847723104d152e5972c22367ff66edf09120fcde5ddc2e2"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-win32.whl", hash = "sha256:a921002be69ac3ab2cf0c3017c4e6a3377f800f1fca7f254c13b5f1a2f10022c"}, + {file = "SQLAlchemy-2.0.28-cp312-cp312-win_amd64.whl", hash = "sha256:b4a2cf92995635b64876dc141af0ef089c6eea7e05898d8d8865e71a326c0385"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e91b5e341f8c7f1e5020db8e5602f3ed045a29f8e27f7f565e0bdee3338f2c7"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45c7b78dfc7278329f27be02c44abc0d69fe235495bb8e16ec7ef1b1a17952db"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3eba73ef2c30695cb7eabcdb33bb3d0b878595737479e152468f3ba97a9c22a4"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5df5d1dafb8eee89384fb7a1f79128118bc0ba50ce0db27a40750f6f91aa99d5"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2858bbab1681ee5406650202950dc8f00e83b06a198741b7c656e63818633526"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-win32.whl", hash = "sha256:9461802f2e965de5cff80c5a13bc945abea7edaa1d29360b485c3d2b56cdb075"}, + {file = "SQLAlchemy-2.0.28-cp37-cp37m-win_amd64.whl", hash = "sha256:a6bec1c010a6d65b3ed88c863d56b9ea5eeefdf62b5e39cafd08c65f5ce5198b"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:843a882cadebecc655a68bd9a5b8aa39b3c52f4a9a5572a3036fb1bb2ccdc197"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:dbb990612c36163c6072723523d2be7c3eb1517bbdd63fe50449f56afafd1133"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7e4baf9161d076b9a7e432fce06217b9bd90cfb8f1d543d6e8c4595627edb9"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0a5354cb4de9b64bccb6ea33162cb83e03dbefa0d892db88a672f5aad638a75"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:fffcc8edc508801ed2e6a4e7b0d150a62196fd28b4e16ab9f65192e8186102b6"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aca7b6d99a4541b2ebab4494f6c8c2f947e0df4ac859ced575238e1d6ca5716b"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-win32.whl", hash = "sha256:8c7f10720fc34d14abad5b647bc8202202f4948498927d9f1b4df0fb1cf391b7"}, + {file = "SQLAlchemy-2.0.28-cp38-cp38-win_amd64.whl", hash = "sha256:243feb6882b06a2af68ecf4bec8813d99452a1b62ba2be917ce6283852cf701b"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fc4974d3684f28b61b9a90fcb4c41fb340fd4b6a50c04365704a4da5a9603b05"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87724e7ed2a936fdda2c05dbd99d395c91ea3c96f029a033a4a20e008dd876bf"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68722e6a550f5de2e3cfe9da6afb9a7dd15ef7032afa5651b0f0c6b3adb8815d"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:328529f7c7f90adcd65aed06a161851f83f475c2f664a898af574893f55d9e53"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:df40c16a7e8be7413b885c9bf900d402918cc848be08a59b022478804ea076b8"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:426f2fa71331a64f5132369ede5171c52fd1df1bd9727ce621f38b5b24f48750"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-win32.whl", hash = "sha256:33157920b233bc542ce497a81a2e1452e685a11834c5763933b440fedd1d8e2d"}, + {file = "SQLAlchemy-2.0.28-cp39-cp39-win_amd64.whl", hash = "sha256:2f60843068e432311c886c5f03c4664acaef507cf716f6c60d5fde7265be9d7b"}, + {file = "SQLAlchemy-2.0.28-py3-none-any.whl", hash = "sha256:78bb7e8da0183a8301352d569900d9d3594c48ac21dc1c2ec6b3121ed8b6c986"}, + {file = "SQLAlchemy-2.0.28.tar.gz", hash = "sha256:dd53b6c4e6d960600fd6532b79ee28e2da489322fcf6648738134587faf767b6"}, ] [package.dependencies] @@ -4870,13 +4906,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "uvicorn" -version = "0.27.1" +version = "0.28.0" description = "The lightning-fast ASGI server." optional = true python-versions = ">=3.8" files = [ - {file = "uvicorn-0.27.1-py3-none-any.whl", hash = "sha256:5c89da2f3895767472a35556e539fd59f7edbe9b1e9c0e1c99eebeadc61838e4"}, - {file = "uvicorn-0.27.1.tar.gz", hash = "sha256:3d9a267296243532db80c83a959a3400502165ade2c1338dea4e67915fd4745a"}, + {file = "uvicorn-0.28.0-py3-none-any.whl", hash = "sha256:6623abbbe6176204a4226e67607b4d52cc60ff62cda0ff177613645cefa2ece1"}, + {file = "uvicorn-0.28.0.tar.gz", hash = "sha256:cab4473b5d1eaeb5a0f6375ac4bc85007ffc75c3cc1768816d9e5d589857b067"}, ] [package.dependencies] @@ -5568,4 +5604,4 @@ weaviate = ["weaviate-client"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.12" -content-hash = "8b4cc583653becb3be9f5bc4c34cdf5ced146ba32157a5bb4bdc7885291c0403" +content-hash = "f7a5ab7c85e79920d41e45e9bbd17f0dbc1180c52d027235a656c270d9e79346" From d078598ea733ecb0c753e0840d0dc337c80630c0 Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Sat, 9 Mar 2024 14:18:17 -0600 Subject: [PATCH 194/243] Revert "Update poetry.lock" This reverts commit e7443d65f634dbd834e4f38e9980a7967237c0ac. 
--- poetry.lock | 572 ++++++++++++++++++++++++---------------------------- 1 file changed, 268 insertions(+), 304 deletions(-) diff --git a/poetry.lock b/poetry.lock index 670e11592d..30ec769817 100644 --- a/poetry.lock +++ b/poetry.lock @@ -151,30 +151,6 @@ files = [ {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, ] -[[package]] -name = "anthropic" -version = "0.18.1" -description = "The official Python library for the anthropic API" -optional = true -python-versions = ">=3.7" -files = [ - {file = "anthropic-0.18.1-py3-none-any.whl", hash = "sha256:b85aee64f619ce1b1964ba733a09adc4053e7bc4e6d4186001229ec191099dcf"}, - {file = "anthropic-0.18.1.tar.gz", hash = "sha256:f5d1caafd43f6cc933a79753a93531605095f040a384f6a900c3de9c3fb6694e"}, -] - -[package.dependencies] -anyio = ">=3.5.0,<5" -distro = ">=1.7.0,<2" -httpx = ">=0.23.0,<1" -pydantic = ">=1.9.0,<3" -sniffio = "*" -tokenizers = ">=0.13.0" -typing-extensions = ">=4.7,<5" - -[package.extras] -bedrock = ["boto3 (>=1.28.57)", "botocore (>=1.31.57)"] -vertex = ["google-auth (>=2,<3)"] - [[package]] name = "anyio" version = "4.3.0" @@ -390,13 +366,13 @@ lxml = ["lxml"] [[package]] name = "cachetools" -version = "5.3.3" +version = "5.3.2" description = "Extensible memoizing collections and decorators" optional = true python-versions = ">=3.7" files = [ - {file = "cachetools-5.3.3-py3-none-any.whl", hash = "sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945"}, - {file = "cachetools-5.3.3.tar.gz", hash = "sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105"}, + {file = "cachetools-5.3.2-py3-none-any.whl", hash = "sha256:861f35a13a451f94e301ce2bec7cac63e881232ccce7ed67fab9b5df4d3beaa1"}, + {file = "cachetools-5.3.2.tar.gz", hash = "sha256:086ee420196f7b2ab9ca2db2520aca326318b68fe5ba8bc4d49cca91add450f2"}, ] [[package]] @@ -779,20 +755,20 @@ test-randomorder = ["pytest-randomly"] [[package]] name = "datasets" -version = "2.18.0" +version = "2.17.1" description = "HuggingFace community-driven open-source library of datasets" optional = false python-versions = ">=3.8.0" files = [ - {file = "datasets-2.18.0-py3-none-any.whl", hash = "sha256:f1bbf0e2896917a914de01cbd37075b14deea3837af87ad0d9f697388ccaeb50"}, - {file = "datasets-2.18.0.tar.gz", hash = "sha256:cdf8b8c6abf7316377ba4f49f9589a4c74556d6b481afd0abd2284f3d69185cb"}, + {file = "datasets-2.17.1-py3-none-any.whl", hash = "sha256:346974daf2fe9c14ddb35646896b2308b95e7dc27709d1a6e25273573b140cf8"}, + {file = "datasets-2.17.1.tar.gz", hash = "sha256:66ec24077807f374f379b62ab0256c4dcb7c38a57ff1529a22993e8d95f2f9f1"}, ] [package.dependencies] aiohttp = "*" dill = ">=0.3.0,<0.3.9" filelock = "*" -fsspec = {version = ">=2023.1.0,<=2024.2.0", extras = ["http"]} +fsspec = {version = ">=2023.1.0,<=2023.10.0", extras = ["http"]} huggingface-hub = ">=0.19.4" multiprocess = "*" numpy = ">=1.17" @@ -809,11 +785,11 @@ xxhash = "*" apache-beam = ["apache-beam (>=2.26.0)"] audio = ["librosa", "soundfile (>=0.12.1)"] benchmarks = ["tensorflow (==2.12.0)", "torch (==2.0.1)", "transformers (==4.30.1)"] -dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "jax (>=0.3.14)", "jaxlib (>=0.3.14)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "ruff (>=0.3.0)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy", "tensorflow 
(>=2.2.0,!=2.6.0,!=2.6.1)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "torch (>=2.0.0)", "transformers", "typing-extensions (>=4.6.1)", "zstandard"] +dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "jax (>=0.3.14)", "jaxlib (>=0.3.14)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "ruff (>=0.1.5)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy", "tensorflow (>=2.2.0,!=2.6.0,!=2.6.1)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "torch (>=2.0.0)", "transformers", "typing-extensions (>=4.6.1)", "zstandard"] docs = ["s3fs", "tensorflow (>=2.2.0,!=2.6.0,!=2.6.1)", "tensorflow-macos", "torch", "transformers"] jax = ["jax (>=0.3.14)", "jaxlib (>=0.3.14)"] metrics-tests = ["Werkzeug (>=1.0.1)", "accelerate", "bert-score (>=0.3.6)", "jiwer", "langdetect", "mauve-text", "nltk", "requests-file (>=1.5.1)", "rouge-score", "sacrebleu", "sacremoses", "scikit-learn", "scipy", "sentencepiece", "seqeval", "six (>=1.15.0,<1.16.0)", "spacy (>=3.0.0)", "texttable (>=1.6.3)", "tldextract", "tldextract (>=3.1.0)", "toml (>=0.10.1)", "typer (<0.5.0)"] -quality = ["ruff (>=0.3.0)"] +quality = ["ruff (>=0.1.5)"] s3 = ["s3fs"] tensorflow = ["tensorflow (>=2.2.0,!=2.6.0,!=2.6.1)", "tensorflow-macos"] tensorflow-gpu = ["tensorflow-gpu (>=2.2.0,!=2.6.0,!=2.6.1)"] @@ -895,17 +871,6 @@ files = [ graph = ["objgraph (>=1.7.2)"] profile = ["gprof2dot (>=2022.7.29)"] -[[package]] -name = "distro" -version = "1.9.0" -description = "Distro - an OS platform information API" -optional = true -python-versions = ">=3.6" -files = [ - {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, - {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, -] - [[package]] name = "dnspython" version = "2.6.1" @@ -1034,13 +999,13 @@ typing = ["typing-extensions (>=4.8)"] [[package]] name = "flatbuffers" -version = "24.3.7" +version = "23.5.26" description = "The FlatBuffers serialization format for Python" optional = true python-versions = "*" files = [ - {file = "flatbuffers-24.3.7-py2.py3-none-any.whl", hash = "sha256:80c4f5dcad0ee76b7e349671a0d657f2fbba927a0244f88dd3f5ed6a3694e1fc"}, - {file = "flatbuffers-24.3.7.tar.gz", hash = "sha256:0895c22b9a6019ff2f4de2e5e2f7cd15914043e6e7033a94c0c6369422690f22"}, + {file = "flatbuffers-23.5.26-py2.py3-none-any.whl", hash = "sha256:c0ff356da363087b915fde4b8b45bdda73432fc17cddb3c8157472eab1422ad1"}, + {file = "flatbuffers-23.5.26.tar.gz", hash = "sha256:9ea1144cac05ce5d86e2859f431c6cd5e66cd9c78c558317c7955fb8d4c78d89"}, ] [[package]] @@ -1131,17 +1096,18 @@ files = [ [[package]] name = "fsspec" -version = "2024.2.0" +version = "2023.10.0" description = "File-system specification" optional = false python-versions = ">=3.8" files = [ - {file = "fsspec-2024.2.0-py3-none-any.whl", hash = "sha256:817f969556fa5916bc682e02ca2045f96ff7f586d45110fcb76022063ad2c7d8"}, - {file = "fsspec-2024.2.0.tar.gz", hash = "sha256:b6ad1a679f760dda52b1168c859d01b7b80648ea6f7f7c7f5a8a91dc3f3ecb84"}, + {file = "fsspec-2023.10.0-py3-none-any.whl", hash = "sha256:346a8f024efeb749d2a5fca7ba8854474b1ff9af7c3faaf636a4548781136529"}, + {file = "fsspec-2023.10.0.tar.gz", hash = "sha256:330c66757591df346ad3091a53bd907e15348c2ba17d63fd54f5c39c4457d2a5"}, ] 
[package.dependencies] aiohttp = {version = "<4.0.0a0 || >4.0.0a0,<4.0.0a1 || >4.0.0a1", optional = true, markers = "extra == \"http\""} +requests = {version = "*", optional = true, markers = "extra == \"http\""} [package.extras] abfs = ["adlfs"] @@ -1158,7 +1124,7 @@ github = ["requests"] gs = ["gcsfs"] gui = ["panel"] hdfs = ["pyarrow (>=1)"] -http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "requests"] libarchive = ["libarchive-c"] oci = ["ocifs"] s3 = ["s3fs"] @@ -1203,13 +1169,13 @@ dev = ["flake8", "markdown", "twine", "wheel"] [[package]] name = "google-auth" -version = "2.28.2" +version = "2.28.1" description = "Google Authentication Library" optional = true python-versions = ">=3.7" files = [ - {file = "google-auth-2.28.2.tar.gz", hash = "sha256:80b8b4969aa9ed5938c7828308f20f035bc79f9d8fb8120bf9dc8db20b41ba30"}, - {file = "google_auth-2.28.2-py2.py3-none-any.whl", hash = "sha256:9fd67bbcd40f16d9d42f950228e9cf02a2ded4ae49198b27432d0cded5a74c38"}, + {file = "google-auth-2.28.1.tar.gz", hash = "sha256:34fc3046c257cedcf1622fc4b31fc2be7923d9b4d44973d481125ecc50d83885"}, + {file = "google_auth-2.28.1-py2.py3-none-any.whl", hash = "sha256:25141e2d7a14bfcba945f5e9827f98092716e99482562f15306e5b026e21aa72"}, ] [package.dependencies] @@ -1314,13 +1280,13 @@ test = ["objgraph", "psutil"] [[package]] name = "griffe" -version = "0.41.3" +version = "0.41.0" description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." optional = false python-versions = ">=3.8" files = [ - {file = "griffe-0.41.3-py3-none-any.whl", hash = "sha256:27b4610f1ba6e5d039e9f0a2c97232e13463df75e53cb1833e0679f3377b9de2"}, - {file = "griffe-0.41.3.tar.gz", hash = "sha256:9edcfa9f57f4d9c5fcc6d5ce067c67a685b7101a21a7d11848ce0437368e474c"}, + {file = "griffe-0.41.0-py3-none-any.whl", hash = "sha256:8aa7fc6eb00cb80af9c0198178c6b7110cb59fa2c5187bb13ea25eebbe4dd928"}, + {file = "griffe-0.41.0.tar.gz", hash = "sha256:850128c3198c18713eaf0a6cc8572e590a16b1965f72a4e871e66cf84740903f"}, ] [package.dependencies] @@ -1328,135 +1294,135 @@ colorama = ">=0.4" [[package]] name = "grpcio" -version = "1.62.1" +version = "1.62.0" description = "HTTP/2-based RPC framework" optional = true python-versions = ">=3.7" files = [ - {file = "grpcio-1.62.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:179bee6f5ed7b5f618844f760b6acf7e910988de77a4f75b95bbfaa8106f3c1e"}, - {file = "grpcio-1.62.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:48611e4fa010e823ba2de8fd3f77c1322dd60cb0d180dc6630a7e157b205f7ea"}, - {file = "grpcio-1.62.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:b2a0e71b0a2158aa4bce48be9f8f9eb45cbd17c78c7443616d00abbe2a509f6d"}, - {file = "grpcio-1.62.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fbe80577c7880911d3ad65e5ecc997416c98f354efeba2f8d0f9112a67ed65a5"}, - {file = "grpcio-1.62.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58f6c693d446964e3292425e1d16e21a97a48ba9172f2d0df9d7b640acb99243"}, - {file = "grpcio-1.62.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:77c339403db5a20ef4fed02e4d1a9a3d9866bf9c0afc77a42234677313ea22f3"}, - {file = "grpcio-1.62.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b5a4ea906db7dec694098435d84bf2854fe158eb3cd51e1107e571246d4d1d70"}, - {file = "grpcio-1.62.1-cp310-cp310-win32.whl", hash = 
"sha256:4187201a53f8561c015bc745b81a1b2d278967b8de35f3399b84b0695e281d5f"}, - {file = "grpcio-1.62.1-cp310-cp310-win_amd64.whl", hash = "sha256:844d1f3fb11bd1ed362d3fdc495d0770cfab75761836193af166fee113421d66"}, - {file = "grpcio-1.62.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:833379943d1728a005e44103f17ecd73d058d37d95783eb8f0b28ddc1f54d7b2"}, - {file = "grpcio-1.62.1-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:c7fcc6a32e7b7b58f5a7d27530669337a5d587d4066060bcb9dee7a8c833dfb7"}, - {file = "grpcio-1.62.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:fa7d28eb4d50b7cbe75bb8b45ed0da9a1dc5b219a0af59449676a29c2eed9698"}, - {file = "grpcio-1.62.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48f7135c3de2f298b833be8b4ae20cafe37091634e91f61f5a7eb3d61ec6f660"}, - {file = "grpcio-1.62.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71f11fd63365ade276c9d4a7b7df5c136f9030e3457107e1791b3737a9b9ed6a"}, - {file = "grpcio-1.62.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4b49fd8fe9f9ac23b78437da94c54aa7e9996fbb220bac024a67469ce5d0825f"}, - {file = "grpcio-1.62.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:482ae2ae78679ba9ed5752099b32e5fe580443b4f798e1b71df412abf43375db"}, - {file = "grpcio-1.62.1-cp311-cp311-win32.whl", hash = "sha256:1faa02530b6c7426404372515fe5ddf66e199c2ee613f88f025c6f3bd816450c"}, - {file = "grpcio-1.62.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bd90b8c395f39bc82a5fb32a0173e220e3f401ff697840f4003e15b96d1befc"}, - {file = "grpcio-1.62.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:b134d5d71b4e0837fff574c00e49176051a1c532d26c052a1e43231f252d813b"}, - {file = "grpcio-1.62.1-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:d1f6c96573dc09d50dbcbd91dbf71d5cf97640c9427c32584010fbbd4c0e0037"}, - {file = "grpcio-1.62.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:359f821d4578f80f41909b9ee9b76fb249a21035a061a327f91c953493782c31"}, - {file = "grpcio-1.62.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a485f0c2010c696be269184bdb5ae72781344cb4e60db976c59d84dd6354fac9"}, - {file = "grpcio-1.62.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b50b09b4dc01767163d67e1532f948264167cd27f49e9377e3556c3cba1268e1"}, - {file = "grpcio-1.62.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3227c667dccbe38f2c4d943238b887bac588d97c104815aecc62d2fd976e014b"}, - {file = "grpcio-1.62.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3952b581eb121324853ce2b191dae08badb75cd493cb4e0243368aa9e61cfd41"}, - {file = "grpcio-1.62.1-cp312-cp312-win32.whl", hash = "sha256:83a17b303425104d6329c10eb34bba186ffa67161e63fa6cdae7776ff76df73f"}, - {file = "grpcio-1.62.1-cp312-cp312-win_amd64.whl", hash = "sha256:6696ffe440333a19d8d128e88d440f91fb92c75a80ce4b44d55800e656a3ef1d"}, - {file = "grpcio-1.62.1-cp37-cp37m-linux_armv7l.whl", hash = "sha256:e3393b0823f938253370ebef033c9fd23d27f3eae8eb9a8f6264900c7ea3fb5a"}, - {file = "grpcio-1.62.1-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:83e7ccb85a74beaeae2634f10eb858a0ed1a63081172649ff4261f929bacfd22"}, - {file = "grpcio-1.62.1-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:882020c87999d54667a284c7ddf065b359bd00251fcd70279ac486776dbf84ec"}, - {file = "grpcio-1.62.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a10383035e864f386fe096fed5c47d27a2bf7173c56a6e26cffaaa5a361addb1"}, - {file = 
"grpcio-1.62.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:960edebedc6b9ada1ef58e1c71156f28689978188cd8cff3b646b57288a927d9"}, - {file = "grpcio-1.62.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:23e2e04b83f347d0aadde0c9b616f4726c3d76db04b438fd3904b289a725267f"}, - {file = "grpcio-1.62.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:978121758711916d34fe57c1f75b79cdfc73952f1481bb9583399331682d36f7"}, - {file = "grpcio-1.62.1-cp37-cp37m-win_amd64.whl", hash = "sha256:9084086190cc6d628f282e5615f987288b95457292e969b9205e45b442276407"}, - {file = "grpcio-1.62.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:22bccdd7b23c420a27fd28540fb5dcbc97dc6be105f7698cb0e7d7a420d0e362"}, - {file = "grpcio-1.62.1-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:8999bf1b57172dbc7c3e4bb3c732658e918f5c333b2942243f10d0d653953ba9"}, - {file = "grpcio-1.62.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:d9e52558b8b8c2f4ac05ac86344a7417ccdd2b460a59616de49eb6933b07a0bd"}, - {file = "grpcio-1.62.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1714e7bc935780bc3de1b3fcbc7674209adf5208ff825799d579ffd6cd0bd505"}, - {file = "grpcio-1.62.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8842ccbd8c0e253c1f189088228f9b433f7a93b7196b9e5b6f87dba393f5d5d"}, - {file = "grpcio-1.62.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1f1e7b36bdff50103af95a80923bf1853f6823dd62f2d2a2524b66ed74103e49"}, - {file = "grpcio-1.62.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bba97b8e8883a8038606480d6b6772289f4c907f6ba780fa1f7b7da7dfd76f06"}, - {file = "grpcio-1.62.1-cp38-cp38-win32.whl", hash = "sha256:a7f615270fe534548112a74e790cd9d4f5509d744dd718cd442bf016626c22e4"}, - {file = "grpcio-1.62.1-cp38-cp38-win_amd64.whl", hash = "sha256:e6c8c8693df718c5ecbc7babb12c69a4e3677fd11de8886f05ab22d4e6b1c43b"}, - {file = "grpcio-1.62.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:73db2dc1b201d20ab7083e7041946910bb991e7e9761a0394bbc3c2632326483"}, - {file = "grpcio-1.62.1-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:407b26b7f7bbd4f4751dbc9767a1f0716f9fe72d3d7e96bb3ccfc4aace07c8de"}, - {file = "grpcio-1.62.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:f8de7c8cef9261a2d0a62edf2ccea3d741a523c6b8a6477a340a1f2e417658de"}, - {file = "grpcio-1.62.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bd5c8a1af40ec305d001c60236308a67e25419003e9bb3ebfab5695a8d0b369"}, - {file = "grpcio-1.62.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be0477cb31da67846a33b1a75c611f88bfbcd427fe17701b6317aefceee1b96f"}, - {file = "grpcio-1.62.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:60dcd824df166ba266ee0cfaf35a31406cd16ef602b49f5d4dfb21f014b0dedd"}, - {file = "grpcio-1.62.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:973c49086cabab773525f6077f95e5a993bfc03ba8fc32e32f2c279497780585"}, - {file = "grpcio-1.62.1-cp39-cp39-win32.whl", hash = "sha256:12859468e8918d3bd243d213cd6fd6ab07208195dc140763c00dfe901ce1e1b4"}, - {file = "grpcio-1.62.1-cp39-cp39-win_amd64.whl", hash = "sha256:b7209117bbeebdfa5d898205cc55153a51285757902dd73c47de498ad4d11332"}, - {file = "grpcio-1.62.1.tar.gz", hash = "sha256:6c455e008fa86d9e9a9d85bb76da4277c0d7d9668a3bfa70dbe86e9f3c759947"}, + {file = "grpcio-1.62.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:136ffd79791b1eddda8d827b607a6285474ff8a1a5735c4947b58c481e5e4271"}, + {file = "grpcio-1.62.0-cp310-cp310-macosx_12_0_universal2.whl", hash = 
"sha256:d6a56ba703be6b6267bf19423d888600c3f574ac7c2cc5e6220af90662a4d6b0"}, + {file = "grpcio-1.62.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:4cd356211579043fce9f52acc861e519316fff93980a212c8109cca8f47366b6"}, + {file = "grpcio-1.62.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e803e9b58d8f9b4ff0ea991611a8d51b31c68d2e24572cd1fe85e99e8cc1b4f8"}, + {file = "grpcio-1.62.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4c04fe33039b35b97c02d2901a164bbbb2f21fb9c4e2a45a959f0b044c3512c"}, + {file = "grpcio-1.62.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:95370c71b8c9062f9ea033a0867c4c73d6f0ff35113ebd2618171ec1f1e903e0"}, + {file = "grpcio-1.62.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c912688acc05e4ff012c8891803659d6a8a8b5106f0f66e0aed3fb7e77898fa6"}, + {file = "grpcio-1.62.0-cp310-cp310-win32.whl", hash = "sha256:821a44bd63d0f04e33cf4ddf33c14cae176346486b0df08b41a6132b976de5fc"}, + {file = "grpcio-1.62.0-cp310-cp310-win_amd64.whl", hash = "sha256:81531632f93fece32b2762247c4c169021177e58e725494f9a746ca62c83acaa"}, + {file = "grpcio-1.62.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:3fa15850a6aba230eed06b236287c50d65a98f05054a0f01ccedf8e1cc89d57f"}, + {file = "grpcio-1.62.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:36df33080cd7897623feff57831eb83c98b84640b016ce443305977fac7566fb"}, + {file = "grpcio-1.62.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:7a195531828b46ea9c4623c47e1dc45650fc7206f8a71825898dd4c9004b0928"}, + {file = "grpcio-1.62.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ab140a3542bbcea37162bdfc12ce0d47a3cda3f2d91b752a124cc9fe6776a9e2"}, + {file = "grpcio-1.62.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f9d6c3223914abb51ac564dc9c3782d23ca445d2864321b9059d62d47144021"}, + {file = "grpcio-1.62.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:fbe0c20ce9a1cff75cfb828b21f08d0a1ca527b67f2443174af6626798a754a4"}, + {file = "grpcio-1.62.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:38f69de9c28c1e7a8fd24e4af4264726637b72f27c2099eaea6e513e7142b47e"}, + {file = "grpcio-1.62.0-cp311-cp311-win32.whl", hash = "sha256:ce1aafdf8d3f58cb67664f42a617af0e34555fe955450d42c19e4a6ad41c84bd"}, + {file = "grpcio-1.62.0-cp311-cp311-win_amd64.whl", hash = "sha256:eef1d16ac26c5325e7d39f5452ea98d6988c700c427c52cbc7ce3201e6d93334"}, + {file = "grpcio-1.62.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:8aab8f90b2a41208c0a071ec39a6e5dbba16fd827455aaa070fec241624ccef8"}, + {file = "grpcio-1.62.0-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:62aa1659d8b6aad7329ede5d5b077e3d71bf488d85795db517118c390358d5f6"}, + {file = "grpcio-1.62.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:0d7ae7fc7dbbf2d78d6323641ded767d9ec6d121aaf931ec4a5c50797b886532"}, + {file = "grpcio-1.62.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f359d635ee9428f0294bea062bb60c478a8ddc44b0b6f8e1f42997e5dc12e2ee"}, + {file = "grpcio-1.62.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d48e5b1f8f4204889f1acf30bb57c30378e17c8d20df5acbe8029e985f735c"}, + {file = "grpcio-1.62.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:662d3df5314ecde3184cf87ddd2c3a66095b3acbb2d57a8cada571747af03873"}, + {file = "grpcio-1.62.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:92cdb616be44c8ac23a57cce0243af0137a10aa82234f23cd46e69e115071388"}, + {file = 
"grpcio-1.62.0-cp312-cp312-win32.whl", hash = "sha256:0b9179478b09ee22f4a36b40ca87ad43376acdccc816ce7c2193a9061bf35701"}, + {file = "grpcio-1.62.0-cp312-cp312-win_amd64.whl", hash = "sha256:614c3ed234208e76991992342bab725f379cc81c7dd5035ee1de2f7e3f7a9842"}, + {file = "grpcio-1.62.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:7e1f51e2a460b7394670fdb615e26d31d3260015154ea4f1501a45047abe06c9"}, + {file = "grpcio-1.62.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:bcff647e7fe25495e7719f779cc219bbb90b9e79fbd1ce5bda6aae2567f469f2"}, + {file = "grpcio-1.62.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:56ca7ba0b51ed0de1646f1735154143dcbdf9ec2dbe8cc6645def299bb527ca1"}, + {file = "grpcio-1.62.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e84bfb2a734e4a234b116be208d6f0214e68dcf7804306f97962f93c22a1839"}, + {file = "grpcio-1.62.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c1488b31a521fbba50ae86423f5306668d6f3a46d124f7819c603979fc538c4"}, + {file = "grpcio-1.62.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:98d8f4eb91f1ce0735bf0b67c3b2a4fea68b52b2fd13dc4318583181f9219b4b"}, + {file = "grpcio-1.62.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b3d3d755cfa331d6090e13aac276d4a3fb828bf935449dc16c3d554bf366136b"}, + {file = "grpcio-1.62.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a33f2bfd8a58a02aab93f94f6c61279be0f48f99fcca20ebaee67576cd57307b"}, + {file = "grpcio-1.62.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:5e709f7c8028ce0443bddc290fb9c967c1e0e9159ef7a030e8c21cac1feabd35"}, + {file = "grpcio-1.62.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:2f3d9a4d0abb57e5f49ed5039d3ed375826c2635751ab89dcc25932ff683bbb6"}, + {file = "grpcio-1.62.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:62ccb92f594d3d9fcd00064b149a0187c246b11e46ff1b7935191f169227f04c"}, + {file = "grpcio-1.62.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:921148f57c2e4b076af59a815467d399b7447f6e0ee10ef6d2601eb1e9c7f402"}, + {file = "grpcio-1.62.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f897b16190b46bc4d4aaf0a32a4b819d559a37a756d7c6b571e9562c360eed72"}, + {file = "grpcio-1.62.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1bc8449084fe395575ed24809752e1dc4592bb70900a03ca42bf236ed5bf008f"}, + {file = "grpcio-1.62.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:81d444e5e182be4c7856cd33a610154fe9ea1726bd071d07e7ba13fafd202e38"}, + {file = "grpcio-1.62.0-cp38-cp38-win32.whl", hash = "sha256:88f41f33da3840b4a9bbec68079096d4caf629e2c6ed3a72112159d570d98ebe"}, + {file = "grpcio-1.62.0-cp38-cp38-win_amd64.whl", hash = "sha256:fc2836cb829895ee190813446dce63df67e6ed7b9bf76060262c55fcd097d270"}, + {file = "grpcio-1.62.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:fcc98cff4084467839d0a20d16abc2a76005f3d1b38062464d088c07f500d170"}, + {file = "grpcio-1.62.0-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:0d3dee701e48ee76b7d6fbbba18ba8bc142e5b231ef7d3d97065204702224e0e"}, + {file = "grpcio-1.62.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:b7a6be562dd18e5d5bec146ae9537f20ae1253beb971c0164f1e8a2f5a27e829"}, + {file = "grpcio-1.62.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:29cb592c4ce64a023712875368bcae13938c7f03e99f080407e20ffe0a9aa33b"}, + {file = "grpcio-1.62.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1eda79574aec8ec4d00768dcb07daba60ed08ef32583b62b90bbf274b3c279f7"}, + {file = 
"grpcio-1.62.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7eea57444a354ee217fda23f4b479a4cdfea35fb918ca0d8a0e73c271e52c09c"}, + {file = "grpcio-1.62.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0e97f37a3b7c89f9125b92d22e9c8323f4e76e7993ba7049b9f4ccbe8bae958a"}, + {file = "grpcio-1.62.0-cp39-cp39-win32.whl", hash = "sha256:39cd45bd82a2e510e591ca2ddbe22352e8413378852ae814549c162cf3992a93"}, + {file = "grpcio-1.62.0-cp39-cp39-win_amd64.whl", hash = "sha256:b71c65427bf0ec6a8b48c68c17356cb9fbfc96b1130d20a07cb462f4e4dcdcd5"}, + {file = "grpcio-1.62.0.tar.gz", hash = "sha256:748496af9238ac78dcd98cce65421f1adce28c3979393e3609683fcd7f3880d7"}, ] [package.extras] -protobuf = ["grpcio-tools (>=1.62.1)"] +protobuf = ["grpcio-tools (>=1.62.0)"] [[package]] name = "grpcio-tools" -version = "1.62.1" +version = "1.62.0" description = "Protobuf code generator for gRPC" optional = true python-versions = ">=3.7" files = [ - {file = "grpcio-tools-1.62.1.tar.gz", hash = "sha256:a4991e5ee8a97ab791296d3bf7e8700b1445635cc1828cc98df945ca1802d7f2"}, - {file = "grpcio_tools-1.62.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:f2b404bcae7e2ef9b0b9803b2a95119eb7507e6dc80ea4a64a78be052c30cebc"}, - {file = "grpcio_tools-1.62.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:fdd987a580b4474769adfd40144486f54bcc73838d5ec5d3647a17883ea78e76"}, - {file = "grpcio_tools-1.62.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:07af1a6442e2313cff22af93c2c4dd37ae32b5239b38e0d99e2cbf93de65429f"}, - {file = "grpcio_tools-1.62.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:41384c9ee18e61ef20cad2774ef71bd8854b63efce263b5177aa06fccb84df1f"}, - {file = "grpcio_tools-1.62.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c38006f7702d2ff52122e4c77a47348709374050c76216e84b30a9f06e45afa"}, - {file = "grpcio_tools-1.62.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:08fecc3c5b4e6dd3278f2b9d12837e423c7dcff551ca1e587018b4a0fc5f8019"}, - {file = "grpcio_tools-1.62.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a01e8dcd0f041f6fa6d815c54a2017d032950e310c41d514a8bc041e872c4d12"}, - {file = "grpcio_tools-1.62.1-cp310-cp310-win32.whl", hash = "sha256:dd933b8e0b3c13fe3543d58f849a6a5e0d7987688cb6801834278378c724f695"}, - {file = "grpcio_tools-1.62.1-cp310-cp310-win_amd64.whl", hash = "sha256:2b04844a9382f1bde4b4174e476e654ab3976168d2469cb4b29e352f4f35a5aa"}, - {file = "grpcio_tools-1.62.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:024380536ba71a96cdf736f0954f6ad03f5da609c09edbcc2ca02fdd639e0eed"}, - {file = "grpcio_tools-1.62.1-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:21f14b99e0cd38ad56754cc0b62b2bf3cf75f9f7fc40647da54669e0da0726fe"}, - {file = "grpcio_tools-1.62.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:975ac5fb482c23f3608c16e06a43c8bab4d79c2e2564cdbc25cf753c6e998775"}, - {file = "grpcio_tools-1.62.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50739aaab0c8076ad5957204e71f2e0c9876e11fd8338f7f09de12c2d75163c5"}, - {file = "grpcio_tools-1.62.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:598c54318f0326cf5020aa43fc95a15e933aba4a71943d3bff2677d2d21ddfa1"}, - {file = "grpcio_tools-1.62.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:f309bdb33a61f8e049480d41498ee2e525cfb5e959958b326abfdf552bf9b9cb"}, - {file = "grpcio_tools-1.62.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f358effd3c11d66c150e0227f983d54a5cd30e14038566dadcf25f9f6844e6e8"}, - {file 
= "grpcio_tools-1.62.1-cp311-cp311-win32.whl", hash = "sha256:b76aead9b73f1650a091870fe4e9ed15ac4d8ed136f962042367255199c23594"}, - {file = "grpcio_tools-1.62.1-cp311-cp311-win_amd64.whl", hash = "sha256:d66a5d47eaa427039752fa0a83a425ff2a487b6a0ac30556fd3be2f3a27a0130"}, - {file = "grpcio_tools-1.62.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:575535d039b97d63e6a9abee626d6c7cd47bd8cb73dd00a5c84a98254a2164a4"}, - {file = "grpcio_tools-1.62.1-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:22644c90e43d1a888477899af917979e17364fdd6e9bbb92679cd6a54c4d36c3"}, - {file = "grpcio_tools-1.62.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:156d3e1b227c16e903003a56881dbe60e40f2b4bd66f0bc3b27c53e466e6384d"}, - {file = "grpcio_tools-1.62.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ad7c5691625a85327e5b683443baf73ae790fd5afc938252041ed5cd665e377"}, - {file = "grpcio_tools-1.62.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e140bbc08eea8abf51c0274f45fb1e8350220e64758998d7f3c7f985a0b2496"}, - {file = "grpcio_tools-1.62.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:7444fcab861911525470d398e5638b70d5cbea3b4674a3de92b5c58c5c515d4d"}, - {file = "grpcio_tools-1.62.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e643cd14a5d1e59865cba68a5a6f0175d987f36c5f4cb0db80dee9ed60b4c174"}, - {file = "grpcio_tools-1.62.1-cp312-cp312-win32.whl", hash = "sha256:1344a773d2caa9bb7fbea7e879b84f33740c808c34a5bd2a2768e526117a6b44"}, - {file = "grpcio_tools-1.62.1-cp312-cp312-win_amd64.whl", hash = "sha256:2eea1db3748b2f37b4dce84d8e0c15d9bc811094807cabafe7b0ea47f424dfd5"}, - {file = "grpcio_tools-1.62.1-cp37-cp37m-linux_armv7l.whl", hash = "sha256:45d2e6cf04d27286b6f73e6e20ba3f0a1f6d8f5535e5dcb1356200419bb457f4"}, - {file = "grpcio_tools-1.62.1-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:46ae58e6926773e7315e9005f0f17aacedbc0895a8752bec087d24efa2f1fb21"}, - {file = "grpcio_tools-1.62.1-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:4c28086df31478023a36f45e50767872ab3aed2419afff09814cb61c88b77db4"}, - {file = "grpcio_tools-1.62.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4fba5b339f4797548591036c9481e6895bf920fab7d3dc664d2697f8fb7c0bf"}, - {file = "grpcio_tools-1.62.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23eb3d47f78f509fcd201749b1f1e44b76f447913f7fbb3b8bae20f109086295"}, - {file = "grpcio_tools-1.62.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:fd5d47707bd6bc2b707ece765c362d2a1d2e8f6cd92b04c99fab49a929f3610c"}, - {file = "grpcio_tools-1.62.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d1924a6a943df7c73b9ef0048302327c75962b567451479710da729ead241228"}, - {file = "grpcio_tools-1.62.1-cp37-cp37m-win_amd64.whl", hash = "sha256:fe71ca30aabe42591e84ecb9694c0297dc699cc20c5b24d2cb267fb0fc01f947"}, - {file = "grpcio_tools-1.62.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:1819fd055c1ae672d1d725ec75eefd1f700c18acba0ed9332202be31d69c401d"}, - {file = "grpcio_tools-1.62.1-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:5dbe1f7481dd14b6d477b4bace96d275090bc7636b9883975a08b802c94e7b78"}, - {file = "grpcio_tools-1.62.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:771c051c5ece27ad03e4f2e33624a925f0ad636c01757ab7dbb04a37964af4ba"}, - {file = "grpcio_tools-1.62.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:98209c438b38b6f1276dbc27b1c04e346a75bfaafe72a25a548f2dc5ce71d226"}, - {file = 
"grpcio_tools-1.62.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2152308e5321cb90fb45aaa84d03d6dedb19735a8779aaf36c624f97b831842d"}, - {file = "grpcio_tools-1.62.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ed1f27dc2b2262c8b8d9036276619c1bb18791311c16ccbf1f31b660f2aad7cf"}, - {file = "grpcio_tools-1.62.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2744947b6c5e907af21133431809ccca535a037356864e32c122efed8cb9de1f"}, - {file = "grpcio_tools-1.62.1-cp38-cp38-win32.whl", hash = "sha256:13b20e269d14ad629ff9a2c9a2450f3dbb119d5948de63b27ffe624fa7aea85a"}, - {file = "grpcio_tools-1.62.1-cp38-cp38-win_amd64.whl", hash = "sha256:999823758e9eacd0095863d06cd6d388be769f80c9abb65cdb11c4f2cfce3fea"}, - {file = "grpcio_tools-1.62.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:941f8a5c31986053e75fa466bcfa743c2bf1b513b7978cf1f4ab4e96a8219d27"}, - {file = "grpcio_tools-1.62.1-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:b9c02c88c77ef6057c6cbeea8922d7c2424aabf46bfc40ddf42a32765ba91061"}, - {file = "grpcio_tools-1.62.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:6abd4eb3ccb444383a40156139acc3aaa73745d395139cb6bc8e2a3429e1e627"}, - {file = "grpcio_tools-1.62.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:449503213d142f8470b331a1c2f346f8457f16c7fe20f531bc2500e271f7c14c"}, - {file = "grpcio_tools-1.62.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a11bcf609d00cfc9baed77ab308223cabc1f0b22a05774a26dd4c94c0c80f1f"}, - {file = "grpcio_tools-1.62.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:5d7bdea33354b55acf40bb4dd3ba7324d6f1ef6b4a1a4da0807591f8c7e87b9a"}, - {file = "grpcio_tools-1.62.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d03b645852d605f43003020e78fe6d573cae6ee6b944193e36b8b317e7549a20"}, - {file = "grpcio_tools-1.62.1-cp39-cp39-win32.whl", hash = "sha256:52b185dfc3bf32e70929310367dbc66185afba60492a6a75a9b1141d407e160c"}, - {file = "grpcio_tools-1.62.1-cp39-cp39-win_amd64.whl", hash = "sha256:63a273b70896d3640b7a883eb4a080c3c263d91662d870a2e9c84b7bbd978e7b"}, + {file = "grpcio-tools-1.62.0.tar.gz", hash = "sha256:7fca6ecfbbf0549058bb29dcc6e435d885b878d07701e77ac58e1e1f591736dc"}, + {file = "grpcio_tools-1.62.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:465c51ebaa184ee3bb619cd5bfaf562bbdde166f2822a6935461e6a741f5ac19"}, + {file = "grpcio_tools-1.62.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:0d9c9a4832f52c4597d6dc12d9ab3109c3bd0ee1686b8bf6d64f9eab4145e3cb"}, + {file = "grpcio_tools-1.62.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:5a482d9625209023481e631c29a6df1392bfc49f9accfa880dabbacff642559a"}, + {file = "grpcio_tools-1.62.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:74196beed18383d53ff3e2412a6c1eefa3ff109e987be240368496bc3dcabc8b"}, + {file = "grpcio_tools-1.62.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75aca28cbeb605c59b5689a7e000fbc2bd659d2f322c58461f3912f00069f6da"}, + {file = "grpcio_tools-1.62.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:523adf731fa4c5af0bf7ee2edb65e8c7ef4d9df9951461d6a18fe096688efd2d"}, + {file = "grpcio_tools-1.62.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:791aa220f8f1936e65bc079e9eb954fa0202a1f16e28b83956e59d17dface127"}, + {file = "grpcio_tools-1.62.0-cp310-cp310-win32.whl", hash = "sha256:5dacc691b18d2c294ea971720ff980a1e2d68a3f7ddcd2f0670b3204e81c4b18"}, + {file = "grpcio_tools-1.62.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:6999a4e705b03aacad46e625feb7610e47ec88dbd51220c2282b6334f90721fc"}, + {file = "grpcio_tools-1.62.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:19b74e141937c885c9e56b6a7dfa190ca7d583bd48bce9171dd65bbf108b9271"}, + {file = "grpcio_tools-1.62.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:17c16e9a89c0b9f4ff2b143f232c5256383453ce7b55fe981598f9517adc8252"}, + {file = "grpcio_tools-1.62.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:3730b1cd998a0cffc817602cc55e51f268fa97b3e38fa4bee578e3741474547a"}, + {file = "grpcio_tools-1.62.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:14201950513636f515dd455a06890e3a21d115b943cf6a8f5af67ad1413cfa1f"}, + {file = "grpcio_tools-1.62.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f74e0053360e0eadd75193c0c379b6d7f51d074ebbff856bd41780e1a028b38d"}, + {file = "grpcio_tools-1.62.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d5959e3df126931d28cd94dd5f0a708b7dd96019de80ab715fb922fd0c8a838d"}, + {file = "grpcio_tools-1.62.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1927934dfba4658a97c2dab267e53ed239264d40fdd5b295fc317693543db85b"}, + {file = "grpcio_tools-1.62.0-cp311-cp311-win32.whl", hash = "sha256:2f5bd22203e64e1732e149bfdd3083716d038abca294e4e2852159b3d893f9ec"}, + {file = "grpcio_tools-1.62.0-cp311-cp311-win_amd64.whl", hash = "sha256:cd1f4caeca614b04db803566473f7db0971e7a88268f95e4a529b0ace699b949"}, + {file = "grpcio_tools-1.62.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:f0884eaf6a2bbd7b03fea456e808909ee48dd4f7f455519d67defda791116368"}, + {file = "grpcio_tools-1.62.0-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:6b900ae319b6f9ac1be0ca572dfb41c23a7ff6fcbf36e3be6d3054e1e4c60de6"}, + {file = "grpcio_tools-1.62.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:3bbe79b134dfb7c98cf60e4962e31039bef824834cc7034bdf1886a2ed1097f9"}, + {file = "grpcio_tools-1.62.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:77196c7ac8741d4a2aebb023bcc2964ac65ca44180fd791640889ab2afed3e47"}, + {file = "grpcio_tools-1.62.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b65288ebe12e38dd3650fea65d82fcce0d35df1ae4a770b525c10119ee71962f"}, + {file = "grpcio_tools-1.62.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:52b216c458458f6c292e12428916e80974c5113abc505a61e7b0b9f8932a785d"}, + {file = "grpcio_tools-1.62.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:88aa62303278aec45bbb26bf679269c7890346c37140ae30e39da1070c341e11"}, + {file = "grpcio_tools-1.62.0-cp312-cp312-win32.whl", hash = "sha256:bb6802d63e42734d2baf02e1343377fe18590ed6a1f5ffbdebbbe0f8331f176b"}, + {file = "grpcio_tools-1.62.0-cp312-cp312-win_amd64.whl", hash = "sha256:d5652d3a52a2e8e1d9bdf28fbd15e21b166e31b968cd7c8c604bf31611c0bb5b"}, + {file = "grpcio_tools-1.62.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:84e27206bd884be83a7fdcef8be3c90eb1591341c0ba9b0d25ec9db1043ba2f2"}, + {file = "grpcio_tools-1.62.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:5eb63d9207b02a0fa30216907e1e7705cc2670f933e77236c6e0eb966ad3b4bf"}, + {file = "grpcio_tools-1.62.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:95e49839d49e79187c43cd63af5c206dc5743a01d7d3d2f039772fa743cbb30c"}, + {file = "grpcio_tools-1.62.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9ae5cd2f89e33a529790bf8aa59a459484edb05e4f58d4cf78836b9dfa1fab43"}, + {file = 
"grpcio_tools-1.62.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e1fd7301d762bf5984b7e7fb62fce82cff864d75f0a57e15cfd07ae1bd79133"}, + {file = "grpcio_tools-1.62.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e38d5800151e6804d500e329f7ddfb615c50eee0c1607593e3147a4b21037e40"}, + {file = "grpcio_tools-1.62.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:563a75924109e75809b2919e68d7e6ae7872e63d20258aae7899b14f6ff9e18b"}, + {file = "grpcio_tools-1.62.0-cp37-cp37m-win_amd64.whl", hash = "sha256:5f8934715577c9cc0c792b8a77f7d0dd2bb60e951161b10c5f46b60856673240"}, + {file = "grpcio_tools-1.62.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:ed6cf7ff4a10c46f85340f9c68982f9efb29f51ee4b66828310fcdf3c2d7ffd1"}, + {file = "grpcio_tools-1.62.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:1faa5006fe9e7b9e65c47bc23f7cd333fdcdd4ba35d44080303848266db5ab05"}, + {file = "grpcio_tools-1.62.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:3b526dc5566161a3a17599753838b9cfbdd4cb15b6ad419aae8a5d12053fa8ae"}, + {file = "grpcio_tools-1.62.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09db3688efd3499ce3c0b02c0bac0656abdab4cb99716f81ad879c08b92c56e"}, + {file = "grpcio_tools-1.62.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:006ea0cc16e8bf8f307326e0556e1384f24abb402cc4e6a720aa1dfe8f268647"}, + {file = "grpcio_tools-1.62.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b46ba0b6552b4375ede65e0c89491af532635347f78d52a72f8a027529e713ed"}, + {file = "grpcio_tools-1.62.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ec6f561c86fe13cff3be16f297cc05e1aa1274294524743a4cf91d971866fbb0"}, + {file = "grpcio_tools-1.62.0-cp38-cp38-win32.whl", hash = "sha256:c85391e06620d6e16a56341caae5007d0c6219beba065e1e288f2523fba6a335"}, + {file = "grpcio_tools-1.62.0-cp38-cp38-win_amd64.whl", hash = "sha256:679cf2507090e010da73e5001665c76de2a5927b2e2110e459222b1c81cb10c2"}, + {file = "grpcio_tools-1.62.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:0e87f105f1d152934759f8975ed002d5ce057b3cdf1cc6cb63fe6008671a27b9"}, + {file = "grpcio_tools-1.62.0-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:bf9f281f528e0220558d57e09b4518dec148dcb250d78bd9cbb27e09edabb3f9"}, + {file = "grpcio_tools-1.62.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:711314cb4c6c8b3d51bafaee380ffa5012bd0567ed68f1b0b1fc07492b27acab"}, + {file = "grpcio_tools-1.62.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54bb570bd963905de3bda596b35e06026552705edebbb2cb737b57aa5252b9e5"}, + {file = "grpcio_tools-1.62.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dce5f04676cf94e6e2d13d7f91ac2de79097d86675bc4d404a3c24dcc0332c88"}, + {file = "grpcio_tools-1.62.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:98ddf871c614cc0ed331c7159ebbbf5040be562279677d3bb97c2e6083539f72"}, + {file = "grpcio_tools-1.62.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f3aaf3b20c0f7063856b2432335af8f76cf580f898e04085548cde28332d6833"}, + {file = "grpcio_tools-1.62.0-cp39-cp39-win32.whl", hash = "sha256:3dee3be61d9032f777a9b4e2696ea3d0748a582cb99c672b5d41ca66821e8c87"}, + {file = "grpcio_tools-1.62.0-cp39-cp39-win_amd64.whl", hash = "sha256:f54b5181784464bd3573ae7dbcf053da18a4b7a75fe19960791f383be3d035ca"}, ] [package.dependencies] -grpcio = ">=1.62.1" +grpcio = ">=1.62.0" protobuf = ">=4.21.6,<5.0dev" setuptools = "*" @@ -1593,13 +1559,13 @@ socks = ["socksio (==1.*)"] [[package]] name = "huggingface-hub" -version 
= "0.21.4" +version = "0.20.3" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.21.4-py3-none-any.whl", hash = "sha256:df37c2c37fc6c82163cdd8a67ede261687d80d1e262526d6c0ce73b6b3630a7b"}, - {file = "huggingface_hub-0.21.4.tar.gz", hash = "sha256:e1f4968c93726565a80edf6dc309763c7b546d0cfe79aa221206034d50155531"}, + {file = "huggingface_hub-0.20.3-py3-none-any.whl", hash = "sha256:d988ae4f00d3e307b0c80c6a05ca6dbb7edba8bba3079f74cda7d9c2e562a7b6"}, + {file = "huggingface_hub-0.20.3.tar.gz", hash = "sha256:94e7f8e074475fbc67d6a71957b678e1b4a74ff1b64a644fd6cbb83da962d05d"}, ] [package.dependencies] @@ -1616,12 +1582,11 @@ all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", cli = ["InquirerPy (==0.3.4)"] dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] -hf-transfer = ["hf-transfer (>=0.1.4)"] inference = ["aiohttp", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)"] quality = ["mypy (==1.5.1)", "ruff (>=0.1.3)"] tensorflow = ["graphviz", "pydot", "tensorflow"] testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] -torch = ["safetensors", "torch"] +torch = ["torch"] typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] [[package]] @@ -1673,32 +1638,32 @@ files = [ [[package]] name = "importlib-metadata" -version = "7.0.2" +version = "7.0.1" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "importlib_metadata-7.0.2-py3-none-any.whl", hash = "sha256:f4bc4c0c070c490abf4ce96d715f68e95923320370efb66143df00199bb6c100"}, - {file = "importlib_metadata-7.0.2.tar.gz", hash = "sha256:198f568f3230878cb1b44fbd7975f87906c22336dba2e4a7f05278c281fbd792"}, + {file = "importlib_metadata-7.0.1-py3-none-any.whl", hash = "sha256:4805911c3a4ec7c3966410053e9ec6a1fecd629117df5adee56dfc9432a1081e"}, + {file = "importlib_metadata-7.0.1.tar.gz", hash = "sha256:f238736bb06590ae52ac1fab06a3a9ef1d8dce2b7a35b5ab329371d6c8f5d2cc"}, ] [package.dependencies] zipp = ">=0.5" [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] perf = ["ipython"] -testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"] +testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", 
"pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] [[package]] name = "importlib-resources" -version = "6.1.3" +version = "6.1.2" description = "Read resources from Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "importlib_resources-6.1.3-py3-none-any.whl", hash = "sha256:4c0269e3580fe2634d364b39b38b961540a7738c02cb984e98add8b4221d793d"}, - {file = "importlib_resources-6.1.3.tar.gz", hash = "sha256:56fb4525197b78544a3354ea27793952ab93f935bb4bf746b846bb1015020f2b"}, + {file = "importlib_resources-6.1.2-py3-none-any.whl", hash = "sha256:9a0a862501dc38b68adebc82970140c9e4209fc99601782925178f8386339938"}, + {file = "importlib_resources-6.1.2.tar.gz", hash = "sha256:308abf8474e2dba5f867d279237cd4076482c3de7104a40b41426370e891549b"}, ] [package.dependencies] @@ -1706,7 +1671,7 @@ zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} [package.extras] docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["jaraco.collections", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)", "zipp (>=3.17)"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)", "zipp (>=3.17)"] [[package]] name = "iniconfig" @@ -1721,13 +1686,13 @@ files = [ [[package]] name = "ipykernel" -version = "6.29.3" +version = "6.29.2" description = "IPython Kernel for Jupyter" optional = true python-versions = ">=3.8" files = [ - {file = "ipykernel-6.29.3-py3-none-any.whl", hash = "sha256:5aa086a4175b0229d4eca211e181fb473ea78ffd9869af36ba7694c947302a21"}, - {file = "ipykernel-6.29.3.tar.gz", hash = "sha256:e14c250d1f9ea3989490225cc1a542781b095a18a19447fcf2b5eaf7d0ac5bd2"}, + {file = "ipykernel-6.29.2-py3-none-any.whl", hash = "sha256:50384f5c577a260a1d53f1f59a828c7266d321c9b7d00d345693783f66616055"}, + {file = "ipykernel-6.29.2.tar.gz", hash = "sha256:3bade28004e3ff624ed57974948116670604ac5f676d12339693f3142176d3f0"}, ] [package.dependencies] @@ -1750,7 +1715,7 @@ cov = ["coverage[toml]", "curio", "matplotlib", "pytest-cov", "trio"] docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "trio"] pyqt5 = ["pyqt5"] pyside6 = ["pyside6"] -test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.23.5)", "pytest-cov", "pytest-timeout"] +test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (==0.23.4)", "pytest-cov", "pytest-timeout"] [[package]] name = "ipython" @@ -2284,18 +2249,17 @@ min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-imp [[package]] name = "mkdocs-autorefs" -version = "1.0.1" +version = "0.5.0" description = "Automatically link across pages in MkDocs." 
optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_autorefs-1.0.1-py3-none-any.whl", hash = "sha256:aacdfae1ab197780fb7a2dac92ad8a3d8f7ca8049a9cbe56a4218cd52e8da570"}, - {file = "mkdocs_autorefs-1.0.1.tar.gz", hash = "sha256:f684edf847eced40b570b57846b15f0bf57fb93ac2c510450775dcf16accb971"}, + {file = "mkdocs_autorefs-0.5.0-py3-none-any.whl", hash = "sha256:7930fcb8ac1249f10e683967aeaddc0af49d90702af111a5e390e8b20b3d97ff"}, + {file = "mkdocs_autorefs-0.5.0.tar.gz", hash = "sha256:9a5054a94c08d28855cfab967ada10ed5be76e2bfad642302a610b252c3274c0"}, ] [package.dependencies] Markdown = ">=3.3" -markupsafe = ">=2.0.1" mkdocs = ">=1.1" [[package]] @@ -2314,13 +2278,13 @@ mkdocs = ">=1.0.3" [[package]] name = "mkdocs-material" -version = "9.5.13" +version = "9.5.11" description = "Documentation that simply works" optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_material-9.5.13-py3-none-any.whl", hash = "sha256:5cbe17fee4e3b4980c8420a04cc762d8dc052ef1e10532abd4fce88e5ea9ce6a"}, - {file = "mkdocs_material-9.5.13.tar.gz", hash = "sha256:d8e4caae576312a88fd2609b81cf43d233cdbe36860d67a68702b018b425bd87"}, + {file = "mkdocs_material-9.5.11-py3-none-any.whl", hash = "sha256:788ee0f3e036dca2dc20298d65e480297d348a44c9d7b2ee05c5262983e66072"}, + {file = "mkdocs_material-9.5.11.tar.gz", hash = "sha256:7af7f8af0dea16175558f3fb9245d26c83a17199baa5f157755e63d7437bf971"}, ] [package.dependencies] @@ -2354,13 +2318,13 @@ files = [ [[package]] name = "mkdocstrings" -version = "0.24.1" +version = "0.24.0" description = "Automatic documentation from sources, for MkDocs." optional = false python-versions = ">=3.8" files = [ - {file = "mkdocstrings-0.24.1-py3-none-any.whl", hash = "sha256:b4206f9a2ca8a648e222d5a0ca1d36ba7dee53c88732818de183b536f9042b5d"}, - {file = "mkdocstrings-0.24.1.tar.gz", hash = "sha256:cc83f9a1c8724fc1be3c2fa071dd73d91ce902ef6a79710249ec8d0ee1064401"}, + {file = "mkdocstrings-0.24.0-py3-none-any.whl", hash = "sha256:f4908560c10f587326d8f5165d1908817b2e280bbf707607f601c996366a2264"}, + {file = "mkdocstrings-0.24.0.tar.gz", hash = "sha256:222b1165be41257b494a9d29b14135d2b7ca43f38161d5b10caae03b87bd4f7e"}, ] [package.dependencies] @@ -3148,13 +3112,13 @@ tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "p [[package]] name = "posthog" -version = "3.5.0" +version = "3.4.2" description = "Integrate PostHog into any python application." 
optional = true python-versions = "*" files = [ - {file = "posthog-3.5.0-py2.py3-none-any.whl", hash = "sha256:3c672be7ba6f95d555ea207d4486c171d06657eb34b3ce25eb043bfe7b6b5b76"}, - {file = "posthog-3.5.0.tar.gz", hash = "sha256:8f7e3b2c6e8714d0c0c542a2109b83a7549f63b7113a133ab2763a89245ef2ef"}, + {file = "posthog-3.4.2-py2.py3-none-any.whl", hash = "sha256:c7e79b2e585d16e93749874bcbcdad78d857037398ce0d8d6c474a04d0bd3bbe"}, + {file = "posthog-3.4.2.tar.gz", hash = "sha256:f0eafa663fbc4a942b49b6168a62a890635407044bbc7593051dcb9cc1208873"}, ] [package.dependencies] @@ -3316,47 +3280,47 @@ files = [ [[package]] name = "pyarrow" -version = "15.0.1" +version = "15.0.0" description = "Python library for Apache Arrow" optional = false python-versions = ">=3.8" files = [ - {file = "pyarrow-15.0.1-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:c2ddb3be5ea938c329a84171694fc230b241ce1b6b0ff1a0280509af51c375fa"}, - {file = "pyarrow-15.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7543ea88a0ff72f8e6baaf9bfdbec2c62aeabdbede9e4a571c71cc3bc43b6302"}, - {file = "pyarrow-15.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1519e218a6941fc074e4501088d891afcb2adf77c236e03c34babcf3d6a0d1c7"}, - {file = "pyarrow-15.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28cafa86e1944761970d3b3fc0411b14ff9b5c2b73cd22aaf470d7a3976335f5"}, - {file = "pyarrow-15.0.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:be5c3d463e33d03eab496e1af7916b1d44001c08f0f458ad27dc16093a020638"}, - {file = "pyarrow-15.0.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:47b1eda15d3aa3f49a07b1808648e1397e5dc6a80a30bf87faa8e2d02dad7ac3"}, - {file = "pyarrow-15.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:e524a31be7db22deebbbcf242b189063ab9a7652c62471d296b31bc6e3cae77b"}, - {file = "pyarrow-15.0.1-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:a476fefe8bdd56122fb0d4881b785413e025858803cc1302d0d788d3522b374d"}, - {file = "pyarrow-15.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:309e6191be385f2e220586bfdb643f9bb21d7e1bc6dd0a6963dc538e347b2431"}, - {file = "pyarrow-15.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83bc586903dbeb4365cbc72b602f99f70b96c5882e5dfac5278813c7d624ca3c"}, - {file = "pyarrow-15.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07e652daac6d8b05280cd2af31c0fb61a4490ec6a53dc01588014d9fa3fdbee9"}, - {file = "pyarrow-15.0.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:abad2e08652df153a72177ce20c897d083b0c4ebeec051239e2654ddf4d3c996"}, - {file = "pyarrow-15.0.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cde663352bc83ad75ba7b3206e049ca1a69809223942362a8649e37bd22f9e3b"}, - {file = "pyarrow-15.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:1b6e237dd7a08482a8b8f3f6512d258d2460f182931832a8c6ef3953203d31e1"}, - {file = "pyarrow-15.0.1-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:7bd167536ee23192760b8c731d39b7cfd37914c27fd4582335ffd08450ff799d"}, - {file = "pyarrow-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7c08bb31eb2984ba5c3747d375bb522e7e536b8b25b149c9cb5e1c49b0ccb736"}, - {file = "pyarrow-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0f9c1d630ed2524bd1ddf28ec92780a7b599fd54704cd653519f7ff5aec177a"}, - {file = "pyarrow-15.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5186048493395220550bca7b524420471aac2d77af831f584ce132680f55c3df"}, - {file 
= "pyarrow-15.0.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:31dc30c7ec8958da3a3d9f31d6c3630429b2091ede0ecd0d989fd6bec129f0e4"}, - {file = "pyarrow-15.0.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:3f111a014fb8ac2297b43a74bf4495cc479a332908f7ee49cb7cbd50714cb0c1"}, - {file = "pyarrow-15.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:a6d1f7c15d7f68f08490d0cb34611497c74285b8a6bbeab4ef3fc20117310983"}, - {file = "pyarrow-15.0.1-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:9ad931b996f51c2f978ed517b55cb3c6078272fb4ec579e3da5a8c14873b698d"}, - {file = "pyarrow-15.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:738f6b53ab1c2f66b2bde8a1d77e186aeaab702d849e0dfa1158c9e2c030add3"}, - {file = "pyarrow-15.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c1c3fc16bc74e33bf8f1e5a212938ed8d88e902f372c4dac6b5bad328567d2f"}, - {file = "pyarrow-15.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1fa92512128f6c1b8dde0468c1454dd70f3bff623970e370d52efd4d24fd0be"}, - {file = "pyarrow-15.0.1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:b4157f307c202cbbdac147d9b07447a281fa8e63494f7fc85081da351ec6ace9"}, - {file = "pyarrow-15.0.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:b75e7da26f383787f80ad76143b44844ffa28648fcc7099a83df1538c078d2f2"}, - {file = "pyarrow-15.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:3a99eac76ae14096c209850935057b9e8ce97a78397c5cde8724674774f34e5d"}, - {file = "pyarrow-15.0.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:dd532d3177e031e9b2d2df19fd003d0cc0520d1747659fcabbd4d9bb87de508c"}, - {file = "pyarrow-15.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ce8c89848fd37e5313fc2ce601483038ee5566db96ba0808d5883b2e2e55dc53"}, - {file = "pyarrow-15.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:862eac5e5f3b6477f7a92b2f27e560e1f4e5e9edfca9ea9da8a7478bb4abd5ce"}, - {file = "pyarrow-15.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f0ea3a29cd5cb99bf14c1c4533eceaa00ea8fb580950fb5a89a5c771a994a4e"}, - {file = "pyarrow-15.0.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:bb902f780cfd624b2e8fd8501fadab17618fdb548532620ef3d91312aaf0888a"}, - {file = "pyarrow-15.0.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:4f87757f02735a6bb4ad2e1b98279ac45d53b748d5baf52401516413007c6999"}, - {file = "pyarrow-15.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:efd3816c7fbfcbd406ac0f69873cebb052effd7cdc153ae5836d1b00845845d7"}, - {file = "pyarrow-15.0.1.tar.gz", hash = "sha256:21d812548d39d490e0c6928a7c663f37b96bf764034123d4b4ab4530ecc757a9"}, + {file = "pyarrow-15.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:0a524532fd6dd482edaa563b686d754c70417c2f72742a8c990b322d4c03a15d"}, + {file = "pyarrow-15.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:60a6bdb314affa9c2e0d5dddf3d9cbb9ef4a8dddaa68669975287d47ece67642"}, + {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:66958fd1771a4d4b754cd385835e66a3ef6b12611e001d4e5edfcef5f30391e2"}, + {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f500956a49aadd907eaa21d4fff75f73954605eaa41f61cb94fb008cf2e00c6"}, + {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6f87d9c4f09e049c2cade559643424da84c43a35068f2a1c4653dc5b1408a929"}, + {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = 
"sha256:85239b9f93278e130d86c0e6bb455dcb66fc3fd891398b9d45ace8799a871a1e"}, + {file = "pyarrow-15.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:5b8d43e31ca16aa6e12402fcb1e14352d0d809de70edd185c7650fe80e0769e3"}, + {file = "pyarrow-15.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:fa7cd198280dbd0c988df525e50e35b5d16873e2cdae2aaaa6363cdb64e3eec5"}, + {file = "pyarrow-15.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8780b1a29d3c8b21ba6b191305a2a607de2e30dab399776ff0aa09131e266340"}, + {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe0ec198ccc680f6c92723fadcb97b74f07c45ff3fdec9dd765deb04955ccf19"}, + {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:036a7209c235588c2f07477fe75c07e6caced9b7b61bb897c8d4e52c4b5f9555"}, + {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2bd8a0e5296797faf9a3294e9fa2dc67aa7f10ae2207920dbebb785c77e9dbe5"}, + {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:e8ebed6053dbe76883a822d4e8da36860f479d55a762bd9e70d8494aed87113e"}, + {file = "pyarrow-15.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:17d53a9d1b2b5bd7d5e4cd84d018e2a45bc9baaa68f7e6e3ebed45649900ba99"}, + {file = "pyarrow-15.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9950a9c9df24090d3d558b43b97753b8f5867fb8e521f29876aa021c52fda351"}, + {file = "pyarrow-15.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:003d680b5e422d0204e7287bb3fa775b332b3fce2996aa69e9adea23f5c8f970"}, + {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f75fce89dad10c95f4bf590b765e3ae98bcc5ba9f6ce75adb828a334e26a3d40"}, + {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca9cb0039923bec49b4fe23803807e4ef39576a2bec59c32b11296464623dc2"}, + {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:9ed5a78ed29d171d0acc26a305a4b7f83c122d54ff5270810ac23c75813585e4"}, + {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:6eda9e117f0402dfcd3cd6ec9bfee89ac5071c48fc83a84f3075b60efa96747f"}, + {file = "pyarrow-15.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:9a3a6180c0e8f2727e6f1b1c87c72d3254cac909e609f35f22532e4115461177"}, + {file = "pyarrow-15.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:19a8918045993349b207de72d4576af0191beef03ea655d8bdb13762f0cd6eac"}, + {file = "pyarrow-15.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d0ec076b32bacb6666e8813a22e6e5a7ef1314c8069d4ff345efa6246bc38593"}, + {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5db1769e5d0a77eb92344c7382d6543bea1164cca3704f84aa44e26c67e320fb"}, + {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2617e3bf9df2a00020dd1c1c6dce5cc343d979efe10bc401c0632b0eef6ef5b"}, + {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:d31c1d45060180131caf10f0f698e3a782db333a422038bf7fe01dace18b3a31"}, + {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:c8c287d1d479de8269398b34282e206844abb3208224dbdd7166d580804674b7"}, + {file = "pyarrow-15.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:07eb7f07dc9ecbb8dace0f58f009d3a29ee58682fcdc91337dfeb51ea618a75b"}, + {file = "pyarrow-15.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:47af7036f64fce990bb8a5948c04722e4e3ea3e13b1007ef52dfe0aa8f23cf7f"}, 
+ {file = "pyarrow-15.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93768ccfff85cf044c418bfeeafce9a8bb0cee091bd8fd19011aff91e58de540"}, + {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6ee87fd6892700960d90abb7b17a72a5abb3b64ee0fe8db6c782bcc2d0dc0b4"}, + {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:001fca027738c5f6be0b7a3159cc7ba16a5c52486db18160909a0831b063c4e4"}, + {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:d1c48648f64aec09accf44140dccb92f4f94394b8d79976c426a5b79b11d4fa7"}, + {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:972a0141be402bb18e3201448c8ae62958c9c7923dfaa3b3d4530c835ac81aed"}, + {file = "pyarrow-15.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:f01fc5cf49081426429127aa2d427d9d98e1cb94a32cb961d583a70b7c4504e6"}, + {file = "pyarrow-15.0.0.tar.gz", hash = "sha256:876858f549d540898f927eba4ef77cd549ad8d24baa3207cf1b72e5788b50e83"}, ] [package.dependencies] @@ -3577,13 +3541,13 @@ windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pymdown-extensions" -version = "10.7.1" +version = "10.7" description = "Extension pack for Python Markdown." optional = false python-versions = ">=3.8" files = [ - {file = "pymdown_extensions-10.7.1-py3-none-any.whl", hash = "sha256:f5cc7000d7ff0d1ce9395d216017fa4df3dde800afb1fb72d1c7d3fd35e710f4"}, - {file = "pymdown_extensions-10.7.1.tar.gz", hash = "sha256:c70e146bdd83c744ffc766b4671999796aba18842b268510a329f7f64700d584"}, + {file = "pymdown_extensions-10.7-py3-none-any.whl", hash = "sha256:6ca215bc57bc12bf32b414887a68b810637d039124ed9b2e5bd3325cbb2c050c"}, + {file = "pymdown_extensions-10.7.tar.gz", hash = "sha256:c0d64d5cf62566f59e6b2b690a4095c931107c250a8c8e1351c1de5f6b036deb"}, ] [package.dependencies] @@ -3595,13 +3559,13 @@ extra = ["pygments (>=2.12)"] [[package]] name = "pyparsing" -version = "3.1.2" +version = "3.1.1" description = "pyparsing module - Classes and methods to define and execute parsing grammars" optional = false python-versions = ">=3.6.8" files = [ - {file = "pyparsing-3.1.2-py3-none-any.whl", hash = "sha256:f9db75911801ed778fe61bb643079ff86601aca99fcae6345aa67292038fb742"}, - {file = "pyparsing-3.1.2.tar.gz", hash = "sha256:a1bac0ce561155ecc3ed78ca94d3c9378656ad4c94c1270de543f621420f94ad"}, + {file = "pyparsing-3.1.1-py3-none-any.whl", hash = "sha256:32c7c0b711493c72ff18a981d24f28aaf9c1fb7ed5e9667c9e84e3db623bdbfb"}, + {file = "pyparsing-3.1.1.tar.gz", hash = "sha256:ede28a1a32462f5a9705e07aea48001a08f7cf81a021585011deba701581a0db"}, ] [package.extras] @@ -3654,13 +3618,13 @@ testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xm [[package]] name = "python-dateutil" -version = "2.9.0.post0" +version = "2.8.2" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ - {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, - {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, ] [package.dependencies] @@ -3895,13 
+3859,13 @@ cffi = {version = "*", markers = "implementation_name == \"pypy\""} [[package]] name = "qdrant-client" -version = "1.8.0" +version = "1.7.3" description = "Client library for the Qdrant vector search engine" optional = true python-versions = ">=3.8" files = [ - {file = "qdrant_client-1.8.0-py3-none-any.whl", hash = "sha256:fa28d3eb64c0c57ec029c7c85c71f6c72c197f92502022655741f3632c518e29"}, - {file = "qdrant_client-1.8.0.tar.gz", hash = "sha256:2a1a3f2cbacc7adba85644cf6cfdee20401cf25764b32da479c81fb63e178d15"}, + {file = "qdrant_client-1.7.3-py3-none-any.whl", hash = "sha256:b062420ba55eb847652c7d2a26404fb1986bea13aa785763024013f96a7a915c"}, + {file = "qdrant_client-1.7.3.tar.gz", hash = "sha256:7b809be892cdc5137ae80ea3335da40c06499ad0b0072b5abc6bad79da1d29fc"}, ] [package.dependencies] @@ -3914,7 +3878,7 @@ pydantic = ">=1.10.8" urllib3 = ">=1.26.14,<3" [package.extras] -fastembed = ["fastembed (==0.2.2)"] +fastembed = ["fastembed (==0.1.1)"] [[package]] name = "referencing" @@ -4486,60 +4450,60 @@ test = ["pytest"] [[package]] name = "sqlalchemy" -version = "2.0.28" +version = "2.0.27" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.28-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0b148ab0438f72ad21cb004ce3bdaafd28465c4276af66df3b9ecd2037bf252"}, - {file = "SQLAlchemy-2.0.28-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bbda76961eb8f27e6ad3c84d1dc56d5bc61ba8f02bd20fcf3450bd421c2fcc9c"}, - {file = "SQLAlchemy-2.0.28-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feea693c452d85ea0015ebe3bb9cd15b6f49acc1a31c28b3c50f4db0f8fb1e71"}, - {file = "SQLAlchemy-2.0.28-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5da98815f82dce0cb31fd1e873a0cb30934971d15b74e0d78cf21f9e1b05953f"}, - {file = "SQLAlchemy-2.0.28-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4a5adf383c73f2d49ad15ff363a8748319ff84c371eed59ffd0127355d6ea1da"}, - {file = "SQLAlchemy-2.0.28-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56856b871146bfead25fbcaed098269d90b744eea5cb32a952df00d542cdd368"}, - {file = "SQLAlchemy-2.0.28-cp310-cp310-win32.whl", hash = "sha256:943aa74a11f5806ab68278284a4ddd282d3fb348a0e96db9b42cb81bf731acdc"}, - {file = "SQLAlchemy-2.0.28-cp310-cp310-win_amd64.whl", hash = "sha256:c6c4da4843e0dabde41b8f2e8147438330924114f541949e6318358a56d1875a"}, - {file = "SQLAlchemy-2.0.28-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:46a3d4e7a472bfff2d28db838669fc437964e8af8df8ee1e4548e92710929adc"}, - {file = "SQLAlchemy-2.0.28-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0d3dd67b5d69794cfe82862c002512683b3db038b99002171f624712fa71aeaa"}, - {file = "SQLAlchemy-2.0.28-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61e2e41656a673b777e2f0cbbe545323dbe0d32312f590b1bc09da1de6c2a02"}, - {file = "SQLAlchemy-2.0.28-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0315d9125a38026227f559488fe7f7cee1bd2fbc19f9fd637739dc50bb6380b2"}, - {file = "SQLAlchemy-2.0.28-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:af8ce2d31679006e7b747d30a89cd3ac1ec304c3d4c20973f0f4ad58e2d1c4c9"}, - {file = "SQLAlchemy-2.0.28-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:81ba314a08c7ab701e621b7ad079c0c933c58cdef88593c59b90b996e8b58fa5"}, - {file = "SQLAlchemy-2.0.28-cp311-cp311-win32.whl", hash = "sha256:1ee8bd6d68578e517943f5ebff3afbd93fc65f7ef8f23becab9fa8fb315afb1d"}, - {file = 
"SQLAlchemy-2.0.28-cp311-cp311-win_amd64.whl", hash = "sha256:ad7acbe95bac70e4e687a4dc9ae3f7a2f467aa6597049eeb6d4a662ecd990bb6"}, - {file = "SQLAlchemy-2.0.28-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d3499008ddec83127ab286c6f6ec82a34f39c9817f020f75eca96155f9765097"}, - {file = "SQLAlchemy-2.0.28-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9b66fcd38659cab5d29e8de5409cdf91e9986817703e1078b2fdaad731ea66f5"}, - {file = "SQLAlchemy-2.0.28-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bea30da1e76cb1acc5b72e204a920a3a7678d9d52f688f087dc08e54e2754c67"}, - {file = "SQLAlchemy-2.0.28-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:124202b4e0edea7f08a4db8c81cc7859012f90a0d14ba2bf07c099aff6e96462"}, - {file = "SQLAlchemy-2.0.28-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e23b88c69497a6322b5796c0781400692eca1ae5532821b39ce81a48c395aae9"}, - {file = "SQLAlchemy-2.0.28-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b6303bfd78fb3221847723104d152e5972c22367ff66edf09120fcde5ddc2e2"}, - {file = "SQLAlchemy-2.0.28-cp312-cp312-win32.whl", hash = "sha256:a921002be69ac3ab2cf0c3017c4e6a3377f800f1fca7f254c13b5f1a2f10022c"}, - {file = "SQLAlchemy-2.0.28-cp312-cp312-win_amd64.whl", hash = "sha256:b4a2cf92995635b64876dc141af0ef089c6eea7e05898d8d8865e71a326c0385"}, - {file = "SQLAlchemy-2.0.28-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e91b5e341f8c7f1e5020db8e5602f3ed045a29f8e27f7f565e0bdee3338f2c7"}, - {file = "SQLAlchemy-2.0.28-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45c7b78dfc7278329f27be02c44abc0d69fe235495bb8e16ec7ef1b1a17952db"}, - {file = "SQLAlchemy-2.0.28-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3eba73ef2c30695cb7eabcdb33bb3d0b878595737479e152468f3ba97a9c22a4"}, - {file = "SQLAlchemy-2.0.28-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5df5d1dafb8eee89384fb7a1f79128118bc0ba50ce0db27a40750f6f91aa99d5"}, - {file = "SQLAlchemy-2.0.28-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2858bbab1681ee5406650202950dc8f00e83b06a198741b7c656e63818633526"}, - {file = "SQLAlchemy-2.0.28-cp37-cp37m-win32.whl", hash = "sha256:9461802f2e965de5cff80c5a13bc945abea7edaa1d29360b485c3d2b56cdb075"}, - {file = "SQLAlchemy-2.0.28-cp37-cp37m-win_amd64.whl", hash = "sha256:a6bec1c010a6d65b3ed88c863d56b9ea5eeefdf62b5e39cafd08c65f5ce5198b"}, - {file = "SQLAlchemy-2.0.28-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:843a882cadebecc655a68bd9a5b8aa39b3c52f4a9a5572a3036fb1bb2ccdc197"}, - {file = "SQLAlchemy-2.0.28-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:dbb990612c36163c6072723523d2be7c3eb1517bbdd63fe50449f56afafd1133"}, - {file = "SQLAlchemy-2.0.28-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7e4baf9161d076b9a7e432fce06217b9bd90cfb8f1d543d6e8c4595627edb9"}, - {file = "SQLAlchemy-2.0.28-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0a5354cb4de9b64bccb6ea33162cb83e03dbefa0d892db88a672f5aad638a75"}, - {file = "SQLAlchemy-2.0.28-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:fffcc8edc508801ed2e6a4e7b0d150a62196fd28b4e16ab9f65192e8186102b6"}, - {file = "SQLAlchemy-2.0.28-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aca7b6d99a4541b2ebab4494f6c8c2f947e0df4ac859ced575238e1d6ca5716b"}, - {file = "SQLAlchemy-2.0.28-cp38-cp38-win32.whl", hash = "sha256:8c7f10720fc34d14abad5b647bc8202202f4948498927d9f1b4df0fb1cf391b7"}, - {file = "SQLAlchemy-2.0.28-cp38-cp38-win_amd64.whl", hash = 
"sha256:243feb6882b06a2af68ecf4bec8813d99452a1b62ba2be917ce6283852cf701b"}, - {file = "SQLAlchemy-2.0.28-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fc4974d3684f28b61b9a90fcb4c41fb340fd4b6a50c04365704a4da5a9603b05"}, - {file = "SQLAlchemy-2.0.28-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87724e7ed2a936fdda2c05dbd99d395c91ea3c96f029a033a4a20e008dd876bf"}, - {file = "SQLAlchemy-2.0.28-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68722e6a550f5de2e3cfe9da6afb9a7dd15ef7032afa5651b0f0c6b3adb8815d"}, - {file = "SQLAlchemy-2.0.28-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:328529f7c7f90adcd65aed06a161851f83f475c2f664a898af574893f55d9e53"}, - {file = "SQLAlchemy-2.0.28-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:df40c16a7e8be7413b885c9bf900d402918cc848be08a59b022478804ea076b8"}, - {file = "SQLAlchemy-2.0.28-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:426f2fa71331a64f5132369ede5171c52fd1df1bd9727ce621f38b5b24f48750"}, - {file = "SQLAlchemy-2.0.28-cp39-cp39-win32.whl", hash = "sha256:33157920b233bc542ce497a81a2e1452e685a11834c5763933b440fedd1d8e2d"}, - {file = "SQLAlchemy-2.0.28-cp39-cp39-win_amd64.whl", hash = "sha256:2f60843068e432311c886c5f03c4664acaef507cf716f6c60d5fde7265be9d7b"}, - {file = "SQLAlchemy-2.0.28-py3-none-any.whl", hash = "sha256:78bb7e8da0183a8301352d569900d9d3594c48ac21dc1c2ec6b3121ed8b6c986"}, - {file = "SQLAlchemy-2.0.28.tar.gz", hash = "sha256:dd53b6c4e6d960600fd6532b79ee28e2da489322fcf6648738134587faf767b6"}, + {file = "SQLAlchemy-2.0.27-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d04e579e911562f1055d26dab1868d3e0bb905db3bccf664ee8ad109f035618a"}, + {file = "SQLAlchemy-2.0.27-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fa67d821c1fd268a5a87922ef4940442513b4e6c377553506b9db3b83beebbd8"}, + {file = "SQLAlchemy-2.0.27-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c7a596d0be71b7baa037f4ac10d5e057d276f65a9a611c46970f012752ebf2d"}, + {file = "SQLAlchemy-2.0.27-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:954d9735ee9c3fa74874c830d089a815b7b48df6f6b6e357a74130e478dbd951"}, + {file = "SQLAlchemy-2.0.27-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5cd20f58c29bbf2680039ff9f569fa6d21453fbd2fa84dbdb4092f006424c2e6"}, + {file = "SQLAlchemy-2.0.27-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:03f448ffb731b48323bda68bcc93152f751436ad6037f18a42b7e16af9e91c07"}, + {file = "SQLAlchemy-2.0.27-cp310-cp310-win32.whl", hash = "sha256:d997c5938a08b5e172c30583ba6b8aad657ed9901fc24caf3a7152eeccb2f1b4"}, + {file = "SQLAlchemy-2.0.27-cp310-cp310-win_amd64.whl", hash = "sha256:eb15ef40b833f5b2f19eeae65d65e191f039e71790dd565c2af2a3783f72262f"}, + {file = "SQLAlchemy-2.0.27-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6c5bad7c60a392850d2f0fee8f355953abaec878c483dd7c3836e0089f046bf6"}, + {file = "SQLAlchemy-2.0.27-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a3012ab65ea42de1be81fff5fb28d6db893ef978950afc8130ba707179b4284a"}, + {file = "SQLAlchemy-2.0.27-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbcd77c4d94b23e0753c5ed8deba8c69f331d4fd83f68bfc9db58bc8983f49cd"}, + {file = "SQLAlchemy-2.0.27-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d177b7e82f6dd5e1aebd24d9c3297c70ce09cd1d5d37b43e53f39514379c029c"}, + {file = "SQLAlchemy-2.0.27-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:680b9a36029b30cf063698755d277885d4a0eab70a2c7c6e71aab601323cba45"}, + {file = "SQLAlchemy-2.0.27-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1306102f6d9e625cebaca3d4c9c8f10588735ef877f0360b5cdb4fdfd3fd7131"}, + {file = "SQLAlchemy-2.0.27-cp311-cp311-win32.whl", hash = "sha256:5b78aa9f4f68212248aaf8943d84c0ff0f74efc65a661c2fc68b82d498311fd5"}, + {file = "SQLAlchemy-2.0.27-cp311-cp311-win_amd64.whl", hash = "sha256:15e19a84b84528f52a68143439d0c7a3a69befcd4f50b8ef9b7b69d2628ae7c4"}, + {file = "SQLAlchemy-2.0.27-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0de1263aac858f288a80b2071990f02082c51d88335a1db0d589237a3435fe71"}, + {file = "SQLAlchemy-2.0.27-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce850db091bf7d2a1f2fdb615220b968aeff3849007b1204bf6e3e50a57b3d32"}, + {file = "SQLAlchemy-2.0.27-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8dfc936870507da96aebb43e664ae3a71a7b96278382bcfe84d277b88e379b18"}, + {file = "SQLAlchemy-2.0.27-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4fbe6a766301f2e8a4519f4500fe74ef0a8509a59e07a4085458f26228cd7cc"}, + {file = "SQLAlchemy-2.0.27-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4535c49d961fe9a77392e3a630a626af5baa967172d42732b7a43496c8b28876"}, + {file = "SQLAlchemy-2.0.27-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0fb3bffc0ced37e5aa4ac2416f56d6d858f46d4da70c09bb731a246e70bff4d5"}, + {file = "SQLAlchemy-2.0.27-cp312-cp312-win32.whl", hash = "sha256:7f470327d06400a0aa7926b375b8e8c3c31d335e0884f509fe272b3c700a7254"}, + {file = "SQLAlchemy-2.0.27-cp312-cp312-win_amd64.whl", hash = "sha256:f9374e270e2553653d710ece397df67db9d19c60d2647bcd35bfc616f1622dcd"}, + {file = "SQLAlchemy-2.0.27-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e97cf143d74a7a5a0f143aa34039b4fecf11343eed66538610debc438685db4a"}, + {file = "SQLAlchemy-2.0.27-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7b5a3e2120982b8b6bd1d5d99e3025339f7fb8b8267551c679afb39e9c7c7f1"}, + {file = "SQLAlchemy-2.0.27-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e36aa62b765cf9f43a003233a8c2d7ffdeb55bc62eaa0a0380475b228663a38f"}, + {file = "SQLAlchemy-2.0.27-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5ada0438f5b74c3952d916c199367c29ee4d6858edff18eab783b3978d0db16d"}, + {file = "SQLAlchemy-2.0.27-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b1d9d1bfd96eef3c3faedb73f486c89e44e64e40e5bfec304ee163de01cf996f"}, + {file = "SQLAlchemy-2.0.27-cp37-cp37m-win32.whl", hash = "sha256:ca891af9f3289d24a490a5fde664ea04fe2f4984cd97e26de7442a4251bd4b7c"}, + {file = "SQLAlchemy-2.0.27-cp37-cp37m-win_amd64.whl", hash = "sha256:fd8aafda7cdff03b905d4426b714601c0978725a19efc39f5f207b86d188ba01"}, + {file = "SQLAlchemy-2.0.27-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ec1f5a328464daf7a1e4e385e4f5652dd9b1d12405075ccba1df842f7774b4fc"}, + {file = "SQLAlchemy-2.0.27-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ad862295ad3f644e3c2c0d8b10a988e1600d3123ecb48702d2c0f26771f1c396"}, + {file = "SQLAlchemy-2.0.27-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48217be1de7d29a5600b5c513f3f7664b21d32e596d69582be0a94e36b8309cb"}, + {file = "SQLAlchemy-2.0.27-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e56afce6431450442f3ab5973156289bd5ec33dd618941283847c9fd5ff06bf"}, + {file = "SQLAlchemy-2.0.27-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:611068511b5531304137bcd7fe8117c985d1b828eb86043bd944cebb7fae3910"}, + {file = "SQLAlchemy-2.0.27-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b86abba762ecfeea359112b2bb4490802b340850bbee1948f785141a5e020de8"}, + {file = "SQLAlchemy-2.0.27-cp38-cp38-win32.whl", hash = "sha256:30d81cc1192dc693d49d5671cd40cdec596b885b0ce3b72f323888ab1c3863d5"}, + {file = "SQLAlchemy-2.0.27-cp38-cp38-win_amd64.whl", hash = "sha256:120af1e49d614d2525ac247f6123841589b029c318b9afbfc9e2b70e22e1827d"}, + {file = "SQLAlchemy-2.0.27-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d07ee7793f2aeb9b80ec8ceb96bc8cc08a2aec8a1b152da1955d64e4825fcbac"}, + {file = "SQLAlchemy-2.0.27-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cb0845e934647232b6ff5150df37ceffd0b67b754b9fdbb095233deebcddbd4a"}, + {file = "SQLAlchemy-2.0.27-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fc19ae2e07a067663dd24fca55f8ed06a288384f0e6e3910420bf4b1270cc51"}, + {file = "SQLAlchemy-2.0.27-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b90053be91973a6fb6020a6e44382c97739736a5a9d74e08cc29b196639eb979"}, + {file = "SQLAlchemy-2.0.27-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2f5c9dfb0b9ab5e3a8a00249534bdd838d943ec4cfb9abe176a6c33408430230"}, + {file = "SQLAlchemy-2.0.27-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:33e8bde8fff203de50399b9039c4e14e42d4d227759155c21f8da4a47fc8053c"}, + {file = "SQLAlchemy-2.0.27-cp39-cp39-win32.whl", hash = "sha256:d873c21b356bfaf1589b89090a4011e6532582b3a8ea568a00e0c3aab09399dd"}, + {file = "SQLAlchemy-2.0.27-cp39-cp39-win_amd64.whl", hash = "sha256:ff2f1b7c963961d41403b650842dc2039175b906ab2093635d8319bef0b7d620"}, + {file = "SQLAlchemy-2.0.27-py3-none-any.whl", hash = "sha256:1ab4e0448018d01b142c916cc7119ca573803a4745cfe341b8f95657812700ac"}, + {file = "SQLAlchemy-2.0.27.tar.gz", hash = "sha256:86a6ed69a71fe6b88bf9331594fa390a2adda4a49b5c06f98e47bf0d392534f8"}, ] [package.dependencies] @@ -4906,13 +4870,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "uvicorn" -version = "0.28.0" +version = "0.27.1" description = "The lightning-fast ASGI server." 
optional = true python-versions = ">=3.8" files = [ - {file = "uvicorn-0.28.0-py3-none-any.whl", hash = "sha256:6623abbbe6176204a4226e67607b4d52cc60ff62cda0ff177613645cefa2ece1"}, - {file = "uvicorn-0.28.0.tar.gz", hash = "sha256:cab4473b5d1eaeb5a0f6375ac4bc85007ffc75c3cc1768816d9e5d589857b067"}, + {file = "uvicorn-0.27.1-py3-none-any.whl", hash = "sha256:5c89da2f3895767472a35556e539fd59f7edbe9b1e9c0e1c99eebeadc61838e4"}, + {file = "uvicorn-0.27.1.tar.gz", hash = "sha256:3d9a267296243532db80c83a959a3400502165ade2c1338dea4e67915fd4745a"}, ] [package.dependencies] @@ -5604,4 +5568,4 @@ weaviate = ["weaviate-client"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.12" -content-hash = "f7a5ab7c85e79920d41e45e9bbd17f0dbc1180c52d027235a656c270d9e79346" +content-hash = "8b4cc583653becb3be9f5bc4c34cdf5ced146ba32157a5bb4bdc7885291c0403" From f8c568318f1d95985ca75899e6b7e1fd789ca80a Mon Sep 17 00:00:00 2001 From: Isaac Miller Date: Sat, 9 Mar 2024 14:18:40 -0600 Subject: [PATCH 195/243] Update poetry.lock --- poetry.lock | 37 ++++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/poetry.lock b/poetry.lock index 30ec769817..1b336906d0 100644 --- a/poetry.lock +++ b/poetry.lock @@ -151,6 +151,30 @@ files = [ {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, ] +[[package]] +name = "anthropic" +version = "0.18.1" +description = "The official Python library for the anthropic API" +optional = true +python-versions = ">=3.7" +files = [ + {file = "anthropic-0.18.1-py3-none-any.whl", hash = "sha256:b85aee64f619ce1b1964ba733a09adc4053e7bc4e6d4186001229ec191099dcf"}, + {file = "anthropic-0.18.1.tar.gz", hash = "sha256:f5d1caafd43f6cc933a79753a93531605095f040a384f6a900c3de9c3fb6694e"}, +] + +[package.dependencies] +anyio = ">=3.5.0,<5" +distro = ">=1.7.0,<2" +httpx = ">=0.23.0,<1" +pydantic = ">=1.9.0,<3" +sniffio = "*" +tokenizers = ">=0.13.0" +typing-extensions = ">=4.7,<5" + +[package.extras] +bedrock = ["boto3 (>=1.28.57)", "botocore (>=1.31.57)"] +vertex = ["google-auth (>=2,<3)"] + [[package]] name = "anyio" version = "4.3.0" @@ -871,6 +895,17 @@ files = [ graph = ["objgraph (>=1.7.2)"] profile = ["gprof2dot (>=2022.7.29)"] +[[package]] +name = "distro" +version = "1.9.0" +description = "Distro - an OS platform information API" +optional = true +python-versions = ">=3.6" +files = [ + {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, + {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, +] + [[package]] name = "dnspython" version = "2.6.1" @@ -5568,4 +5603,4 @@ weaviate = ["weaviate-client"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.12" -content-hash = "8b4cc583653becb3be9f5bc4c34cdf5ced146ba32157a5bb4bdc7885291c0403" +content-hash = "f7a5ab7c85e79920d41e45e9bbd17f0dbc1180c52d027235a656c270d9e79346" From edadecd989d98bab32f272316c8d8c23b23bf03c Mon Sep 17 00:00:00 2001 From: Arnav Singhvi Date: Sat, 9 Mar 2024 12:20:04 -0800 Subject: [PATCH 196/243] update with self.history --- docs/docs/deep-dive/language_model_clients/custom-lm-client.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/docs/deep-dive/language_model_clients/custom-lm-client.mdx b/docs/docs/deep-dive/language_model_clients/custom-lm-client.mdx index edef34da35..b27f2cf201 100644 --- a/docs/docs/deep-dive/language_model_clients/custom-lm-client.mdx +++ 
b/docs/docs/deep-dive/language_model_clients/custom-lm-client.mdx @@ -52,6 +52,7 @@ def __init__(model, api_key): self.model = model self.api_key = api_key self.provider = "default" + self.history = [] self.base_url = "https://api.anthropic.com/v1/messages" ``` From 2560b2955c87b5aeb9f8ec0a22f4c87cd6d2c94c Mon Sep 17 00:00:00 2001 From: Darin <86675935+darinkishore@users.noreply.github.com> Date: Sat, 9 Mar 2024 22:19:25 -0800 Subject: [PATCH 197/243] higher poetry openai req --- pyproject.toml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 04204686d2..d9ff8ccc6b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,12 @@ classifiers = [ "Programming Language :: Python :: 3", # removed 3.8 "Programming Language :: Python :: 3.9", ] + # We have both project and tool.poetry.dependencies. Should we remove one? +# tool.poetry.dependencies is a convenience thing for poetry users. +# project dependencies function similarly to requirements.txt, +# `pip install .` will pull from pyproject.toml dependencies + dependencies = [ "backoff~=2.2.1", "joblib~=1.3.2", @@ -77,7 +82,7 @@ python = ">=3.9,<3.12" pydantic = "2.5.0" backoff = "^2.2.1" joblib = "^1.3.2" -openai = "^0.28.1" +openai = ">=0.28.1,<2.0.0" pandas = "^2.1.1" regex = "^2023.10.3" ujson = "^5.8.0" From ab65929f555a995b8462a53abe98250d67f38316 Mon Sep 17 00:00:00 2001 From: Thomas Dybdahl Ahle Date: Sun, 10 Mar 2024 11:38:15 -0700 Subject: [PATCH 198/243] Update README.md Updated table of contents --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 3d96788cec..72245aaf46 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,8 @@ If you need help thinking about your task, we recently created a [Discord server 1. **[Tutorials & Documentation](#2-documentation)** 1. **[Framework Syntax](#3-syntax-youre-in-charge-of-the-workflowits-free-form-python-code)** 1. **[Compiling: Two Powerful Concepts](#4-two-powerful-concepts-signatures--teleprompters)** -1. **[FAQ: Is DSPy right for me?](#5-faq-is-dspy-right-for-me)** +1. **[Pydantic Types](#5-pydantic-types)** +1. 
**[FAQ: Is DSPy right for me?](#6-faq-is-dspy-right-for-me)**
 
 
 
From d73efde0032d4160ea12db1e19ba2e66d42fc3a5 Mon Sep 17 00:00:00 2001
From: Raja Rajendran
Date: Mon, 11 Mar 2024 00:22:08 +0530
Subject: [PATCH 199/243] run ruff check --fix-only dspy/retrieve/faiss_rm.py

---
 dspy/retrieve/faiss_rm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dspy/retrieve/faiss_rm.py b/dspy/retrieve/faiss_rm.py
index 37460f8951..f321d87364 100755
--- a/dspy/retrieve/faiss_rm.py
+++ b/dspy/retrieve/faiss_rm.py
@@ -3,7 +3,7 @@
 """
 import logging
-from typing import Union, Optional
+from typing import Optional, Union
 
 import numpy as np
 
From 0c1d1b1b2c9b5d6dc6d565a84bfd8f17c273669d Mon Sep 17 00:00:00 2001
From: Quajak
Date: Sun, 10 Mar 2024 16:52:55 -0400
Subject: [PATCH 200/243] Fix return_all_scores in evaluate again to return all
 (#625)

When both `return_all_scores` and `return_outputs` are set, `Evaluate` now returns the per-example scores as a third element instead of dropping them.

---
 dspy/evaluate/evaluate.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dspy/evaluate/evaluate.py b/dspy/evaluate/evaluate.py
index 1099ede890..486e4b68e9 100644
--- a/dspy/evaluate/evaluate.py
+++ b/dspy/evaluate/evaluate.py
@@ -219,7 +219,7 @@ def wrapped_program(example_idx, example):
             ipython_display(HTML(message))
 
     if return_all_scores and return_outputs:
-        return round(100 * ncorrect / ntotal, 2), results
+        return round(100 * ncorrect / ntotal, 2), results, [score for *_, score in reordered_devset]
     elif return_all_scores:
         return round(100 * ncorrect / ntotal, 2), [score for *_, score in reordered_devset]
     elif return_outputs:
From a5bc0c94571cec1379fe65c0d038ba773dee4c52 Mon Sep 17 00:00:00 2001
From: Tomaz Bratanic
Date: Mon, 11 Mar 2024 18:15:09 +0100
Subject: [PATCH 201/243] fix neo4j docs

The usage example iterated a non-existent `results.passages` and printed an undefined `result` variable.

---
 docs/api/retrieval_model_clients/Neo4jRM.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/api/retrieval_model_clients/Neo4jRM.md b/docs/api/retrieval_model_clients/Neo4jRM.md
index 2bb2ddd2f1..6b408b780c 100644
--- a/docs/api/retrieval_model_clients/Neo4jRM.md
+++ b/docs/api/retrieval_model_clients/Neo4jRM.md
@@ -78,6 +78,6 @@ retriever_model = Neo4jRM(
 
 results = retriever_model("Explore the significance of quantum computing", k=3)
 
-for passage in results.passages:
-    print("Document:", result, "\n")
+for passage in results:
+    print("Document:", passage, "\n")
 ```
From 56d4378446f6f296346f83110d61163ea16d4fd9 Mon Sep 17 00:00:00 2001
From: Herumb Shandilya
Date: Mon, 11 Mar 2024 23:00:53 +0530
Subject: [PATCH 202/243] Restructure synthesizer into a package; add
 example-driven input generation, separate input/output LMs, and
 teacher-based output generation
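
This splits `dspy/experimental/synthesizer.py` into a `dspy/experimental/synthesizer/` package
(`config.py`, `instructions.py`, `signatures.py`, `synthesizer.py`, `utils.py`). `Synthesizer` now
takes a `SynthesizerArguments` config: input and output generation can run on different LMs, outputs
can come from a teacher module instead of the default predictor, and input generation can be grounded
on sampled pre-generated examples via `num_example_for_optim`.

A minimal usage sketch of the new API; the LM client, model name, and `trainset` below are
illustrative placeholders (any configured DSPy LM and any list of `dspy.Example` seeds), not part of
this diff:

```python
import dspy
from dspy.experimental.synthesizer import Synthesizer
from dspy.experimental.synthesizer.config import SynthesizerArguments

lm = dspy.OpenAI(model="gpt-3.5-turbo")  # placeholder LM client
dspy.settings.configure(lm=lm)

config = SynthesizerArguments(
    input_lm_model=lm,           # LM used to generate synthetic inputs
    output_lm_model=lm,          # LM used to generate the matching outputs
    output_teacher_module=None,  # or a dspy.Module that produces the outputs instead
    num_example_for_optim=3,     # ground each batch on 3 sampled seed examples
)

synthesizer = Synthesizer(config)

# trainset: a list of dspy.Example seed datapoints (needs at least 3 here,
# since num_example_for_optim samples 3 of them per iteration).
synthetic_data = synthesizer.generate(ground_source=trainset, num_data=20, batch_size=1)
synthesizer.export(synthetic_data, path="synthetic_data.jsonl")
```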
---
 dspy/experimental/synthesizer/__init__.py     |   1 +
 dspy/experimental/synthesizer/config.py       |  27 +++
 dspy/experimental/synthesizer/instructions.py |   5 +
 dspy/experimental/synthesizer/signatures.py   |  62 +++++++
 .../{ => synthesizer}/synthesizer.py          | 157 ++++++++----------
 dspy/experimental/synthesizer/utils.py        |  22 +++
 6 files changed, 186 insertions(+), 88 deletions(-)
 create mode 100644 dspy/experimental/synthesizer/__init__.py
 create mode 100644 dspy/experimental/synthesizer/config.py
 create mode 100644 dspy/experimental/synthesizer/instructions.py
 create mode 100644 dspy/experimental/synthesizer/signatures.py
 rename dspy/experimental/{ => synthesizer}/synthesizer.py (50%)
 create mode 100644 dspy/experimental/synthesizer/utils.py

diff --git a/dspy/experimental/synthesizer/__init__.py b/dspy/experimental/synthesizer/__init__.py
new file mode 100644
index 0000000000..44d2c775d8
--- /dev/null
+++ b/dspy/experimental/synthesizer/__init__.py
@@ -0,0 +1 @@
+from .synthesizer import *
\ No newline at end of file
diff --git a/dspy/experimental/synthesizer/config.py b/dspy/experimental/synthesizer/config.py
new file mode 100644
index 0000000000..646df9f026
--- /dev/null
+++ b/dspy/experimental/synthesizer/config.py
@@ -0,0 +1,27 @@
+import dspy
+
+from typing import Optional, Union
+from pydantic import BaseModel, ConfigDict, model_validator
+
+class SynthesizerArguments(BaseModel):
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    # [TODO]
+    feedback_mode: Optional[str] = None
+    num_example_for_feedback: Optional[int] = None
+
+    input_lm_model: Optional[dspy.LM] = None
+    output_lm_model: Optional[dspy.LM] = None
+    output_teacher_module: Optional[Union[dspy.Module, dspy.Predict]] = None
+
+    num_example_for_optim: Optional[int] = None
+
+    @model_validator(mode="after")
+    def validate_feedback_mode(self):
+        if self.feedback_mode and self.feedback_mode not in ["human", "llm"]:
+            raise ValueError("Feedback mode should be either 'human' or 'llm'.")
+
+        if self.feedback_mode and not self.num_example_for_feedback:
+            raise ValueError("Number of examples for feedback is required when feedback mode is provided.")
+
+        return self
\ No newline at end of file
diff --git a/dspy/experimental/synthesizer/instructions.py b/dspy/experimental/synthesizer/instructions.py
new file mode 100644
index 0000000000..1e0f13ee78
--- /dev/null
+++ b/dspy/experimental/synthesizer/instructions.py
@@ -0,0 +1,5 @@
+INPUT_GENERATION_TASK_WITH_EXAMPLES = f"""Create synthetic data using the task description and the provided knowledge seed. Your task is to generate diverse and imaginative data that aligns with the given task description and knowledge seed. You are encouraged to be creative and not limit yourself, allowing for a wide range of synthetic data that reflects the characteristics and details provided in the task description. The data should be unique and varied, showcasing originality and creativity while maintaining relevance to the task and knowledge seed. 
+
+Additionally, I'll be providing you with some data I generated beforehand; make sure the data you generate is consistent with the task I provided but different from the data I provided in every way possible."""
+
+INPUT_GENERATION_TASK_WITH_FEEDBACK = f""""""
\ No newline at end of file
diff --git a/dspy/experimental/synthesizer/signatures.py b/dspy/experimental/synthesizer/signatures.py
new file mode 100644
index 0000000000..46b2f81265
--- /dev/null
+++ b/dspy/experimental/synthesizer/signatures.py
@@ -0,0 +1,62 @@
+import dspy
+
+from .utils import format_examples
+
+class UnderstandTask(dspy.Signature):
+    """I'll be providing you with a task description; your task is to prepare a concise, comprehensible summary that captures the broad essence and purpose of the task this description aims to address. Your summary should illuminate the general objective and the type of problem being solved, offering a clear picture of what the task entails at a high level. Avoid getting into the nuances of individual datapoints, specifics about models, examples, algorithms, or any intricate technicalities. Your explanation should serve to clarify the task's overall goal and its basic premise, without touching on methodologies or solutions."""
+
+    task_description = dspy.InputField(
+        prefix="Task Description:",
+        desc="Description of the task.",
+    )
+    explanation = dspy.OutputField(
+        prefix="Explanation:",
+        desc="Explanation of the task.",
+    )
+
+class ExplainTask(dspy.Signature):
+    """Analyze the provided set of datapoints carefully, and prepare a concise, comprehensible summary that captures the broad essence and purpose of the task these datapoints aim to address. Your summary should illuminate the general objective and the type of problem being solved, offering a clear picture of what the task entails at a high level. Avoid getting into the nuances of individual datapoints, specifics about models, examples, algorithms, or any intricate technicalities. Your explanation should serve to clarify the task's overall goal and its basic premise, without touching on methodologies or solutions."""
+
+    examples = dspy.InputField(
+        prefix="Examples Datapoints:-",
+        desc="List of datapoints to analyze and explain the task.",
+        format=format_examples,
+    )
+    explanation = dspy.OutputField(
+        prefix="Explanation:",
+        desc="Explanation of the task.",
+    )
+
+class GenerateFieldDescription(dspy.Signature):
+    """Generate a concise and informative description for a given field based on the provided name and task description. This description should be no longer than 10 words and should be in simple English."""
+
+    task_description = dspy.InputField(
+        prefix="Task Description:",
+        desc="Description of the task the field is an input to.",
+    )
+    field_name = dspy.InputField(
+        prefix="Field Name:",
+        desc="Name of the field to generate synthetic data for.",
+    )
+    field_description = dspy.OutputField(
+        prefix="Field Description:",
+        desc="Description of the field.",
+    )
+
+class GenerateInputFieldsData(dspy.Signature):
+    """Create synthetic data using the task description and the provided knowledge seed. Your task is to generate diverse and imaginative data that aligns with the given task description and knowledge seed. You are encouraged to be creative and not limit yourself, allowing for a wide range of synthetic data that reflects the characteristics and details provided in the task description. 
The data should be unique and varied, showcasing originality and creativity while maintaining relevance to the task and knowledge seed. + +A knowledge seed is the index of the knowledge base you have, each index represents a different knowledge base.""" + + knowledge_seed = dspy.InputField( + prefix="Knowledge Seed:", + desc="Seed for the knowledge base search to base the inputs around.", + format=lambda x: str(x), + ) + task_description = dspy.InputField( + prefix="Task Description:", + desc="Description of the task the field is an input to.", + ) + +class GenerateOutputFieldsData(dspy.Signature): + pass \ No newline at end of file diff --git a/dspy/experimental/synthesizer.py b/dspy/experimental/synthesizer/synthesizer.py similarity index 50% rename from dspy/experimental/synthesizer.py rename to dspy/experimental/synthesizer/synthesizer.py index 771b12c981..4d62b3e773 100644 --- a/dspy/experimental/synthesizer.py +++ b/dspy/experimental/synthesizer/synthesizer.py @@ -1,93 +1,31 @@ +import dspy import random -from collections.abc import Mapping -from typing import List, Union from datasets import Dataset from tqdm import tqdm, trange +from typing import List, Union, Optional, Mapping -import dspy - +from .signatures import ( + ExplainTask, + GenerateFieldDescription, + GenerateInputFieldsData, + GenerateOutputFieldsData, + UnderstandTask, +) +from .config import SynthesizerArguments +from .instructions import INPUT_GENERATION_TASK_WITH_EXAMPLES +from .utils import format_examples -def format_examples(examples: List[dspy.Example]) -> str: - if isinstance(examples, str): - return examples - - formatted_example = "" - - for example in examples: - input_keys = example.inputs().keys() - label_keys = example.labels().keys() - - formatted_example += "Inputs:\n" - for key in input_keys: - formatted_example += f"{key}: {example[key]}\n" - - formatted_example += "Outputs:\n" - for key in label_keys: - formatted_example += f"{key}: {example[key]}\n" - - return formatted_example - -class UnderstandTask(dspy.Signature): - """I'll be providing you a task description, your task is to prepare a concise, comprehensible summary that captures the broad essence and purpose of the task this description aim to address. Your summary should illuminate the general objective and the type of problem being solved, offering a clear picture of what the task entails at a high level. Avoid getting into the nuances of individual datapoints, specifics about models, examples, algorithms, or any intricate technicalities. Your explanation should serve to clarify the task's overall goal and its basic premise, without touching on methodologies or solutions.""" - - task_description = dspy.InputField( - prefix="Task Description:", - desc="Description of the task.", - ) - explanation = dspy.OutputField( - prefix="Task Description:", - desc="Explanation of the task.", - ) - -class ExplainTask(dspy.Signature): - """Analyze the provided set of datapoints carefully, and prepare a concise, comprehensible summary that captures the broad essence and purpose of the task these datapoints aim to address. Your summary should illuminate the general objective and the type of problem being solved, offering a clear picture of what the task entails at a high level. Avoid getting into the nuances of individual datapoints, specifics about models, examples, algorithms, or any intricate technicalities. 
Your explanation should serve to clarify the task's overall goal and its basic premise, without touching on methodologies or solutions.""" - - examples = dspy.InputField( - prefix="Examples Datapoints:-", - desc="List of datapoints to analyze and explain the task.", - format=format_examples, - ) - explanation = dspy.OutputField( - prefix="Task Description:", - desc="Explanation of the task.", - ) - -class GenerateFieldDescription(dspy.Signature): - """Generate a concise and informative description for a given field based on the provided name and task description. This description should be no longer than 10 words and should be in simple english.""" - - task_description = dspy.InputField( - prefix="Task Description:", - desc="Description of the task the field is an input to.", - ) - field_name = dspy.InputField( - prefix="Field Name:", - desc="Name of the field to generate synthetic data for.", - ) - field_description = dspy.OutputField( - prefix="Field Description:", - desc="Description of the field.", - ) - -class GenerateInputFieldsData(dspy.Signature): - """Create synthetic data using the task description and the provided knowledge seed. Your task is to generate diverse and imaginative data that aligns with the given task description and knowledge seed. You are encouraged to be creative and not limit yourself, allowing for a wide range of synthetic data that reflects the characteristics and details provided in the task description. The data should be unique and varied, showcasing originality and creativity while maintaining relevance to the task and knowledge seed.""" - - knowledge_seed = dspy.InputField( - prefix="Knowledge Seed:", - desc="Seed for the knowledge base search to base the inputs around.", - format=lambda x: str(x), - ) - task_description = dspy.InputField( - prefix="Task Description:", - desc="Description of the task the field is an input to.", - ) - -class GenerateOutputFieldsData(dspy.Signature): - pass +__all__ = ["Synthesizer"] class Synthesizer: - def __init__(self): + def __init__(self, config: SynthesizerArguments): + self.config = config + self.input_lm = config.input_lm_model or dspy.settings.lm + self.output_lm = config.output_lm_model or dspy.settings.lm + self.explain_task = dspy.Predict(ExplainTask) + self.understand_task = dspy.Predict(UnderstandTask) self.generate_field_description = dspy.Predict(GenerateFieldDescription) self.generate_input_data = GenerateInputFieldsData @@ -111,7 +49,12 @@ def _get_field_data(self, key: str, keys_dict: Mapping[str, str]): return field_name, field_description - def _prepare_synthetic_data_predictors(self, input_keys: Mapping[str, str], output_keys: Mapping[str, str], task_description: str): + def _prepare_synthetic_data_predictors( + self, + input_keys: Mapping[str, str], + output_keys: Mapping[str, str], + ground_source: Optional[Union[List[dspy.Example], dspy.Signature]] = None, + ): for key in tqdm(input_keys, desc="Preparing Input Fields"): field_name, field_description = self._get_field_data(key, input_keys) @@ -125,6 +68,17 @@ def _prepare_synthetic_data_predictors(self, input_keys: Mapping[str, str], outp output_field, ) + if ground_source: + self.generate_input_data = self.generate_input_data.insert( + -1, + "ground_source", + dspy.InputField( + prefix=f"Pre-Generated Examples:", + desc="Pre-Generated Examples to differ the inputs around.", + format=format_examples, + ), + ) + input_field = dspy.InputField( prefix=f"{field_name}:", desc=field_description, @@ -152,7 +106,10 @@ def 
_prepare_synthetic_data_predictors(self, input_keys: Mapping[str, str], outp def _get_dataset_metadata(self, ground_source: Union[List[dspy.Example], dspy.Signature]): if isinstance(ground_source, dspy.SignatureMeta): - task_description = self.explain_task(examples=ground_source.__doc__).explanation + task_description = ground_source.__doc__ + if task_description.startswith("Given the fields"): + task_description = self.understand_task(examples=ground_source.__doc__).explanation + input_keys = {k:v.json_schema_extra["desc"] for k,v in ground_source.input_fields.items()} output_keys = {k:v.json_schema_extra["desc"] for k,v in ground_source.output_fields.items()} @@ -172,17 +129,19 @@ def generate( self, ground_source: Union[List[dspy.Example], dspy.Signature], num_data: int, - batch_size: int = None, + batch_size: int = 1, ): batch_size = batch_size or 1 task_description, input_keys, output_keys = self._get_dataset_metadata(ground_source) + if self.config.num_example_for_optim: + self.generate_input_data.__doc__ = INPUT_GENERATION_TASK_WITH_EXAMPLES self.generate_output_data.__doc__ = task_description self.input_predictor, self.output_predictor = self._prepare_synthetic_data_predictors( input_keys=input_keys, output_keys=output_keys, - task_description=task_description, + ground_source=ground_source if self.config.num_example_for_optim else None, ) data = [] @@ -191,7 +150,23 @@ def generate( iter_temperature = 0.7+0.01*idx iter_seed = random.randint(0, 1000000) - inputs = self.input_predictor(task_description=task_description, knowledge_seed=iter_seed, config=dict(temperature=iter_temperature, n=batch_size)) + inputs = None + + with dspy.context(lm=self.input_lm): + if self.config.num_example_for_optim: + example_for_optimization = random.sample(ground_source, self.config.num_example_for_optim) + inputs = self.input_predictor( + task_description=task_description, + knowledge_seed=iter_seed, + ground_source=example_for_optimization, + config=dict(temperature=iter_temperature, n=batch_size) + ) + else: + inputs = self.input_predictor( + task_description=task_description, + knowledge_seed=iter_seed, + config=dict(temperature=iter_temperature, n=batch_size) + ) input_kwargs = [{ key: getattr(completions, key) @@ -199,7 +174,14 @@ def generate( } for completions in inputs.completions] for kwargs in input_kwargs: - outputs = self.output_predictor(**kwargs, config=dict(temperature=iter_temperature)) + outputs = None + + with dspy.context(lm=self.output_lm, temperature=iter_temperature): + if self.config.output_teacher_module: + outputs = self.config.output_teacher_module(**kwargs) + + else: + outputs = self.output_predictor(**kwargs, config=dict(temperature=iter_temperature)) output_kwargs = { key: getattr(outputs, key) @@ -210,7 +192,6 @@ def generate( return data - def export(self, data: List[dspy.Example], path: str, mode: str = None, **kwargs): extention = mode or path.split(".")[-1] diff --git a/dspy/experimental/synthesizer/utils.py b/dspy/experimental/synthesizer/utils.py new file mode 100644 index 0000000000..b52ae90169 --- /dev/null +++ b/dspy/experimental/synthesizer/utils.py @@ -0,0 +1,22 @@ +import dspy +from typing import List + +def format_examples(examples: List[dspy.Example]) -> str: + if isinstance(examples, str): + return examples + + formatted_example = "" + + for example in examples: + input_keys = example.inputs().keys() + label_keys = example.labels().keys() + + formatted_example += "Inputs:\n" + for key in input_keys: + formatted_example += f"{key}: {example[key]}\n" + + 
formatted_example += "Outputs:\n" + for key in label_keys: + formatted_example += f"{key}: {example[key]}\n" + + return formatted_example \ No newline at end of file From 0e135534f755da83bdde56f36d7b808ec30f7c1b Mon Sep 17 00:00:00 2001 From: Herumb Shandilya Date: Mon, 11 Mar 2024 23:03:36 +0530 Subject: [PATCH 203/243] typo fixes --- dspy/experimental/synthesizer/signatures.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dspy/experimental/synthesizer/signatures.py b/dspy/experimental/synthesizer/signatures.py index 46b2f81265..1ca0cb759a 100644 --- a/dspy/experimental/synthesizer/signatures.py +++ b/dspy/experimental/synthesizer/signatures.py @@ -3,7 +3,7 @@ from .utils import format_examples class UnderstandTask(dspy.Signature): - """I'll be providing you a task description, your task is to prepare a concise, comprehensible summary that captures the broad essence and purpose of the task this description aim to address. Your summary should illuminate the general objective and the type of problem being solved, offering a clear picture of what the task entails at a high level. Avoid getting into the nuances of individual datapoints, specifics about models, examples, algorithms, or any intricate technicalities. Your explanation should serve to clarify the task's overall goal and its basic premise, without touching on methodologies or solutions.""" + """I'll be providing you a task description. Your task is to prepare a concise, comprehensible summary that captures the broad essence and purpose of this task description. Your summary should illuminate the general objective and the type of problem being solved, offering a clear picture of what the task entails at a high level. Avoid getting into the nuances or specifics of individual datapoints, models, examples, algorithms, or any intricate technicalities. Your explanation should serve to clarify the task's overall goal and its basic premise without touching on methodologies or solutions.""" task_description = dspy.InputField( prefix="Task Description:", @@ -18,7 +18,7 @@ class ExplainTask(dspy.Signature): """Analyze the provided set of datapoints carefully, and prepare a concise, comprehensible summary that captures the broad essence and purpose of the task these datapoints aim to address. Your summary should illuminate the general objective and the type of problem being solved, offering a clear picture of what the task entails at a high level. Avoid getting into the nuances of individual datapoints, specifics about models, examples, algorithms, or any intricate technicalities. 
Your explanation should serve to clarify the task's overall goal and its basic premise, without touching on methodologies or solutions.""" examples = dspy.InputField( - prefix="Examples Datapoints:-", + prefix="Examples Datapoints:", desc="List of datapoints to analyze and explain the task.", format=format_examples, ) From 5a2c7141c11e0e6b0bc2e9fc83843bbcf76cc4db Mon Sep 17 00:00:00 2001 From: Herumb Shandilya Date: Mon, 11 Mar 2024 23:16:48 +0530 Subject: [PATCH 204/243] lint fixes --- dspy/experimental/synthesizer/config.py | 6 ++++-- dspy/experimental/synthesizer/instructions.py | 4 ++-- dspy/experimental/synthesizer/signatures.py | 1 + dspy/experimental/synthesizer/synthesizer.py | 16 +++++++++------- dspy/experimental/synthesizer/utils.py | 4 +++- 5 files changed, 19 insertions(+), 12 deletions(-) diff --git a/dspy/experimental/synthesizer/config.py b/dspy/experimental/synthesizer/config.py index 646df9f026..609efbe126 100644 --- a/dspy/experimental/synthesizer/config.py +++ b/dspy/experimental/synthesizer/config.py @@ -1,8 +1,10 @@ -import dspy +from typing import Optional, Union -from typing import Union, List, Optional from pydantic import BaseModel, field_validator +import dspy + + class SynthesizerArguments(BaseModel): # [TODO] feedback_mode: Optional[str] = None diff --git a/dspy/experimental/synthesizer/instructions.py b/dspy/experimental/synthesizer/instructions.py index 1e0f13ee78..2fa65493ab 100644 --- a/dspy/experimental/synthesizer/instructions.py +++ b/dspy/experimental/synthesizer/instructions.py @@ -1,5 +1,5 @@ -INPUT_GENERATION_TASK_WITH_EXAMPLES = f"""Create synthetic data using the task description and the provided knowledge seed. Your task is to generate diverse and imaginative data that aligns with the given task description and knowledge seed. You are encouraged to be creative and not limit yourself, allowing for a wide range of synthetic data that reflects the characteristics and details provided in the task description. The data should be unique and varied, showcasing originality and creativity while maintaining relevance to the task and knowledge seed. +INPUT_GENERATION_TASK_WITH_EXAMPLES = """Create synthetic data using the task description and the provided knowledge seed. Your task is to generate diverse and imaginative data that aligns with the given task description and knowledge seed. You are encouraged to be creative and not limit yourself, allowing for a wide range of synthetic data that reflects the characteristics and details provided in the task description. The data should be unique and varied, showcasing originality and creativity while maintaining relevance to the task and knowledge seed. Additionally I'll be providing you some data I generated before hand, make sure the data you generate if consistent with task I provided but different from the data I provided in every way possible.""" -INPUT_GENERATION_TASK_WITH_FEEDBACK = f"""""" \ No newline at end of file +INPUT_GENERATION_TASK_WITH_FEEDBACK = """""" \ No newline at end of file diff --git a/dspy/experimental/synthesizer/signatures.py b/dspy/experimental/synthesizer/signatures.py index 1ca0cb759a..0597dfdd59 100644 --- a/dspy/experimental/synthesizer/signatures.py +++ b/dspy/experimental/synthesizer/signatures.py @@ -2,6 +2,7 @@ from .utils import format_examples + class UnderstandTask(dspy.Signature): """I'll be providing you a task description. Your task is to prepare a concise, comprehensible summary that captures the broad essence and purpose of this task description. 
Your summary should illuminate the general objective and the type of problem being solved, offering a clear picture of what the task entails at a high level. Avoid getting into the nuances or specifics of individual datapoints, models, examples, algorithms, or any intricate technicalities. Your explanation should serve to clarify the task's overall goal and its basic premise without touching on methodologies or solutions.""" diff --git a/dspy/experimental/synthesizer/synthesizer.py b/dspy/experimental/synthesizer/synthesizer.py index 4d62b3e773..d85f2cdb43 100644 --- a/dspy/experimental/synthesizer/synthesizer.py +++ b/dspy/experimental/synthesizer/synthesizer.py @@ -1,10 +1,14 @@ -import dspy import random +from collections.abc import Mapping +from typing import List, Optional, Union from datasets import Dataset from tqdm import tqdm, trange -from typing import List, Union, Optional, Mapping +import dspy + +from .config import SynthesizerArguments +from .instructions import INPUT_GENERATION_TASK_WITH_EXAMPLES from .signatures import ( ExplainTask, GenerateFieldDescription, @@ -12,8 +16,6 @@ GenerateOutputFieldsData, UnderstandTask, ) -from .config import SynthesizerArguments -from .instructions import INPUT_GENERATION_TASK_WITH_EXAMPLES from .utils import format_examples __all__ = ["Synthesizer"] @@ -73,7 +75,7 @@ def _prepare_synthetic_data_predictors( -1, "ground_source", dspy.InputField( - prefix=f"Pre-Generated Examples:", + prefix="Pre-Generated Examples:", desc="Pre-Generated Examples to differ the inputs around.", format=format_examples, ), @@ -159,13 +161,13 @@ def generate( task_description=task_description, knowledge_seed=iter_seed, ground_source=example_for_optimization, - config=dict(temperature=iter_temperature, n=batch_size) + config=dict(temperature=iter_temperature, n=batch_size), ) else: inputs = self.input_predictor( task_description=task_description, knowledge_seed=iter_seed, - config=dict(temperature=iter_temperature, n=batch_size) + config=dict(temperature=iter_temperature, n=batch_size), ) input_kwargs = [{ diff --git a/dspy/experimental/synthesizer/utils.py b/dspy/experimental/synthesizer/utils.py index b52ae90169..f08b142e1a 100644 --- a/dspy/experimental/synthesizer/utils.py +++ b/dspy/experimental/synthesizer/utils.py @@ -1,6 +1,8 @@ -import dspy from typing import List +import dspy + + def format_examples(examples: List[dspy.Example]) -> str: if isinstance(examples, str): return examples From 2d845de186b5d47a34c3f843cfc250be679087c0 Mon Sep 17 00:00:00 2001 From: Thomas Dybdahl Ahle Date: Mon, 11 Mar 2024 11:53:43 -0700 Subject: [PATCH 205/243] Update 8-typed_predictors.md A few fixes to the typed docs --- .../building-blocks/8-typed_predictors.md | 31 +++++++++++-------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/docs/docs/building-blocks/8-typed_predictors.md b/docs/docs/building-blocks/8-typed_predictors.md index e8c7167c19..9fe1785f8c 100644 --- a/docs/docs/building-blocks/8-typed_predictors.md +++ b/docs/docs/building-blocks/8-typed_predictors.md @@ -16,12 +16,12 @@ Let's take a simple task as an example i.e. 
given the `context` and `query`, the from pydantic import BaseModel, Field class Input(BaseModel): - context: str = Field(..., description="The context for the question") - query: str = Field(..., description="The question to be answered") + context: str = Field(description="The context for the question") + query: str = Field(description="The question to be answered") class Output(BaseModel): - answer: str = Field(..., description="The answer for the question") - factual_: float = Field(..., description="The confidence score for the answer") + answer: str = Field(description="The answer for the question") + confidence: float = Field(ge=0, le=1, description="The confidence score for the answer") ``` As you can see, we can describe the attributes by defining a simple Signature that takes in the input and returns the output. @@ -46,6 +46,11 @@ predictor = dspy.TypedPredictor(QASignature) Similar to other modules, we pass the `QASignature` to `dspy.TypedPredictor` which enforces the typed constraints. +And similarly to `dspy.Predict`, we can also use a "string signature", which we type as: +```python +predictor = dspy.TypedPredictor("input:Input -> output:Output") +``` + ### I/O in Typed Predictors Now let's test out the Typed Predictor by providing some sample input to the predictor and verifying the output type. We can create an `Input` instance and pass it to the predictor to get a dictionary of the output. @@ -62,8 +67,8 @@ prediction = predictor(input=doc_query_pair) Let's see the output and its type. ```python -answer = prediction['answer'] -confidence_score = prediction['confidence_score'] +answer = prediction.answer +confidence_score = prediction.confidence print(f"Prediction: {prediction}\n\n") print(f"Answer: {answer}, Answer Type: {type(answer)}") @@ -89,18 +94,18 @@ prediction = cot_predictor(input=doc_query_pair) While the `dspy.TypedPredictor` and `dspy.TypedChainOfThought` provide a convenient way to use typed predictors, you can also use them as decorators to enforce type constraints on the inputs and outputs of the function. This relies on the internal definitions of the Signature class and its function arguments, outputs, and docstrings. 
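+Note that the decorated function's name becomes the name of the output field in the generated signature (it is taken from `func.__name__`), so it pays to give the function a meaningful name such as `answer`.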
-``` -# Function name is output key - +```python @dspy.predictor -def qa_function(doc_query_pair: Input) -> Output: - """Answer the question based on the context and query provided, and on the scale of 10 tell how confident you are about the answer.""" +def answer(doc_query_pair: Input) -> Output: + """Answer the question based on the context and query provided, and on the scale of 0-1 tell how confident you are about the answer.""" pass @dspy.cot -def qa_function(doc_query_pair: Input) -> Output: - """Answer the question based on the context and query provided, and on the scale of 10 tell how confident you are about the answer.""" +def answer(doc_query_pair: Input) -> Output: + """Answer the question based on the context and query provided, and on the scale of 0-1 tell how confident you are about the answer.""" pass + +prediction = answer(doc_query_pair=doc_query_pair) ``` ## Composing Functional Typed Predictors in `dspy.Module` From cad9b475b2e3ee9e4ffeae50b62e272a55cc51e5 Mon Sep 17 00:00:00 2001 From: demontego Date: Tue, 12 Mar 2024 13:02:01 +0300 Subject: [PATCH 206/243] fix dotdict in marqo --- dspy/retrieve/marqo_rm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dspy/retrieve/marqo_rm.py b/dspy/retrieve/marqo_rm.py index 29c52fdb46..a8b4966008 100644 --- a/dspy/retrieve/marqo_rm.py +++ b/dspy/retrieve/marqo_rm.py @@ -2,7 +2,7 @@ from typing import List, Union import dspy -from dspy import dotdict +from dsp.utils import dotdict try: import marqo From c26d5ba65e0220ab2e0804dd51dfe8664f17e960 Mon Sep 17 00:00:00 2001 From: Herumb Shandilya Date: Wed, 13 Mar 2024 03:06:47 +0530 Subject: [PATCH 207/243] feedback driven generation --- dspy/experimental/synthesizer/config.py | 25 +++--- .../synthesizer/instruction_suffixes.py | 3 + dspy/experimental/synthesizer/instructions.py | 5 -- dspy/experimental/synthesizer/signatures.py | 16 ++++ dspy/experimental/synthesizer/synthesizer.py | 83 ++++++++++++++----- dspy/experimental/synthesizer/utils.py | 4 +- poetry.lock | 25 +++++- pyproject.toml | 1 + 8 files changed, 113 insertions(+), 49 deletions(-) create mode 100644 dspy/experimental/synthesizer/instruction_suffixes.py delete mode 100644 dspy/experimental/synthesizer/instructions.py diff --git a/dspy/experimental/synthesizer/config.py b/dspy/experimental/synthesizer/config.py index 609efbe126..640989c224 100644 --- a/dspy/experimental/synthesizer/config.py +++ b/dspy/experimental/synthesizer/config.py @@ -1,27 +1,22 @@ -from typing import Optional, Union - -from pydantic import BaseModel, field_validator - -import dspy - +from typing import Optional, Any +from pydantic import BaseModel, model_validator class SynthesizerArguments(BaseModel): - # [TODO] feedback_mode: Optional[str] = None num_example_for_feedback: Optional[int] = None - input_lm_model: Optional[dspy.LM] = None - output_lm_model: Optional[dspy.LM] = None - output_teacher_module: Optional[Union[dspy.Module, dspy.Predict]] = None + input_lm_model: Optional[Any] = None + output_lm_model: Optional[Any] = None + output_teacher_module: Optional[Any] = None num_example_for_optim: Optional[int] = None - @field_validator(fields=["feedback_mode", "num_example_for_feedback"]) - def validate_feedback_mode(cls, value): - if value and value not in ["human", "llm"]: + @model_validator(mode='after') + def validate_feedback_mode(self): + if self.feedback_mode and self.feedback_mode not in ["human", "llm"]: raise ValueError("Feedback mode should be either 'human' or 'llm'.") - if value and not 
cls.num_example_for_feedback:
+        if self.feedback_mode and not self.num_example_for_feedback:
             raise ValueError("Number of examples for feedback is required when feedback mode is provided.")
 
-        return value
\ No newline at end of file
+        return self
\ No newline at end of file
diff --git a/dspy/experimental/synthesizer/instruction_suffixes.py b/dspy/experimental/synthesizer/instruction_suffixes.py
new file mode 100644
index 0000000000..53404a2a66
--- /dev/null
+++ b/dspy/experimental/synthesizer/instruction_suffixes.py
@@ -0,0 +1,3 @@
+INPUT_GENERATION_TASK_WITH_EXAMPLES_SUFFIX = """\n\nI'll also be providing you some data I generated beforehand; make sure the data you generate is consistent with the task I provided but different from the data I provided in every way possible."""
+
+INPUT_GENERATION_TASK_WITH_FEEDBACK_SUFFIX = "\n\nAdditionally, I'll be providing you with feedback on the data you generate. While generating the data, make sure to take into account the feedback I provide and try to improve the data you generate based on it."
\ No newline at end of file
diff --git a/dspy/experimental/synthesizer/instructions.py b/dspy/experimental/synthesizer/instructions.py
deleted file mode 100644
index 2fa65493ab..0000000000
--- a/dspy/experimental/synthesizer/instructions.py
+++ /dev/null
@@ -1,5 +0,0 @@
-INPUT_GENERATION_TASK_WITH_EXAMPLES = """Create synthetic data using the task description and the provided knowledge seed. Your task is to generate diverse and imaginative data that aligns with the given task description and knowledge seed. You are encouraged to be creative and not limit yourself, allowing for a wide range of synthetic data that reflects the characteristics and details provided in the task description. The data should be unique and varied, showcasing originality and creativity while maintaining relevance to the task and knowledge seed.
-
-Additionally I'll be providing you some data I generated before hand, make sure the data you generate if consistent with task I provided but different from the data I provided in every way possible."""
-
-INPUT_GENERATION_TASK_WITH_FEEDBACK = """"""
\ No newline at end of file
diff --git a/dspy/experimental/synthesizer/signatures.py b/dspy/experimental/synthesizer/signatures.py
index 0597dfdd59..444cbe12f6 100644
--- a/dspy/experimental/synthesizer/signatures.py
+++ b/dspy/experimental/synthesizer/signatures.py
@@ -28,6 +28,22 @@ class ExplainTask(dspy.Signature):
         desc="Explanation of the task.",
     )
 
+class UpdateTaskDescriptionBasedOnFeedback(dspy.Signature):
+    """Update the task description based on the feedback provided. Ensure that the revised task description incorporates the feedback to improve its overall clarity and effectiveness. Focus on enhancing the task's goal and basic premise, without delving into specific data points, models, examples, algorithms, or technical intricacies. Your explanation should aim to clarify the task's fundamental objective and purpose."""
+
+    task_description = dspy.InputField(
+        prefix="Task Description:",
+        desc="Description of the task.",
+    )
+    feedback = dspy.InputField(
+        prefix="Feedback:",
+        desc="Feedback on the task description.",
+    )
+    updated_task_description = dspy.OutputField(
+        prefix="Task Description:",
+        desc="Updated description of the task.",
+    )
+
 class GenerateFieldDescription(dspy.Signature):
     """Generate a concise and informative description for a given field based on the provided name and task description. 
This description should be no longer than 10 words and should be in simple english."""
diff --git a/dspy/experimental/synthesizer/synthesizer.py b/dspy/experimental/synthesizer/synthesizer.py
index d85f2cdb43..5930cfbbeb 100644
--- a/dspy/experimental/synthesizer/synthesizer.py
+++ b/dspy/experimental/synthesizer/synthesizer.py
@@ -1,24 +1,31 @@
+import dspy
 import random
-from collections.abc import Mapping
-from typing import List, Optional, Union
 
 from datasets import Dataset
-from tqdm import tqdm, trange
+from tqdm import tqdm, trange
+from rich import print as rprint
+from collections.abc import Mapping
+from typing import List, Optional, Union
 
-import dspy
-
 from .config import SynthesizerArguments
-from .instructions import INPUT_GENERATION_TASK_WITH_EXAMPLES
+from .instruction_suffixes import (
+    INPUT_GENERATION_TASK_WITH_EXAMPLES_SUFFIX,
+    INPUT_GENERATION_TASK_WITH_FEEDBACK_SUFFIX,
+)
 from .signatures import (
     ExplainTask,
     GenerateFieldDescription,
     GenerateInputFieldsData,
     GenerateOutputFieldsData,
     UnderstandTask,
+    UpdateTaskDescriptionBasedOnFeedback,
 )
 from .utils import format_examples
 
-__all__ = ["Synthesizer"]
+__all__ = [
+    "Synthesizer",
+    "SynthesizerArguments",
+]
 
 class Synthesizer:
     def __init__(self, config: SynthesizerArguments):
@@ -29,10 +36,33 @@ def __init__(self, config: SynthesizerArguments):
 
         self.explain_task = dspy.Predict(ExplainTask)
         self.understand_task = dspy.Predict(UnderstandTask)
         self.generate_field_description = dspy.Predict(GenerateFieldDescription)
+        self.update_task_description = dspy.Predict(UpdateTaskDescriptionBasedOnFeedback)
 
         self.generate_input_data = GenerateInputFieldsData
         self.generate_output_data = GenerateOutputFieldsData
 
+    def _gather_feedback(self, examples: dspy.Example) -> str:
+        if self.config.feedback_mode == "human":
+            input_keys = examples.inputs().keys()
+
+            print("-"*75)
+            print_text = "[bold blue]Generated Data:[/bold blue]\n[bold red]Inputs:[/bold red]\n"
+
+            for key in input_keys:
+                print_text += f"\t[bold yellow]{key}[/bold yellow]: [green]{examples[key]}[/green]\n"
+
+            rprint(print_text)
+            feedback = input("Provide feedback on the generated data: ")
+            print("-"*75)
+
+            return feedback
+
+        elif self.config.feedback_mode == "llm":
+            raise NotImplementedError("Feedback mode 'llm' is not implemented yet.")
+
+        else:
+            raise ValueError("Feedback mode should be either 'human' or 'llm'.")
+
     def _get_field_data(self, key: str, keys_dict: Mapping[str, str]):
@@ -137,7 +167,11 @@ def generate(
         task_description, input_keys, output_keys = self._get_dataset_metadata(ground_source)
 
         if self.config.num_example_for_optim:
-            self.generate_input_data.__doc__ = INPUT_GENERATION_TASK_WITH_EXAMPLES
+            self.generate_input_data.__doc__ += INPUT_GENERATION_TASK_WITH_EXAMPLES_SUFFIX
+
+        if self.config.feedback_mode:
+            self.generate_input_data.__doc__ += INPUT_GENERATION_TASK_WITH_FEEDBACK_SUFFIX
+
         self.generate_output_data.__doc__ = task_description
 
         self.input_predictor, self.output_predictor = self._prepare_synthetic_data_predictors(
@@ -147,28 +181,23 @@ def generate(
         )
 
         data = []
+        feedback = ""
 
         for idx in trange(0, num_data, batch_size, desc="Generating Synthetic Data"):
             iter_temperature = 0.7+0.01*idx
             iter_seed = random.randint(0, 1000000)
 
-            inputs = None
+            kwargs = {
+                "task_description": task_description,
+                "knowledge_seed": iter_seed,
+                "config": dict(temperature=iter_temperature, n=batch_size),
+            }
 
+            if self.config.num_example_for_optim:
+                kwargs["ground_source"] = random.sample(ground_source, 
self.config.num_example_for_optim) + with dspy.context(lm=self.input_lm): - if self.config.num_example_for_optim: - example_for_optimization = random.sample(ground_source, self.config.num_example_for_optim) - inputs = self.input_predictor( - task_description=task_description, - knowledge_seed=iter_seed, - ground_source=example_for_optimization, - config=dict(temperature=iter_temperature, n=batch_size), - ) - else: - inputs = self.input_predictor( - task_description=task_description, - knowledge_seed=iter_seed, - config=dict(temperature=iter_temperature, n=batch_size), - ) + inputs = self.input_predictor(**kwargs) input_kwargs = [{ key: getattr(completions, key) @@ -191,6 +220,16 @@ def generate( } data.append(dspy.Example(**kwargs, **output_kwargs).with_inputs(*input_keys)) + + if self.config.feedback_mode and idx < self.config.num_example_for_feedback: + feedback = self._gather_feedback(data[-1]) + + task_description = self.update_task_description( + task_description=task_description, + feedback=feedback, + ).updated_task_description + + self.output_predictor.signature.__doc__ = task_description return data diff --git a/dspy/experimental/synthesizer/utils.py b/dspy/experimental/synthesizer/utils.py index f08b142e1a..b52ae90169 100644 --- a/dspy/experimental/synthesizer/utils.py +++ b/dspy/experimental/synthesizer/utils.py @@ -1,7 +1,5 @@ -from typing import List - import dspy - +from typing import List def format_examples(examples: List[dspy.Example]) -> str: if isinstance(examples, str): diff --git a/poetry.lock b/poetry.lock index 30ec769817..ebf68afe9b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2021,7 +2021,7 @@ testing = ["coverage", "pyyaml"] name = "markdown-it-py" version = "2.2.0" description = "Python port of markdown-it. Markdown parsing, done right!" 
-optional = true +optional = false python-versions = ">=3.7" files = [ {file = "markdown-it-py-2.2.0.tar.gz", hash = "sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1"}, @@ -2163,7 +2163,7 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] name = "mdurl" version = "0.1.2" description = "Markdown URL utilities" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, @@ -3703,7 +3703,6 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, @@ -4036,6 +4035,24 @@ requests = ">=2.0.0" [package.extras] rsa = ["oauthlib[signedtoken] (>=3.0.0)"] +[[package]] +name = "rich" +version = "13.7.1" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "rich-13.7.1-py3-none-any.whl", hash = "sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222"}, + {file = "rich-13.7.1.tar.gz", hash = "sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432"}, +] + +[package.dependencies] +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + [[package]] name = "rpds-py" version = "0.18.0" @@ -5568,4 +5585,4 @@ weaviate = ["weaviate-client"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.12" -content-hash = "8b4cc583653becb3be9f5bc4c34cdf5ced146ba32157a5bb4bdc7885291c0403" +content-hash = "ccfd76e32c6c94d32fd3f81815b5d499d4ba9b6f4707bc547972a2c1e0846745" diff --git a/pyproject.toml b/pyproject.toml index e2c719eaf2..749b15f6b6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -101,6 +101,7 @@ sphinx_rtd_theme = { version = "*", optional = true } autodoc_pydantic = { version = "*", optional = true } sphinx-reredirects = { version = "^0.1.2", optional = true } sphinx-automodapi = { version = "0.16.0", optional = true } +rich = "^13.7.1" [tool.poetry.group.test.dependencies] From e53fcbe8953e5d221f3b53d20049938c4e4d88bb Mon Sep 17 00:00:00 2001 From: Max Friedrich Date: Wed, 13 Mar 2024 17:47:24 +0100 Subject: [PATCH 208/243] fix(dspy): use DataFrame.applymap in older pandas --- dspy/evaluate/evaluate.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/dspy/evaluate/evaluate.py b/dspy/evaluate/evaluate.py index 486e4b68e9..603b552aa0 100644 --- a/dspy/evaluate/evaluate.py +++ b/dspy/evaluate/evaluate.py @@ -184,7 +184,10 @@ 
def wrapped_program(example_idx, example): df = pd.DataFrame(data) # Truncate every cell in the DataFrame - df = df.map(truncate_cell) + if hasattr(df, "map"): # DataFrame.applymap was renamed to DataFrame.map in Pandas 2.1.0 + df = df.map(truncate_cell) + else: + df = df.applymap(truncate_cell) # Rename the 'correct' column to the name of the metric object assert callable(metric) From 04bedffccf8eec16bc5af8d3be36fc8e79f4426b Mon Sep 17 00:00:00 2001 From: Herumb Shandilya Date: Thu, 14 Mar 2024 02:40:02 +0530 Subject: [PATCH 209/243] Automated feedback incorporations --- dspy/experimental/synthesizer/signatures.py | 17 +++++++++++++++++ dspy/experimental/synthesizer/synthesizer.py | 11 ++++++++++- 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/dspy/experimental/synthesizer/signatures.py b/dspy/experimental/synthesizer/signatures.py index 444cbe12f6..e1a50c6894 100644 --- a/dspy/experimental/synthesizer/signatures.py +++ b/dspy/experimental/synthesizer/signatures.py @@ -44,6 +44,23 @@ class UpdateTaskDescriptionBasedOnFeedback(dspy.Signature): desc="Updated description of the task.", ) +class GetFeedbackOnGeneration(dspy.Signature): + """Provide constructive feedback on the synthetic data generated, focusing on its quality, relevance, and diversity. Highlight any areas that require improvement and offer suggestions for enhancement. The feedback should center on the overall effectiveness of the synthetic data in aligning with the task description and knowledge seed. Avoid delving into specific data points, models, examples, algorithms, or technical intricacies. Your feedback should be critical but constructive, aiming to improve the synthetic data and the task description.""" + + synthetic_data = dspy.InputField( + prefix="Synthetic Data:", + desc="Synthetic data generated.", + format=format_examples, + ) + task_description = dspy.InputField( + prefix="Task Description:", + desc="Description of the task the synthetic data is aligned with.", + ) + feedback = dspy.OutputField( + prefix="Feedback:", + desc="Feedback on the synthetic data.", + ) + class GenerateFieldDescription(dspy.Signature): """Generate a concise and informative description for a given field based on the provided name and task description. 
This description should be no longer than 10 words and should be in simple english.""" diff --git a/dspy/experimental/synthesizer/synthesizer.py b/dspy/experimental/synthesizer/synthesizer.py index 5930cfbbeb..a3ffdcda8e 100644 --- a/dspy/experimental/synthesizer/synthesizer.py +++ b/dspy/experimental/synthesizer/synthesizer.py @@ -17,6 +17,7 @@ GenerateFieldDescription, GenerateInputFieldsData, GenerateOutputFieldsData, + GetFeedbackOnGeneration, UnderstandTask, UpdateTaskDescriptionBasedOnFeedback, ) @@ -35,6 +36,7 @@ def __init__(self, config: SynthesizerArguments): self.explain_task = dspy.Predict(ExplainTask) self.understand_task = dspy.Predict(UnderstandTask) + self.get_feedback_on_generation = dspy.Predict(GetFeedbackOnGeneration) self.generate_field_description = dspy.Predict(GenerateFieldDescription) self.update_task_description = dspy.Predict(UpdateTaskDescriptionBasedOnFeedback) @@ -58,7 +60,14 @@ def _gather_feedback(self, examples: dspy.Example) -> str: return feedback elif self.config.feedback_mode == "llm": - raise NotImplementedError("Feedback mode 'llm' is not implemented yet.") + feedback = self.get_feedback_on_generation( + synthetic_data=[examples], + task_description=self.generate_output_data.__doc__, + ) + + print(feedback.feedback) + + return feedback.feedback else: raise ValueError("Feedback mode should be either 'human' or 'llm'.") From cd78c4d57ca52a5b7c55d7ce321af068d5a8e6b8 Mon Sep 17 00:00:00 2001 From: Herumb Shandilya Date: Thu, 14 Mar 2024 02:49:59 +0530 Subject: [PATCH 210/243] lint fixes --- dspy/experimental/synthesizer/config.py | 4 +++- dspy/experimental/synthesizer/synthesizer.py | 9 +++++---- dspy/experimental/synthesizer/utils.py | 4 +++- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/dspy/experimental/synthesizer/config.py b/dspy/experimental/synthesizer/config.py index 640989c224..557299cfa7 100644 --- a/dspy/experimental/synthesizer/config.py +++ b/dspy/experimental/synthesizer/config.py @@ -1,6 +1,8 @@ -from typing import Optional, Any +from typing import Any, Optional + from pydantic import BaseModel, model_validator + class SynthesizerArguments(BaseModel): feedback_mode: Optional[str] = None num_example_for_feedback: Optional[int] = None diff --git a/dspy/experimental/synthesizer/synthesizer.py b/dspy/experimental/synthesizer/synthesizer.py index a3ffdcda8e..ad71f9eee6 100644 --- a/dspy/experimental/synthesizer/synthesizer.py +++ b/dspy/experimental/synthesizer/synthesizer.py @@ -1,11 +1,12 @@ -import dspy import random +from collections.abc import Mapping +from typing import List, Optional, Union from datasets import Dataset -from tqdm import tqdm, trange from rich import print as rprint -from collections.abc import Mapping -from typing import List, Optional, Union +from tqdm import tqdm, trange + +import dspy from .config import SynthesizerArguments from .instruction_suffixes import ( diff --git a/dspy/experimental/synthesizer/utils.py b/dspy/experimental/synthesizer/utils.py index b52ae90169..f08b142e1a 100644 --- a/dspy/experimental/synthesizer/utils.py +++ b/dspy/experimental/synthesizer/utils.py @@ -1,6 +1,8 @@ -import dspy from typing import List +import dspy + + def format_examples(examples: List[dspy.Example]) -> str: if isinstance(examples, str): return examples From ce8eade360d90f0d935be9d49af5375399ef219f Mon Sep 17 00:00:00 2001 From: Herumb Shandilya Date: Thu, 14 Mar 2024 02:50:49 +0530 Subject: [PATCH 211/243] print removal --- dspy/experimental/synthesizer/synthesizer.py | 2 -- 1 file changed, 2 deletions(-) 
diff --git a/dspy/experimental/synthesizer/synthesizer.py b/dspy/experimental/synthesizer/synthesizer.py index ad71f9eee6..0672a28081 100644 --- a/dspy/experimental/synthesizer/synthesizer.py +++ b/dspy/experimental/synthesizer/synthesizer.py @@ -66,8 +66,6 @@ def _gather_feedback(self, examples: dspy.Example) -> str: task_description=self.generate_output_data.__doc__, ) - print(feedback.feedback) - return feedback.feedback else: From 0dce4f7daf22457ddff0ca3ec3f5191e78b7265c Mon Sep 17 00:00:00 2001 From: nbqu Date: Thu, 14 Mar 2024 20:41:08 +0900 Subject: [PATCH 212/243] fix(dspy): remove debug print statement --- dsp/modules/anthropic.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/dsp/modules/anthropic.py b/dsp/modules/anthropic.py index 68f30ce7a1..2c33dc727c 100644 --- a/dsp/modules/anthropic.py +++ b/dsp/modules/anthropic.py @@ -49,7 +49,7 @@ def __init__( from anthropic import Anthropic, RateLimitError except ImportError as err: raise ImportError("Claude requires `pip install anthropic`.") from err - + self.provider = "anthropic" self.api_key = api_key = os.environ.get("ANTHROPIC_API_KEY") if api_key is None else api_key self.api_base = BASE_URL if api_base is None else api_base @@ -80,7 +80,6 @@ def basic_request(self, prompt: str, **kwargs): # caching mechanism requires hashable kwargs kwargs["messages"] = [{"role": "user", "content": prompt}] kwargs.pop("n") - print(kwargs) response = self.client.messages.create(**kwargs) history = { @@ -120,7 +119,7 @@ def __call__(self, prompt, only_completed=True, return_sorted=False, **kwargs): assert only_completed, "for now" assert return_sorted is False, "for now" - + # per eg here: https://docs.anthropic.com/claude/reference/messages-examples # max tokens can be used as a proxy to return smaller responses # so this cannot be a proper indicator for incomplete response unless it isnt the user-intent. 
@@ -137,4 +136,4 @@ def __call__(self, prompt, only_completed=True, return_sorted=False, **kwargs): if only_completed and response.stop_reason == "max_tokens": continue completions = [c.text for c in response.content] - return completions \ No newline at end of file + return completions From 73a9890e99845e53f6889bdb102118ce9af489ae Mon Sep 17 00:00:00 2001 From: nbqu Date: Thu, 14 Mar 2024 20:59:59 +0900 Subject: [PATCH 213/243] lint(dspy): refactor --- dsp/modules/anthropic.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/dsp/modules/anthropic.py b/dsp/modules/anthropic.py index 2c33dc727c..e78e9d2d2f 100644 --- a/dsp/modules/anthropic.py +++ b/dsp/modules/anthropic.py @@ -19,7 +19,7 @@ def backoff_hdlr(details): - """Handler from https://pypi.org/project/backoff/""" + """Handler from https://pypi.org/project/backoff/.""" print( "Backing off {wait:0.1f} seconds after {tries} tries " "calling function {target} with kwargs " @@ -28,7 +28,7 @@ def backoff_hdlr(details): def giveup_hdlr(details): - """wrapper function that decides when to give up on retry""" + """Wrapper function that decides when to give up on retry.""" if "rate limits" in details.message: return False return True @@ -46,7 +46,7 @@ def __init__( super().__init__(model) try: - from anthropic import Anthropic, RateLimitError + from anthropic import Anthropic except ImportError as err: raise ImportError("Claude requires `pip install anthropic`.") from err @@ -55,10 +55,10 @@ def __init__( self.api_base = BASE_URL if api_base is None else api_base self.kwargs = { - "temperature": 0.0 if "temperature" not in kwargs else kwargs["temperature"], + "temperature": kwargs.get("temperature", 0.0), "max_tokens": min(kwargs.get("max_tokens", 4096), 4096), - "top_p": 1.0 if "top_p" not in kwargs else kwargs["top_p"], - "top_k": 1 if "top_k" not in kwargs else kwargs["top_k"], + "top_p": kwargs.get("top_p", 1.0), + "top_k": kwargs.get("top_k", 1), "n": kwargs.pop("n", kwargs.pop("num_generations", 1)), **kwargs, } @@ -101,7 +101,7 @@ def basic_request(self, prompt: str, **kwargs): giveup=giveup_hdlr, ) def request(self, prompt: str, **kwargs): - """Handles retrieval of completions from Anthropic whilst handling API errors""" + """Handles retrieval of completions from Anthropic whilst handling API errors.""" return self.basic_request(prompt, **kwargs) def __call__(self, prompt, only_completed=True, return_sorted=False, **kwargs): @@ -123,12 +123,10 @@ def __call__(self, prompt, only_completed=True, return_sorted=False, **kwargs): # per eg here: https://docs.anthropic.com/claude/reference/messages-examples # max tokens can be used as a proxy to return smaller responses # so this cannot be a proper indicator for incomplete response unless it isnt the user-intent. 
- # if only_completed and response.stop_reason != "end_turn": - # choices = [] n = kwargs.pop("n", 1) completions = [] - for i in range(n): + for _ in range(n): response = self.request(prompt, **kwargs) # TODO: Log llm usage instead of hardcoded openai usage # if dsp.settings.log_openai_usage: From 9242b01e6ae9516017a2265b57456e9e9f2bc089 Mon Sep 17 00:00:00 2001 From: Herumb Shandilya Date: Thu, 14 Mar 2024 18:27:56 +0530 Subject: [PATCH 214/243] Remove kwargs print call in Claude LM --- dsp/modules/anthropic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dsp/modules/anthropic.py b/dsp/modules/anthropic.py index 68f30ce7a1..d638ebcd3c 100644 --- a/dsp/modules/anthropic.py +++ b/dsp/modules/anthropic.py @@ -80,7 +80,7 @@ def basic_request(self, prompt: str, **kwargs): # caching mechanism requires hashable kwargs kwargs["messages"] = [{"role": "user", "content": prompt}] kwargs.pop("n") - print(kwargs) + response = self.client.messages.create(**kwargs) history = { From 11847e09c37718f14dadc10a8948ac08779c9aef Mon Sep 17 00:00:00 2001 From: Herumb Shandilya Date: Thu, 14 Mar 2024 18:59:25 +0530 Subject: [PATCH 215/243] Revert "Remove kwargs print call in Claude LM" This reverts commit 9242b01e6ae9516017a2265b57456e9e9f2bc089. --- dsp/modules/anthropic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dsp/modules/anthropic.py b/dsp/modules/anthropic.py index d638ebcd3c..68f30ce7a1 100644 --- a/dsp/modules/anthropic.py +++ b/dsp/modules/anthropic.py @@ -80,7 +80,7 @@ def basic_request(self, prompt: str, **kwargs): # caching mechanism requires hashable kwargs kwargs["messages"] = [{"role": "user", "content": prompt}] kwargs.pop("n") - + print(kwargs) response = self.client.messages.create(**kwargs) history = { From 69cfd9f1aff78ccf76e483825f381b2784c76b4b Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Thu, 14 Mar 2024 11:28:06 -0700 Subject: [PATCH 216/243] Fixed but with prefixes and annotated types --- dspy/functional/functional.py | 195 ++++++---------------------- tests/functional/test_functional.py | 66 ++++++++-- 2 files changed, 92 insertions(+), 169 deletions(-) diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py index d24dd4d9d7..048cd9c1a2 100644 --- a/dspy/functional/functional.py +++ b/dspy/functional/functional.py @@ -1,10 +1,8 @@ import inspect import json -import os import typing from typing import Annotated, List, Tuple # noqa: UP035 -import openai import pydantic import ujson @@ -13,9 +11,6 @@ from dspy.primitives.prediction import Prediction from dspy.signatures.signature import ensure_signature, make_signature -# Some improvement ideas: -# - Increase the temperature on error - def predictor(func) -> dspy.Module: """Decorator that creates a predictor module based on the provided function.""" @@ -73,27 +68,38 @@ def TypedChainOfThought(signature, max_retries=3) -> dspy.Module: # noqa: N802 class TypedPredictor(dspy.Module): - def __init__(self, signature, max_retries=3): + def __init__(self, signature, max_retries=3, wrap_json=False): + """Like dspy.Predict, but enforces type annotations in the signature. + + Args: + signature: The signature of the module. Can use type annotations. + max_retries: The number of times to retry the prediction if the output is invalid. + wrap_json: If True, json objects in the input will be wrapped in ```json ... 
``` + """ super().__init__() self.signature = ensure_signature(signature) self.predictor = dspy.Predict(signature) self.max_retries = max_retries + self.wrap_json = wrap_json def copy(self) -> "TypedPredictor": - return TypedPredictor(self.signature, self.max_retries) + return TypedPredictor(self.signature, self.max_retries, self.wrap_json) def __repr__(self): + """Return a string representation of the TypedPredictor object.""" return f"TypedPredictor({self.signature})" - @staticmethod - def _make_example(type_) -> str: + def _make_example(self, type_) -> str: # Note: DSPy will cache this call so we only pay the first time TypedPredictor is called. + schema = json.dumps(type_.model_json_schema()) + if self.wrap_json: + schema = "```json\n" + schema + "\n```\n" json_object = dspy.Predict( make_signature( "json_schema -> json_object", "Make a very succinct json object that validates with the following schema", ), - )(json_schema=json.dumps(type_.model_json_schema())).json_object + )(json_schema=schema).json_object # We use the model_validate_json method to make sure the example is valid try: type_.model_validate_json(_unwrap_json(json_object)) @@ -147,6 +153,9 @@ def _prepare_signature(self) -> dspy.Signature: to_json = lambda x: x.model_dump_json() from_json = lambda x, type_=type_: type_.model_validate_json(x) schema = json.dumps(type_.model_json_schema()) + if self.wrap_json: + to_json = lambda x, inner=to_json: "```json\n" + inner(x) + "\n```\n" + schema = "```json\n" + schema + "\n```" signature = signature.with_updated_fields( name, desc=field.json_schema_extra.get("desc", "") @@ -156,6 +165,7 @@ def _prepare_signature(self) -> dspy.Signature: type_=type_, ) else: # If input field + is_json = False format_ = lambda x: x if isinstance(x, str) else str(x) if type_ in (List[str], list[str], Tuple[str], tuple[str]): format_ = passages2text @@ -168,8 +178,12 @@ def _prepare_signature(self) -> dspy.Signature: ) else: format_ = lambda x: x if isinstance(x, str) else json.dumps(x) + is_json = True elif inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel): format_ = lambda x: x if isinstance(x, str) else x.model_dump_json() + is_json = True + if self.wrap_json and is_json: + format_ = lambda x, inner=format_: x if isinstance(x, str) else "```json\n" + inner(x) + "\n```\n" signature = signature.with_updated_fields(name, format=format_) return signature @@ -215,7 +229,7 @@ def forward(self, **kwargs) -> dspy.Prediction: # Instantiate the actual signature with the parsed values. # This allow pydantic to validate the fields defined in the signature. 
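+            # Constructing the signature model here is what actually runs the
+            # pydantic validators on the parsed outputs; any ValidationError is
+            # caught below and recycled into the next retry as a "Past Error"
+            # input field, so the LM sees exactly which constraint it violated.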
try: - _dummy = self.signature(**kwargs, **parsed) + _ = self.signature(**kwargs, **parsed) parsed_results.append(parsed) except pydantic.ValidationError as e: errors["general"] = _format_error(e) @@ -223,10 +237,15 @@ def forward(self, **kwargs) -> dspy.Prediction: # Add new fields for each error for name, error in errors.items(): modified_kwargs[f"error_{name}_{try_i}"] = error + if name == "general": + error_prefix = "General:" + else: + error_prefix = signature.output_fields[name].json_schema_extra["prefix"] + number = "" if try_i == 0 else f" ({try_i+1})" signature = signature.append( f"error_{name}_{try_i}", dspy.InputField( - prefix="Past Error " + (f"({name}):" if try_i == 0 else f"({name}, {try_i+1}):"), + prefix=f"Past Error{number} in {error_prefix}", desc="An error to avoid in the future", ), ) @@ -248,14 +267,13 @@ def _format_error(error: Exception): fields = ", ".join(map(str, e["loc"])) errors.append(f"{e['msg']}: {fields} (error type: {e['type']})") return "; ".join(errors) - else: - return repr(error) + return repr(error) def _func_to_signature(func): """Make a dspy.Signature based on a function definition.""" sig = inspect.signature(func) - annotations = typing.get_type_hints(func) + annotations = typing.get_type_hints(func, include_extras=True) output_key = func.__name__ instructions = func.__doc__ fields = {} @@ -268,14 +286,18 @@ def _func_to_signature(func): annotation = annotations.get(param.name, str) kwargs = {} if typing.get_origin(annotation) is Annotated: - annotation, kwargs["desc"] = typing.get_args(annotation) + desc = next((arg for arg in typing.get_args(annotation) if isinstance(arg, str)), None) + if desc is not None: + kwargs["desc"] = desc fields[param.name] = (annotation, dspy.InputField(**kwargs)) # Output field kwargs = {} annotation = annotations.get("return", str) if typing.get_origin(annotation) is Annotated: - annotation, kwargs["desc"] = typing.get_args(annotation) + desc = next((arg for arg in typing.get_args(annotation) if isinstance(arg, str)), None) + if desc is not None: + kwargs["desc"] = desc fields[output_key] = (annotation, dspy.OutputField(**kwargs)) return dspy.Signature(fields, instructions) @@ -287,145 +309,8 @@ def _unwrap_json(output): if not output.startswith("```json"): raise ValueError("json output should start with ```json") if not output.endswith("```"): - raise ValueError("json output should end with ```") + raise ValueError("Don't write anything after the final json ```") output = output[7:-3].strip() if not output.startswith("{") or not output.endswith("}"): raise ValueError("json output should start and end with { and }") return ujson.dumps(ujson.loads(output)) # ujson is a bit more robust than the standard json - - -################################################################################ -# Example usage -################################################################################ - - -def main() -> None: - class Answer(pydantic.BaseModel): - value: float - certainty: float - comments: list[str] = pydantic.Field(description="At least two comments about the answer") - - class QA(dspy.Module): - @predictor - def hard_question(self, topic: str) -> str: - """Think of a hard factual question about a topic. 
It should be answerable with a number.""" - - @cot - def answer(self, question: Annotated[str, "Question to answer"]) -> Answer: - pass - - def forward(self, **kwargs): - question = self.hard_question(**kwargs) - return (question, self.answer(question=question)) - - openai.api_key = os.getenv("OPENAI_API_KEY") - lm = dspy.OpenAI(model="gpt-3.5-turbo", max_tokens=4000) - # lm = dspy.OpenAI(model="gpt-4", max_tokens=4000) - # lm = dspy.OpenAI(model="gpt-4-preview-1106", max_tokens=4000) - with dspy.context(lm=lm): - qa = QA() - question, answer = qa(topic="Physics") - # lm.inspect_history(n=5) - - print("Question:", question) # noqa: T201 - print("Answer:", answer) # noqa: T201 - - -################################################################################ -# HotpotQA example with SimpleBaleen -################################################################################ - - -def validate_context_and_answer_and_hops(example, pred, trace=None) -> bool: - if not dspy.evaluate.answer_exact_match(example, pred): - return False - if not dspy.evaluate.answer_passage_match(example, pred): - return False - - hops = [example.question] + [outputs.query for *_, outputs in trace if "query" in outputs] - - if max([len(h) for h in hops]) > 100: - return False - if any(dspy.evaluate.answer_exact_match_str(hops[idx], hops[:idx], frac=0.8) for idx in range(2, len(hops))): - return False - - return True - - -def gold_passages_retrieved(example, pred, _trace=None) -> bool: - gold_titles = set(map(dspy.evaluate.normalize_text, example["gold_titles"])) - found_titles = set(map(dspy.evaluate.normalize_text, [c.split(" | ")[0] for c in pred.context])) - - return gold_titles.issubset(found_titles) - - -def hotpot() -> None: - import dspy.evaluate - from dsp.utils import deduplicate - from dspy.datasets import HotPotQA - from dspy.evaluate.evaluate import Evaluate - from dspy.teleprompt.bootstrap import BootstrapFewShot - - print("Load the dataset.") # noqa: T201 - dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0) - trainset = [x.with_inputs("question") for x in dataset.train] - devset = [x.with_inputs("question") for x in dataset.dev] - print("Done") # noqa: T201 - - class SimplifiedBaleen(FunctionalModule): - def __init__(self, passages_per_hop=3, max_hops=1): - super().__init__() - self.retrieve = dspy.Retrieve(k=passages_per_hop) - self.max_hops = max_hops - - @cot - def generate_query(self, context: list[str], question) -> str: - """Write a simple search query that will help answer a complex question.""" - pass - - @cot - def generate_answer(self, context: list[str], question) -> str: - """Answer questions with short factoid answers.""" - pass - - def forward(self, question): - context = [] - - for _ in range(self.max_hops): - query = self.generate_query(context=context, question=question) - passages = self.retrieve(query).passages - context = deduplicate(context + passages) - - answer = self.generate_answer(context=context, question=question) - return dspy.Prediction(context=context, answer=answer) - - openai.api_key = os.getenv("OPENAI_API_KEY") - rm = dspy.ColBERTv2(url="http://20.102.90.50:2017/wiki17_abstracts") - lm = dspy.OpenAI(model="gpt-3.5-turbo", max_tokens=4000) - dspy.settings.configure(lm=lm, rm=rm, trace=[]) - - evaluate_on_hotpotqa = Evaluate(devset=devset, num_threads=10, display_progress=True, display_table=5) - - # uncompiled (i.e., zero-shot) program - uncompiled_baleen = SimplifiedBaleen() - print( # noqa: T201 - "Uncompiled Baleen retrieval 
score:", - evaluate_on_hotpotqa(uncompiled_baleen, metric=gold_passages_retrieved), - ) - - # compiled (i.e., few-shot) program - compiled_baleen = BootstrapFewShot(metric=validate_context_and_answer_and_hops).compile( - SimplifiedBaleen(), - teacher=SimplifiedBaleen(passages_per_hop=2), - trainset=trainset, - ) - print( # noqa: T201 - "Compiled Baleen retrieval score:", - evaluate_on_hotpotqa(compiled_baleen, metric=gold_passages_retrieved), - ) - # lm.inspect_history(n=5) - - -if __name__ == "__main__": - # main() - hotpot() diff --git a/tests/functional/test_functional.py b/tests/functional/test_functional.py index 7f8f9237eb..f0e22e4be8 100644 --- a/tests/functional/test_functional.py +++ b/tests/functional/test_functional.py @@ -1,7 +1,7 @@ import datetime import textwrap import pydantic -from pydantic import Field, BaseModel, field_validator +from pydantic import AfterValidator, Field, BaseModel, field_validator from typing import Annotated, Generic, Literal, TypeVar from typing import List @@ -362,9 +362,9 @@ def flight_information(email: str) -> TravelInformation: Email: ${email} - Past Error (flight_information): An error to avoid in the future + Past Error in Flight Information: An error to avoid in the future - Past Error (flight_information, 2): An error to avoid in the future + Past Error (2) in Flight Information: An error to avoid in the future Flight Information: ${flight_information}. Respond with a single JSON object. JSON Schema: {"properties": {"origin": {"pattern": "^[A-Z]{3}$", "title": "Origin", "type": "string"}, "destination": {"pattern": "^[A-Z]{3}$", "title": "Destination", "type": "string"}, "date": {"format": "date", "title": "Date", "type": "string"}}, "required": ["origin", "destination", "date"], "title": "TravelInformation", "type": "object"} @@ -372,9 +372,9 @@ def flight_information(email: str) -> TravelInformation: Email: Some email - Past Error (flight_information): String should match pattern '^[A-Z]{3}$': origin (error type: string_pattern_mismatch) + Past Error in Flight Information: String should match pattern '^[A-Z]{3}$': origin (error type: string_pattern_mismatch) - Past Error (flight_information, 2): String should match pattern '^[A-Z]{3}$': destination (error type: string_pattern_mismatch) + Past Error (2) in Flight Information: String should match pattern '^[A-Z]{3}$': destination (error type: string_pattern_mismatch) Flight Information: {"origin": "JFK", "destination": "LAX", "date": "2022-12-25"}""" ) @@ -418,28 +418,25 @@ def get_user_details() -> UserDetails: Follow the following format. - Past Error (get_user_details): An error to avoid in the future - Past Error (get_user_details, 2): An error to avoid in the future + Past Error in Get User Details: An error to avoid in the future + Past Error (2) in Get User Details: An error to avoid in the future Get User Details: ${get_user_details}. Respond with a single JSON object. 
JSON Schema: {"properties": {"name": {"title": "Name", "type": "string"}, "age": {"title": "Age", "type": "integer"}}, "required": ["name", "age"], "title": "UserDetails", "type": "object"} --- - Past Error (get_user_details): Value error, Name must be in uppercase.: name (error type: value_error) - Past Error (get_user_details, 2): Value error, Name must be in uppercase.: name (error type: value_error) + Past Error in Get User Details: Value error, Name must be in uppercase.: name (error type: value_error) + Past Error (2) in Get User Details: Value error, Name must be in uppercase.: name (error type: value_error) Get User Details: {"name": "lower case name", "age": 25}""" ) def test_annotated_field(): - # Since we don't currently validate fields on the main signature, - # the annotated fields are also not validated. - # But at least it should not crash. - @predictor def test(input: Annotated[str, Field(description="description")]) -> Annotated[float, Field(gt=0, lt=1)]: pass - lm = DummyLM(["0.5"]) + # First try 0, which fails, then try 0.5, which passes + lm = DummyLM(["0", "0.5"]) dspy.settings.configure(lm=lm) output = test(input="input") @@ -654,3 +651,44 @@ def space_in_a(cls, a: str) -> str: _ = ValidatedSignature(a="no-space") _ = ValidatedSignature(a="with space") + + +def test_annotated_validator(): + def is_square(n: int) -> int: + root = n**0.5 + if not root.is_integer(): + raise ValueError(f"{n} is not a square") + return n + + class MySignature(dspy.Signature): + """What is the next square number after n?""" + + n: int = dspy.InputField() + next_square: Annotated[int, AfterValidator(is_square)] = dspy.OutputField() + + lm = DummyLM(["3", "4"]) + dspy.settings.configure(lm=lm) + + m = TypedPredictor(MySignature)(n=2).next_square + lm.inspect_history(n=2) + + assert m == 4 + + +def test_annotated_validator_functional(): + def is_square(n: int) -> int: + if not (n**0.5).is_integer(): + raise ValueError(f"{n} is not a square") + return n + + @predictor + def next_square(n: int) -> Annotated[int, AfterValidator(is_square)]: + """What is the next square number after n?""" + + lm = DummyLM(["3", "4"]) + dspy.settings.configure(lm=lm) + + m = next_square(n=2) + lm.inspect_history(n=2) + + assert m == 4 From 259d6b03aa6e6d8dafcc7499339acaf665b0a80e Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Thu, 14 Mar 2024 13:25:45 -0700 Subject: [PATCH 217/243] Automatically copy over "description" from pydantic field to dspy field --- dspy/signatures/field.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/dspy/signatures/field.py b/dspy/signatures/field.py index 4e32714778..4e8acd6871 100644 --- a/dspy/signatures/field.py +++ b/dspy/signatures/field.py @@ -19,6 +19,9 @@ def move_kwargs(**kwargs): json_schema_extra[k] = v else: pydantic_kwargs[k] = v + # Also copy over the pydantic "description" if no dspy "desc" is given. 
+ if "description" in kwargs and "desc" not in json_schema_extra: + json_schema_extra["desc"] = kwargs["description"] pydantic_kwargs["json_schema_extra"] = json_schema_extra return pydantic_kwargs From 982db444322cbf6a545a2504ce162b15dafee975 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Thu, 14 Mar 2024 15:35:25 -0700 Subject: [PATCH 218/243] Fixed issue with boolean outputs --- dspy/functional/functional.py | 17 +++++- tests/functional/test_functional.py | 89 +++++++++++++++++++++++++++++ 2 files changed, 105 insertions(+), 1 deletion(-) diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py index 048cd9c1a2..dab08eb43f 100644 --- a/dspy/functional/functional.py +++ b/dspy/functional/functional.py @@ -119,7 +119,22 @@ def _prepare_signature(self) -> dspy.Signature: is_output = field.json_schema_extra["__dspy_field_type"] == "output" type_ = field.annotation if is_output: - if type_ in (str, int, float, bool): + if type_ is bool: + + def parse(x): + x = x.strip().lower() + if x not in ("true", "false"): + raise ValueError("Respond with true or false") + return x == "true" + + signature = signature.with_updated_fields( + name, + desc=field.json_schema_extra.get("desc", "") + + (" (Respond with true or false)" if type_ != str else ""), + format=lambda x: x if isinstance(x, str) else str(x), + parser=parse, + ) + elif type_ in (str, int, float, bool): signature = signature.with_updated_fields( name, desc=field.json_schema_extra.get("desc", "") diff --git a/tests/functional/test_functional.py b/tests/functional/test_functional.py index f0e22e4be8..2bedfa3560 100644 --- a/tests/functional/test_functional.py +++ b/tests/functional/test_functional.py @@ -653,6 +653,28 @@ def space_in_a(cls, a: str) -> str: _ = ValidatedSignature(a="with space") +def test_lm_as_validator(): + @predictor + def is_square(n: int) -> bool: + """Is n a square number?""" + + def check_square(n): + assert is_square(n=n) + return n + + @predictor + def next_square(n: int) -> Annotated[int, AfterValidator(check_square)]: + """What is the next square number after n?""" + + lm = DummyLM(["3", "False", "4", "True"]) + dspy.settings.configure(lm=lm) + + m = next_square(n=2) + lm.inspect_history(n=2) + + assert m == 4 + + def test_annotated_validator(): def is_square(n: int) -> int: root = n**0.5 @@ -692,3 +714,70 @@ def next_square(n: int) -> Annotated[int, AfterValidator(is_square)]: lm.inspect_history(n=2) assert m == 4 + + +def test_demos(): + demos = [ + dspy.Example(input="What is the speed of light?", output="3e8"), + ] + program = LabeledFewShot(k=len(demos)).compile( + student=dspy.TypedPredictor("input -> output"), + trainset=[ex.with_inputs("input") for ex in demos], + ) + + lm = DummyLM(["Paris"]) + dspy.settings.configure(lm=lm) + + assert program(input="What is the capital of France?").output == "Paris" + + assert lm.get_convo(-1) == textwrap.dedent("""\ + Given the fields `input`, produce the fields `output`. + + --- + + Follow the following format. + + Input: ${input} + Output: ${output} + + --- + + Input: What is the speed of light? + Output: 3e8 + + --- + + Input: What is the capital of France? 
+ Output: Paris""") + + +def _test_demos_missing_input(): + demos = [dspy.Example(input="What is the speed of light?", output="3e8")] + program = LabeledFewShot(k=len(demos)).compile( + student=dspy.TypedPredictor("input -> output, thoughts"), + trainset=[ex.with_inputs("input") for ex in demos], + ) + dspy.settings.configure(lm=DummyLM(["My thoughts", "Paris"])) + assert program(input="What is the capital of France?").output == "Paris" + + assert dspy.settings.lm.get_convo(-1) == textwrap.dedent("""\ + Given the fields `input`, produce the fields `output`. + + --- + + Follow the following format. + + Input: ${input} + Thoughts: ${thoughts} + Output: ${output} + + --- + + Input: What is the speed of light? + Output: 3e8 + + --- + + Input: What is the capital of France? + Thoughts: My thoughts + Output: Paris""") From 96ee792ed3b5c5a8b5d5e4605863b807f5758eb2 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Thu, 14 Mar 2024 21:12:20 -0700 Subject: [PATCH 219/243] Formatting --- dspy/teleprompt/bootstrap.py | 69 ++-- dspy/teleprompt/copro_optimizer.py | 212 ++++++++----- dspy/teleprompt/ensemble.py | 8 +- dspy/teleprompt/finetune.py | 99 +++--- dspy/teleprompt/knn_fewshot.py | 8 +- dspy/teleprompt/mipro_optimizer.py | 371 ++++++++++++++-------- dspy/teleprompt/random_search.py | 67 ++-- dspy/teleprompt/signature_opt.py | 19 +- dspy/teleprompt/signature_opt_bayesian.py | 60 +++- dspy/teleprompt/teleprompt.py | 3 - dspy/teleprompt/teleprompt_optuna.py | 48 ++- dspy/teleprompt/vanilla.py | 5 +- 12 files changed, 637 insertions(+), 332 deletions(-) diff --git a/dspy/teleprompt/bootstrap.py b/dspy/teleprompt/bootstrap.py index 6fc9dd2ef7..86a1c0f362 100644 --- a/dspy/teleprompt/bootstrap.py +++ b/dspy/teleprompt/bootstrap.py @@ -31,7 +31,16 @@ class BootstrapFewShot(Teleprompter): - def __init__(self, metric=None, metric_threshold=None, teacher_settings={}, max_bootstrapped_demos=4, max_labeled_demos=16, max_rounds=1, max_errors=5): + def __init__( + self, + metric=None, + metric_threshold=None, + teacher_settings={}, + max_bootstrapped_demos=4, + max_labeled_demos=16, + max_rounds=1, + max_errors=5, + ): self.metric = metric self.metric_threshold = metric_threshold self.teacher_settings = teacher_settings @@ -39,7 +48,7 @@ def __init__(self, metric=None, metric_threshold=None, teacher_settings={}, max_ self.max_bootstrapped_demos = max_bootstrapped_demos self.max_labeled_demos = max_labeled_demos self.max_rounds = max_rounds - self.max_errors= max_errors + self.max_errors = max_errors self.error_count = 0 self.error_lock = threading.Lock() @@ -59,14 +68,14 @@ def compile(self, student, *, teacher=None, trainset, valset=None): self.student._suggest_failures = 0 return self.student - + def _prepare_student_and_teacher(self, student, teacher): self.student = student.reset_copy() self.teacher = teacher.deepcopy() if teacher is not None else student.reset_copy() - assert getattr(self.student, '_compiled', False) is False, "Student must be uncompiled." + assert getattr(self.student, "_compiled", False) is False, "Student must be uncompiled." 
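+        # Note: when max_labeled_demos > 0 and the teacher is not yet compiled, the
+        # check below first seeds the teacher with raw labeled demos via LabeledFewShot,
+        # so bootstrapping starts from a teacher that already holds k demos in context.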
- if self.max_labeled_demos and getattr(self.teacher, '_compiled', False) is False: + if self.max_labeled_demos and getattr(self.teacher, "_compiled", False) is False: teleprompter = LabeledFewShot(k=self.max_labeled_demos) self.teacher = teleprompter.compile(self.teacher.reset_copy(), trainset=self.trainset) @@ -74,14 +83,18 @@ def _prepare_predictor_mappings(self): name2predictor, predictor2name = {}, {} student, teacher = self.student, self.teacher - assert len(student.predictors()) == len(teacher.predictors()), "Student and teacher must have the same number of predictors." + assert len(student.predictors()) == len( + teacher.predictors() + ), "Student and teacher must have the same number of predictors." for (name1, predictor1), (name2, predictor2) in zip(student.named_predictors(), teacher.named_predictors()): assert name1 == name2, "Student and teacher must have the same program structure." - assert predictor1.signature.equals(predictor2.signature), f"Student and teacher must have the same signatures. {type(predictor1.signature)} != {type(predictor2.signature)}" + assert predictor1.signature.equals( + predictor2.signature + ), f"Student and teacher must have the same signatures. {type(predictor1.signature)} != {type(predictor2.signature)}" assert id(predictor1) != id(predictor2), "Student and teacher must be different objects." - name2predictor[name1] = None # dict(student=predictor1, teacher=predictor2) + name2predictor[name1] = None # dict(student=predictor1, teacher=predictor2) predictor2name[id(predictor1)] = name1 # FIXME(shangyint): This is an ugly hack to bind traces of @@ -89,7 +102,7 @@ def _prepare_predictor_mappings(self): # if isinstance(predictor1, Retry): # predictor2name[id(predictor1.module)] = name1 - predictor2name[id(predictor2)] = name2 + predictor2name[id(predictor2)] = name2 self.name2predictor = name2predictor self.predictor2name = predictor2name @@ -111,8 +124,8 @@ def _bootstrap(self, *, max_bootstraps=None): if success: bootstrapped[example_idx] = True - print(f'Bootstrapped {len(bootstrapped)} full traces after {example_idx+1} examples in round {round_idx}.') - + print(f"Bootstrapped {len(bootstrapped)} full traces after {example_idx+1} examples in round {round_idx}.") + # Unbootstrapped training examples self.validation = [x for idx, x in enumerate(self.trainset) if idx not in bootstrapped] @@ -123,10 +136,10 @@ def _bootstrap(self, *, max_bootstraps=None): # NOTE: Can't yet use evaluate because we need to trace *per example* # evaluate = Evaluate(program=self.teacher, metric=self.metric, num_threads=12) # score = evaluate(self.metric, display_table=False, display_progress=True) - + def _bootstrap_one_example(self, example, round_idx=0): name2traces = self.name2traces - teacher = self.teacher #.deepcopy() + teacher = self.teacher # .deepcopy() predictor_cache = {} try: @@ -145,7 +158,7 @@ def _bootstrap_one_example(self, example, round_idx=0): for name, predictor in teacher.named_predictors(): predictor.demos = predictor_cache[name] - + if self.metric: metric_val = self.metric(example, prediction, trace) if self.metric_threshold: @@ -162,13 +175,13 @@ def _bootstrap_one_example(self, example, round_idx=0): current_error_count = self.error_count if current_error_count >= self.max_errors: raise e - print(f'Failed to run or to evaluate example {example} with {self.metric} due to {e}.') - + print(f"Failed to run or to evaluate example {example} with {self.metric} due to {e}.") + if success: for step in trace: predictor, inputs, outputs = step - if 'dspy_uuid' 
in example: + if "dspy_uuid" in example: demo = Example(augmented=True, dspy_uuid=example.dspy_uuid, **inputs, **outputs) else: # TODO: FIXME: This is a hack. RandomSearch will complain for now in this edge case. @@ -177,16 +190,20 @@ def _bootstrap_one_example(self, example, round_idx=0): try: predictor_name = self.predictor2name[id(predictor)] except KeyError as e: - continue # FIXME: ! + continue # FIXME: ! # TODO: Look closer into this. It's a bit tricky to reproduce. - print(f'Failed to find predictor {predictor} in {self.predictor2name}.') - print('Are you doing this in a notebook (Jupyter)? This might be caused by redefining values by rerunning cells.') - print('Try restarting the notebook, or open an issue.') - raise KeyError(f'Failed to find predictor {id(predictor)} {predictor} in {self.predictor2name}.') from e + print(f"Failed to find predictor {predictor} in {self.predictor2name}.") + print( + "Are you doing this in a notebook (Jupyter)? This might be caused by redefining values by rerunning cells." + ) + print("Try restarting the notebook, or open an issue.") + raise KeyError( + f"Failed to find predictor {id(predictor)} {predictor} in {self.predictor2name}." + ) from e name2traces[predictor_name].append(demo) - + return success def _train(self): @@ -194,13 +211,13 @@ def _train(self): raw_demos = self.validation for name, predictor in self.student.named_predictors(): - augmented_demos = self.name2traces[name][:self.max_bootstrapped_demos] - + augmented_demos = self.name2traces[name][: self.max_bootstrapped_demos] + sample_size = min(self.max_labeled_demos - len(augmented_demos), len(raw_demos)) sample_size = max(0, sample_size) raw_demos = rng.sample(raw_demos, sample_size) - + if dspy.settings.release >= 20230928: predictor.demos = raw_demos + augmented_demos else: diff --git a/dspy/teleprompt/copro_optimizer.py b/dspy/teleprompt/copro_optimizer.py index 9d304cf9ed..3523c73804 100644 --- a/dspy/teleprompt/copro_optimizer.py +++ b/dspy/teleprompt/copro_optimizer.py @@ -31,24 +31,41 @@ * total_calls: The total number of calls to the task metric. These statistics will be returned as attributes of the best program. """ + + class BasicGenerateInstruction(Signature): """You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. Your task is to propose an instruction that will lead a good language model to perform the task well. Don't be afraid to be creative.""" basic_instruction = dspy.InputField(desc="The initial instructions before optimization") proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") - proposed_prefix_for_output_field = dspy.OutputField(desc="The string at the end of the prompt, which will help the model start solving the task") + proposed_prefix_for_output_field = dspy.OutputField( + desc="The string at the end of the prompt, which will help the model start solving the task" + ) + class GenerateInstructionGivenAttempts(dspy.Signature): - """You are an instruction optimizer for large language models. I will give some task instructions I've tried, along with their corresponding validation scores. The instructions are arranged in increasing order based on their scores, where higher scores indicate better quality. + """You are an instruction optimizer for large language models. I will give some task instructions I've tried, along with their corresponding validation scores. 
The instructions are arranged in increasing order based on their scores, where higher scores indicate better quality. -Your task is to propose a new instruction that will lead a good language model to perform the task even better. Don't be afraid to be creative.""" + Your task is to propose a new instruction that will lead a good language model to perform the task even better. Don't be afraid to be creative.""" + + attempted_instructions = dspy.InputField(format=dsp.passages2text) + proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") + proposed_prefix_for_output_field = dspy.OutputField( + desc="The string at the end of the prompt, which will help the model start solving the task" + ) - attempted_instructions = dspy.InputField(format=dsp.passages2text) - proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") - proposed_prefix_for_output_field = dspy.OutputField(desc="The string at the end of the prompt, which will help the model start solving the task") class COPRO(Teleprompter): - def __init__(self, prompt_model=None, metric=None, breadth=10, depth=3, init_temperature=1.4, verbose=False, track_stats=False): + def __init__( + self, + prompt_model=None, + metric=None, + breadth=10, + depth=3, + init_temperature=1.4, + verbose=False, + track_stats=False, + ): if breadth <= 1: raise ValueError("Breadth must be greater than 1") self.metric = metric @@ -75,16 +92,16 @@ def _drop_duplicates(self, candidates): last_batch_score = -1 for c in candidates: repeat = False - if c['score'] == last_batch_score: + if c["score"] == last_batch_score: for c2 in last_batch: - if (self._check_candidates_equal(c, c2)): + if self._check_candidates_equal(c, c2): repeat = True break if not repeat: last_batch.append(c) else: last_batch = [c] - last_batch_score = c['score'] + last_batch_score = c["score"] if not repeat: final_candidates.append(c) return final_candidates @@ -95,32 +112,34 @@ def _print_signature(self, predictor): print(f"i: {signature.instructions}") print(f"p: {list(signature.fields.values())[-1].json_schema_extra['prefix']}") print() - + def _get_signature(self, predictor): - if (hasattr(predictor, 'extended_signature')): + if hasattr(predictor, "extended_signature"): return predictor.extended_signature - elif (hasattr(predictor, 'signature')): + elif hasattr(predictor, "signature"): return predictor.signature - + def _set_signature(self, predictor, updated_signature): - if (hasattr(predictor, 'extended_signature')): + if hasattr(predictor, "extended_signature"): predictor.extended_signature = updated_signature - elif (hasattr(predictor, 'signature')): + elif hasattr(predictor, "signature"): predictor.signature = updated_signature - def compile(self, student, *, trainset, eval_kwargs): """student is a program that needs to be optimized, note that it may be zero-shot or already pre-optimized for demos != []""" module = student.deepcopy() evaluate = Evaluate(devset=trainset, metric=self.metric, **eval_kwargs) total_calls = 0 - results_best = {id(p):{"depth": [], "max": [], "average": [], "min":[], "std": []} for p in module.predictors()} - results_latest = {id(p):{"depth": [], "max": [], "average": [], "min":[], "std": []} for p in module.predictors()} + results_best = { + id(p): {"depth": [], "max": [], "average": [], "min": [], "std": []} for p in module.predictors() + } + results_latest = { + id(p): {"depth": [], "max": [], "average": [], "min": [], "std": []} for p in module.predictors() + } if 
self.track_stats: import numpy as np - candidates = {} evaluated_candidates = defaultdict(dict) @@ -130,64 +149,85 @@ def compile(self, student, *, trainset, eval_kwargs): basic_prefix = None *_, last_key = self._get_signature(predictor).fields.keys() basic_instruction = self._get_signature(predictor).instructions - basic_prefix = self._get_signature(predictor).fields[last_key].json_schema_extra['prefix'] - if self.prompt_model: + basic_prefix = self._get_signature(predictor).fields[last_key].json_schema_extra["prefix"] + if self.prompt_model: with dspy.settings.context(lm=self.prompt_model): - instruct = dspy.Predict(BasicGenerateInstruction, n=self.breadth-1, temperature=self.init_temperature)(basic_instruction=basic_instruction) + instruct = dspy.Predict( + BasicGenerateInstruction, n=self.breadth - 1, temperature=self.init_temperature + )(basic_instruction=basic_instruction) else: - instruct = dspy.Predict(BasicGenerateInstruction, n=self.breadth-1, temperature=self.init_temperature)(basic_instruction=basic_instruction) + instruct = dspy.Predict( + BasicGenerateInstruction, n=self.breadth - 1, temperature=self.init_temperature + )(basic_instruction=basic_instruction) # Add in our initial prompt as a candidate as well instruct.completions.proposed_instruction.append(basic_instruction) instruct.completions.proposed_prefix_for_output_field.append(basic_prefix) candidates[id(predictor)] = instruct.completions evaluated_candidates[id(predictor)] = {} - - if self.verbose and self.prompt_model: print(f"{self.prompt_model.inspect_history(n=1)}") + + if self.verbose and self.prompt_model: + print(f"{self.prompt_model.inspect_history(n=1)}") latest_candidates = candidates all_candidates = candidates - + module_clone = module.deepcopy() # For each iteration in depth... 
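        # (One depth round below: score every (instruction, prefix) candidate for
        # every predictor, pin each predictor to its best-scoring pair so subsequent
        # evaluations reflect it, then ask the prompt model for `breadth` fresh
        # candidates seeded with the scored attempts.)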
-        for d in range(self.depth): # TODO: fix this so that we eval the new batch of predictors with the new best followoing predictors
+        for d in range(
+            self.depth
+        ):  # TODO: fix this so that we eval the new batch of predictors with the new best following predictors
            print(f"Iteration Depth: {d+1}/{self.depth}.")
            latest_scores = []
-
+
            # Go through our module's predictors
            for p_i, (p_old, p_new) in enumerate(zip(module.predictors(), module_clone.predictors())):
                candidates_ = latest_candidates[id(p_old)]  # Use the most recently generated candidates for evaluation
                if len(module.predictors()) > 1:
                    candidates_ = all_candidates[
                        id(p_old)
                    ]  # Unless our program has multiple predictors, in which case we need to reevaluate all prompts with the new prompt(s) for the other predictor(s)

                # For each candidate
                for c_i, c in enumerate(candidates_):
                    # Get the candidate instruction and prefix
                    instruction, prefix = (
                        c.proposed_instruction.strip('"').strip(),
                        c.proposed_prefix_for_output_field.strip('"').strip(),
                    )

                    # Set this new module with our instruction / prefix
                    *_, last_key = self._get_signature(p_new).fields.keys()
                    updated_signature = (
                        self._get_signature(p_new)
                        .with_instructions(instruction)
                        .with_updated_fields(last_key, prefix=prefix)
                    )
                    self._set_signature(p_new, updated_signature)

                    # Score the instruction / prefix
                    if self.verbose:
                        print("----------------")
                    for i, predictor in enumerate(module_clone.predictors()):
                        if self.verbose:
                            print(f"Predictor {i+1}")
                        self._print_signature(predictor)
                    print(
                        f"At Depth {d+1}/{self.depth}, Evaluating Prompt Candidate #{c_i+1}/{len(candidates_)} for Predictor {p_i+1} of {len(module.predictors())}."
+ ) score = evaluate(module_clone, devset=trainset, **eval_kwargs) - if self.verbose and self.prompt_model: print(f"prompt_model.inspect_history(n=1) {self.prompt_model.inspect_history(n=1)}") + if self.verbose and self.prompt_model: + print(f"prompt_model.inspect_history(n=1) {self.prompt_model.inspect_history(n=1)}") total_calls += 1 - if self.verbose: print("----------------") + if self.verbose: + print("----------------") replace_entry = True - if self.verbose: print(f"(instruction, prefix) {(instruction, prefix)}") + if self.verbose: + print(f"(instruction, prefix) {(instruction, prefix)}") # if verbose: print(f"evaluated_candidates[id(p_old)] {evaluated_candidates[id(p_old)]}") - if ((instruction, prefix) in evaluated_candidates[id(p_old)]): + if (instruction, prefix) in evaluated_candidates[id(p_old)]: # if verbose: print(f"if evaluated_candidates[id(p_old)][(instruction, prefix)] {evaluated_candidates[id(p_old)][(instruction, prefix)]}") if evaluated_candidates[id(p_old)][(instruction, prefix)]["score"] >= score: replace_entry = False @@ -201,93 +241,107 @@ def compile(self, student, *, trainset, eval_kwargs): "prefix": prefix, "depth": d, } - - if (len(candidates_)-self.breadth <= c_i): + + if len(candidates_) - self.breadth <= c_i: latest_scores.append(score) if self.track_stats: results_latest[id(p_old)]["depth"].append(d) results_latest[id(p_old)]["max"].append(max(latest_scores)) - results_latest[id(p_old)]["average"].append(sum(latest_scores)/len(latest_scores)) + results_latest[id(p_old)]["average"].append(sum(latest_scores) / len(latest_scores)) results_latest[id(p_old)]["min"].append(min(latest_scores)) results_latest[id(p_old)]["std"].append(np.std(latest_scores)) - + # Now that we've evaluated the candidates, set this predictor to the best performing version # to ensure the next round of scores reflect the best possible version - best_candidate = max(evaluated_candidates[id(p_old)].values(), key=lambda candidate: candidate['score']) + best_candidate = max(evaluated_candidates[id(p_old)].values(), key=lambda candidate: candidate["score"]) *_, last_key = self._get_signature(p_old).fields.keys() - updated_signature = self._get_signature(p_new) \ - .with_instructions(best_candidate["instruction"]) \ + updated_signature = ( + self._get_signature(p_new) + .with_instructions(best_candidate["instruction"]) .with_updated_fields(last_key, prefix=best_candidate["prefix"]) + ) self._set_signature(p_new, updated_signature) - if self.verbose: print(f"Updating Predictor {id(p_old)} to:\ni: {best_candidate['instruction']}\np: {best_candidate['prefix']}") - if self.verbose: print("Full predictor with update: ") - for i,predictor in enumerate(module_clone.predictors()): - if self.verbose: print(f"Predictor {i}") + if self.verbose: + print( + f"Updating Predictor {id(p_old)} to:\ni: {best_candidate['instruction']}\np: {best_candidate['prefix']}" + ) + if self.verbose: + print("Full predictor with update: ") + for i, predictor in enumerate(module_clone.predictors()): + if self.verbose: + print(f"Predictor {i}") self._print_signature(predictor) - if d == self.depth-1: + if d == self.depth - 1: break - new_candidates = {} for p_base in module.predictors(): # Build Few-Shot Example of Optimized Prompts attempts = [] shortest_len = self.breadth - shortest_len = min(len(evaluated_candidates[id(p_base)]),shortest_len) + shortest_len = min(len(evaluated_candidates[id(p_base)]), shortest_len) best_predictors = list(evaluated_candidates[id(p_base)].values()) # best_predictors = 
evaluated_candidates[id(p_base)].values()[:] - best_predictors.sort(key=lambda x: x['score'], reverse=True) + best_predictors.sort(key=lambda x: x["score"], reverse=True) if self.track_stats: - scores = [x['score'] for x in best_predictors][:10] + scores = [x["score"] for x in best_predictors][:10] results_best[id(p_base)]["depth"].append(d) results_best[id(p_base)]["max"].append(max(scores)) - results_best[id(p_base)]["average"].append(sum(scores)/len(scores)) + results_best[id(p_base)]["average"].append(sum(scores) / len(scores)) results_best[id(p_base)]["min"].append(min(scores)) results_best[id(p_base)]["std"].append(np.std(scores)) - - for i in range(shortest_len-1,-1,-1): + + for i in range(shortest_len - 1, -1, -1): # breakpoint() attempts.append(f'Instruction #{shortest_len-i}: {best_predictors[i]["instruction"]}') attempts.append(f'Prefix #{shortest_len-i}: {best_predictors[i]["prefix"]}') attempts.append(f'Resulting Score #{shortest_len-i}: {best_predictors[i]["score"]}') - + # Generate next batch of potential prompts to optimize, with previous attempts as input - if self.prompt_model: + if self.prompt_model: with dspy.settings.context(lm=self.prompt_model): - instr = dspy.Predict(GenerateInstructionGivenAttempts, n=self.breadth, temperature=self.init_temperature)(attempted_instructions=attempts) + instr = dspy.Predict( + GenerateInstructionGivenAttempts, n=self.breadth, temperature=self.init_temperature + )(attempted_instructions=attempts) else: - instr = dspy.Predict(GenerateInstructionGivenAttempts, n=self.breadth, temperature=self.init_temperature)(attempted_instructions=attempts) + instr = dspy.Predict( + GenerateInstructionGivenAttempts, n=self.breadth, temperature=self.init_temperature + )(attempted_instructions=attempts) - if self.verbose and self.prompt_model: print(f"{self.prompt_model.inspect_history(n=1)}") + if self.verbose and self.prompt_model: + print(f"{self.prompt_model.inspect_history(n=1)}") # Get candidates for each predictor new_candidates[id(p_base)] = instr.completions all_candidates[id(p_base)].proposed_instruction.extend(instr.completions.proposed_instruction) - all_candidates[id(p_base)].proposed_prefix_for_output_field.extend(instr.completions.proposed_prefix_for_output_field) + all_candidates[id(p_base)].proposed_prefix_for_output_field.extend( + instr.completions.proposed_prefix_for_output_field + ) - if self.verbose and self.prompt_model: print(f"{self.prompt_model.inspect_history(n=1)}") + if self.verbose and self.prompt_model: + print(f"{self.prompt_model.inspect_history(n=1)}") latest_candidates = new_candidates - + candidates = [] for predictor in module.predictors(): candidates.extend(list(evaluated_candidates[id(predictor)].values())) if self.track_stats: best_predictors = list(evaluated_candidates[id(predictor)].values()) - best_predictors.sort(key=lambda x: x['score'], reverse=True) + best_predictors.sort(key=lambda x: x["score"], reverse=True) - scores = [x['score'] for x in best_predictors][:10] + scores = [x["score"] for x in best_predictors][:10] results_best[id(predictor)]["depth"].append(d) results_best[id(predictor)]["max"].append(max(scores)) - results_best[id(predictor)]["average"].append(sum(scores)/len(scores)) + results_best[id(predictor)]["average"].append(sum(scores) / len(scores)) results_best[id(predictor)]["min"].append(min(scores)) results_best[id(predictor)]["std"].append(np.std(scores)) # if verbose: print(f"candidates: {candidates}") - candidates.sort(key=lambda x: x['score'], reverse=True) + candidates.sort(key=lambda 
x: x["score"], reverse=True) candidates = self._drop_duplicates(candidates) @@ -298,4 +352,4 @@ def compile(self, student, *, trainset, eval_kwargs): best_program.results_best = results_best best_program.results_latest = results_latest - return best_program \ No newline at end of file + return best_program diff --git a/dspy/teleprompt/ensemble.py b/dspy/teleprompt/ensemble.py index 5e0db9bcac..f7cfb01607 100644 --- a/dspy/teleprompt/ensemble.py +++ b/dspy/teleprompt/ensemble.py @@ -6,12 +6,13 @@ TODO: The EnsembledProgram should actually imitate the structure of the individual programs (IF they are all compatible). This allows compiling with an ensemble program as a (singular) teacher. Basically the top majority-compatible trace will end up being used, if dspy.majority is the reduce_fn. """ + class Ensemble(Teleprompter): def __init__(self, *, reduce_fn=None, size=None, deterministic=False): """A common reduce_fn is dspy.majority.""" - + assert deterministic is False, "TODO: Implement example hashing for deterministic ensemble." - + self.reduce_fn = reduce_fn self.size = size self.deterministic = deterministic @@ -21,11 +22,12 @@ def compile(self, programs): reduce_fn = self.reduce_fn import dspy + class EnsembledProgram(dspy.Module): def __init__(self): super().__init__() self.programs = programs - + def forward(self, *args, **kwargs): programs = random.sample(self.programs, size) if size else self.programs outputs = [prog(*args, **kwargs) for prog in programs] diff --git a/dspy/teleprompt/finetune.py b/dspy/teleprompt/finetune.py index 8400fe7749..ca080a5c3d 100644 --- a/dspy/teleprompt/finetune.py +++ b/dspy/teleprompt/finetune.py @@ -18,11 +18,11 @@ # from dspy.evaluate.evaluate import Evaluate -if os.environ.get('DSP_NOTEBOOK_CACHEDIR'): - training_data_directory = os.path.join(os.environ.get('DSP_NOTEBOOK_CACHEDIR'), 'compiler') +if os.environ.get("DSP_NOTEBOOK_CACHEDIR"): + training_data_directory = os.path.join(os.environ.get("DSP_NOTEBOOK_CACHEDIR"), "compiler") print(training_data_directory) else: - training_data_directory = 'local_cache/compiler' + training_data_directory = "local_cache/compiler" if not os.path.exists(training_data_directory): os.makedirs(training_data_directory) @@ -54,20 +54,36 @@ def __init__(self, metric=None, teacher_settings={}, multitask=True): self.multitask = multitask metric = metric or (lambda *args: True) - self.teleprompter = BootstrapFewShot(metric=metric, - max_bootstrapped_demos=999999, - max_labeled_demos=0, # FIXME: TODO: Make this zero? or param, with default as 16 or 0? - teacher_settings=teacher_settings) - - - def compile(self, student, *, teacher=None, trainset, valset=None, - target='t5-large', bsize=12, accumsteps=1, lr=5e-5, epochs=1, bf16=False, int8=False, peft=False, path_prefix=None): - + self.teleprompter = BootstrapFewShot( + metric=metric, + max_bootstrapped_demos=999999, + max_labeled_demos=0, # FIXME: TODO: Make this zero? or param, with default as 16 or 0? + teacher_settings=teacher_settings, + ) + + def compile( + self, + student, + *, + teacher=None, + trainset, + valset=None, + target="t5-large", + bsize=12, + accumsteps=1, + lr=5e-5, + epochs=1, + bf16=False, + int8=False, + peft=False, + path_prefix=None, + ): # It's usually better to supply a few-shot teacher, rather than uncompiled module (the student). if teacher is None: - print("WARNING: Using a vanilla teacher. " - "Are you sure you want to use BootstrapFinetune without a compiled teacher?") - + print( + "WARNING: Using a vanilla teacher. 
" + "Are you sure you want to use BootstrapFinetune without a compiled teacher?" + ) teachers = teacher if isinstance(teacher, list) else [teacher] finetune_data = {} @@ -79,7 +95,7 @@ def compile(self, student, *, teacher=None, trainset, valset=None, # Prepare finetune pairs. for name, predictor in compiled.named_predictors(): - name_ = 'all' if multitask else name + name_ = "all" if multitask else name finetune_data[name_] = [] if name_ not in finetune_data else finetune_data[name_] for demo in predictor.demos: @@ -96,44 +112,47 @@ def compile(self, student, *, teacher=None, trainset, valset=None, random.Random(0).shuffle(finetune_data[name_]) print(name_, len(finetune_data[name_])) - # # Dump as files. - # + # finetune_paths = {} for name in finetune_data: data = finetune_data[name] - hashed_name = name + '.' + Hasher.hash(data) - output_path = os.path.join(training_data_directory, f'{hashed_name}.jsonl') + hashed_name = name + "." + Hasher.hash(data) + output_path = os.path.join(training_data_directory, f"{hashed_name}.jsonl") print(output_path) - with open(output_path, 'w') as f: + with open(output_path, "w") as f: for line in data: - f.write(ujson.dumps(line) + '\n') - + f.write(ujson.dumps(line) + "\n") + finetune_paths[name] = output_path - # # Train! # import string + compiler_config = { - 'save': ''.join(random.Random(time.time()).choices(string.ascii_uppercase + string.digits, k=13)), # https://stackoverflow.com/a/2257449/1493011 - 'peft': peft, - 'fp16': False, - 'bf16': bf16, - 'int8': int8, - 'fid': False, - 'rationale': False, - 'batch_size': bsize, - 'epochs': epochs, - 'gradient_accumulation_steps': accumsteps, # 2, - 'lr': lr, + "save": "".join( + random.Random(time.time()).choices(string.ascii_uppercase + string.digits, k=13) + ), # https://stackoverflow.com/a/2257449/1493011 + "peft": peft, + "fp16": False, + "bf16": bf16, + "int8": int8, + "fid": False, + "rationale": False, + "batch_size": bsize, + "epochs": epochs, + "gradient_accumulation_steps": accumsteps, # 2, + "lr": lr, } - compiler_config['save'] = os.path.join(path_prefix, compiler_config['save']) if path_prefix else compiler_config['save'] + compiler_config["save"] = ( + os.path.join(path_prefix, compiler_config["save"]) if path_prefix else compiler_config["save"] + ) from dsp.modules.finetuning import finetune_hf @@ -143,11 +162,11 @@ def compile(self, student, *, teacher=None, trainset, valset=None, for name in finetune_data: training_data_path = finetune_paths[name] compiler_config_ = dict(compiler_config) - compiler_config_['save'] = compiler_config['save'] + '.' + name + compiler_config_["save"] = compiler_config["save"] + "." + name best_ckpt_path = finetune_hf(training_data_path, target, compiler_config_) print(f"#> Best checkpoint path: {best_ckpt_path} for {name}") - finetune_models[name] = dsp.HFModel(model=target, checkpoint=best_ckpt_path) # best_ckpt_path + finetune_models[name] = dsp.HFModel(model=target, checkpoint=best_ckpt_path) # best_ckpt_path # # Set the LMs to the finetuned ones, per module @@ -158,7 +177,7 @@ def compile(self, student, *, teacher=None, trainset, valset=None, for (name, predictor), (name2, predictor2) in zip(compiled.named_predictors(), compiled2.named_predictors()): assert name == name2 - name = 'all' if multitask else name + name = "all" if multitask else name # TODO: FIXME: When we assign .lm, the Predict.forward will also set only_query=True. # This is correct for here but we may want to make it more explicitly restricted to finetuned models. 
@@ -166,5 +185,5 @@ def compile(self, student, *, teacher=None, trainset, valset=None, predictor2.lm = finetune_models[name] assert predictor2.demos == [] - + return compiled2 diff --git a/dspy/teleprompt/knn_fewshot.py b/dspy/teleprompt/knn_fewshot.py index b999447078..fe0d961b8a 100644 --- a/dspy/teleprompt/knn_fewshot.py +++ b/dspy/teleprompt/knn_fewshot.py @@ -17,8 +17,10 @@ def compile(self, student, *, teacher=None, trainset, valset=None): def forward_pass(*args, **kwargs): knn_trainset = self.KNN(**kwargs) few_shot_bootstrap = BootstrapFewShot() - compiled_program = few_shot_bootstrap.compile(student, teacher=teacher, trainset=knn_trainset, valset=valset) + compiled_program = few_shot_bootstrap.compile( + student, teacher=teacher, trainset=knn_trainset, valset=valset + ) return compiled_program(**kwargs) - + student_copy.forward = types.MethodType(forward_pass, student_copy) - return student_copy \ No newline at end of file + return student_copy diff --git a/dspy/teleprompt/mipro_optimizer.py b/dspy/teleprompt/mipro_optimizer.py index c9045ff79f..00f77fbccb 100644 --- a/dspy/teleprompt/mipro_optimizer.py +++ b/dspy/teleprompt/mipro_optimizer.py @@ -3,6 +3,7 @@ import sys import textwrap from collections import defaultdict +from typing import Any import optuna @@ -43,12 +44,16 @@ This information will be returned as attributes of the best program. """ + class BasicGenerateInstruction(Signature): """You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. Your task is to propose an instruction that will lead a good language model to perform the task well. Don't be afraid to be creative.""" basic_instruction = dspy.InputField(desc="The initial instructions before optimization") proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") - proposed_prefix_for_output_field = dspy.OutputField(desc="The string at the end of the prompt, which will help the model start solving the task") + proposed_prefix_for_output_field = dspy.OutputField( + desc="The string at the end of the prompt, which will help the model start solving the task" + ) + class BasicGenerateInstructionWithDataObservations(Signature): """You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. I will also give you some ``observations`` I have made about the dataset and task. Your task is to propose an instruction that will lead a good language model to perform the task well. Don't be afraid to be creative.""" @@ -56,54 +61,88 @@ class BasicGenerateInstructionWithDataObservations(Signature): basic_instruction = dspy.InputField(desc="The initial instructions before optimization") observations = dspy.InputField(desc="Observations about the dataset and task") proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") - proposed_prefix_for_output_field = dspy.OutputField(desc="The string at the end of the prompt, which will help the model start solving the task") + proposed_prefix_for_output_field = dspy.OutputField( + desc="The string at the end of the prompt, which will help the model start solving the task" + ) + class BasicGenerateInstructionWithExamples(dspy.Signature): - ("""You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. 
Specifically, I will also provide you with the current ``basic instruction`` that is being used for this task. I will also provide you with some ``examples`` of the expected inputs and outputs. + """You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. Specifically, I will also provide you with the current ``basic instruction`` that is being used for this task. I will also provide you with some ``examples`` of the expected inputs and outputs. + + Your task is to propose an instruction that will lead a good language model to perform the task well. Don't be afraid to be creative.""" + + # attempted_instructions = dspy.InputField(format=str, desc="Previously attempted task instructions, along with their resulting validation score, and an example of the instruction in use on a sample from our dataset.") + basic_instruction = dspy.InputField(desc="The initial instructions before optimization") + # examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task") + examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task") + proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") + proposed_prefix_for_output_field = dspy.OutputField( + desc="The string at the end of the prompt, which will help the model start solving the task" + ) -Your task is to propose an instruction that will lead a good language model to perform the task well. Don't be afraid to be creative.""") - # attempted_instructions = dspy.InputField(format=str, desc="Previously attempted task instructions, along with their resulting validation score, and an example of the instruction in use on a sample from our dataset.") - basic_instruction = dspy.InputField(desc="The initial instructions before optimization") - # examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task") - examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task") - proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") - proposed_prefix_for_output_field = dspy.OutputField(desc="The string at the end of the prompt, which will help the model start solving the task") class BasicGenerateInstructionWithExamplesAndDataObservations(dspy.Signature): - ("""You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. Specifically, I will give you some ``observations`` I have made about the dataset and task, along with some ``examples`` of the expected inputs and outputs. I will also provide you with the current ``basic instruction`` that is being used for this task. + """You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. Specifically, I will give you some ``observations`` I have made about the dataset and task, along with some ``examples`` of the expected inputs and outputs. I will also provide you with the current ``basic instruction`` that is being used for this task. + + Your task is to propose a new improved instruction and prefix for the output field that will lead a good language model to perform the task well. 
Don't be afraid to be creative."""

    observations = dspy.InputField(desc="Observations about the dataset and task")
    examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task")
    basic_instruction = dspy.InputField(desc="The initial instructions before optimization")
    proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model")
    proposed_prefix_for_output_field = dspy.OutputField(
        desc="The string at the end of the prompt, which will help the model start solving the task"
    )


class ObservationSummarizer(dspy.Signature):
    """Given a series of observations I have made about my dataset, please summarize them into a brief 2-3 sentence summary which highlights only the most important details."""

    observations = dspy.InputField(desc="Observations I have made about my dataset")
    summary = dspy.OutputField(
        desc="Two to three sentence summary of only the most significant highlights of my observations"
    )


class DatasetDescriptor(dspy.Signature):
    (
        """Given several examples from a dataset please write observations about trends that hold for most or all of the samples. """
        """Some areas you may consider in your observations: topics, content, syntax, conciseness, etc. """
        """It will be useful to make an educated guess as to the nature of the task this dataset will enable. Don't be afraid to be creative"""
    )

    examples = dspy.InputField(desc="Sample data points from the dataset")
    observations = dspy.OutputField(desc="Something that holds true for most or all of the data you observed")


class DatasetDescriptorWithPriorObservations(dspy.Signature):
    (
        """Given several examples from a dataset please write observations about trends that hold for most or all of the samples. """
        """I will also provide you with a few observations I have already made. Please add your own observations or if you feel the observations are comprehensive say 'COMPLETE' """
        """Some areas you may consider in your observations: topics, content, syntax, conciseness, etc. """
        """It will be useful to make an educated guess as to the nature of the task this dataset will enable. Don't be afraid to be creative"""
Don't be afraid to be creative""") - + ( + """Given several examples from a dataset please write observations about trends that hold for most or all of the samples. """ + """I will also provide you with a few observations I have already made. Please add your own observations or if you feel the observations are comprehensive say 'COMPLETE' """ + """Some areas you may consider in your observations: topics, content, syntax, conciceness, etc. """ + """It will be useful to make an educated guess as to the nature of the task this dataset will enable. Don't be afraid to be creative""" + ) + examples = dspy.InputField(desc="Sample data points from the dataset") prior_observations = dspy.InputField(desc="Some prior observations I made about the data") - observations = dspy.OutputField(desc="Somethings that holds true for most or all of the data you observed or COMPLETE if you have nothing to add") + observations = dspy.OutputField( + desc="Somethings that holds true for most or all of the data you observed or COMPLETE if you have nothing to add" + ) + class MIPRO(Teleprompter): - def __init__(self, metric, prompt_model=None, task_model=None, teacher_settings={}, num_candidates=10, init_temperature=1.0, verbose=False, track_stats=True, view_data_batch_size=10): + def __init__( + self, + metric, + prompt_model=None, + task_model=None, + teacher_settings={}, + num_candidates=10, + init_temperature=1.0, + verbose=False, + track_stats=True, + view_data_batch_size=10, + ): self.num_candidates = num_candidates self.metric = metric self.init_temperature = init_temperature @@ -113,17 +152,22 @@ def __init__(self, metric, prompt_model=None, task_model=None, teacher_settings= self.track_stats = track_stats self.teacher_settings = teacher_settings self.view_data_batch_size = view_data_batch_size - + def _print_full_program(self, program): - for i,predictor in enumerate(program.predictors()): - if self.verbose: print(f"Predictor {i}") - if self.verbose: print(f"i: {self._get_signature(predictor).instructions}") + for i, predictor in enumerate(program.predictors()): + if self.verbose: + print(f"Predictor {i}") + if self.verbose: + print(f"i: {self._get_signature(predictor).instructions}") *_, last_field = self._get_signature(predictor).fields.values() - if self.verbose: print(f"p: {last_field.json_schema_extra['prefix']}") - if self.verbose: print("\n") - + if self.verbose: + print(f"p: {last_field.json_schema_extra['prefix']}") + if self.verbose: + print("\n") + def _print_model_history(self, model, n=1): - if self.verbose: print(f"Model ({model}) History:") + if self.verbose: + print(f"Model ({model}) History:") model.inspect_history(n=n) def _observe_data(self, trainset, max_iterations=10): @@ -134,8 +178,10 @@ def _observe_data(self, trainset, max_iterations=10): skips = 0 iterations = 0 for b in range(self.view_data_batch_size, len(trainset), self.view_data_batch_size): - upper_lim = min(len(trainset), b+self.view_data_batch_size) - output = dspy.Predict(DatasetDescriptorWithPriorObservations, n=1, temperature=1.0)(prior_observations=observations, examples=(trainset[b:upper_lim].__repr__())) + upper_lim = min(len(trainset), b + self.view_data_batch_size) + output = dspy.Predict(DatasetDescriptorWithPriorObservations, n=1, temperature=1.0)( + prior_observations=observations, examples=(trainset[b:upper_lim].__repr__()) + ) iterations += 1 if len(output["observations"]) >= 8 and output["observations"][:8].upper() == "COMPLETE": skips += 1 @@ -149,9 +195,8 @@ def _observe_data(self, trainset, 
max_iterations=10): summary = dspy.Predict(ObservationSummarizer, n=1, temperature=1.0)(observations=observations) return summary.summary - - def _create_example_string(self, fields, example): + def _create_example_string(self, fields, example): # Building the output string output = [] for field in fields: @@ -167,21 +212,30 @@ def _create_example_string(self, fields, example): output.append(field_str) # Joining all the field strings - return '\n'.join(output) + return "\n".join(output) def _get_signature(self, predictor): - if (hasattr(predictor, 'extended_signature')): + if hasattr(predictor, "extended_signature"): return predictor.extended_signature - elif (hasattr(predictor, 'signature')): + elif hasattr(predictor, "signature"): return predictor.signature - + return None + def _set_signature(self, predictor, updated_signature): - if (hasattr(predictor, 'extended_signature')): + if hasattr(predictor, "extended_signature"): predictor.extended_signature = updated_signature - elif (hasattr(predictor, 'signature')): + elif hasattr(predictor, "signature"): predictor.signature = updated_signature - - def _generate_first_N_candidates(self, module, N, view_data, view_examples, demo_candidates, devset): + + def _generate_first_N_candidates( # noqa: N802 + self, + module: dspy.Module, + N: int, # noqa: N803 + view_data: bool, + view_examples: bool, + demo_candidates: dict, + devset, + ) -> tuple[dict, dict]: candidates = {} evaluated_candidates = defaultdict(dict) @@ -189,26 +243,25 @@ def _generate_first_N_candidates(self, module, N, view_data, view_examples, demo # Create data observations self.observations = None with dspy.settings.context(lm=self.prompt_model): - self.observations = self._observe_data(devset).replace("Observations:","").replace("Summary:","") - + self.observations = self._observe_data(devset).replace("Observations:", "").replace("Summary:", "") + if view_examples: example_sets = {} for predictor in module.predictors(): # Get all augmented examples example_set = {} - all_sets_of_examples = demo_candidates[id(predictor)] # Get all generated sets of examples + all_sets_of_examples = demo_candidates[id(predictor)] # Get all generated sets of examples for example_set_i, set_of_examples in enumerate(all_sets_of_examples): - if example_set_i != 0: # Skip the no examples case - for example in set_of_examples: # Get each individual example in the set - if "augmented" in example.keys(): - if example["augmented"]: - if example_set_i not in example_set: - example_set[example_set_i] = [] - fields_to_use = signature_to_template(predictor.signature).fields - input_variable_names = list(self._get_signature(predictor).input_fields.keys()) - example_string = self._create_example_string(fields_to_use, example) - example_set[example_set_i].append(example_string) - example_sets[id(predictor)] = example_set + if example_set_i != 0: # Skip the no examples case + for example in set_of_examples: # Get each individual example in the set + if "augmented" in example and example["augmented"]: + if example_set_i not in example_set: + example_set[example_set_i] = [] + fields_to_use = signature_to_template(predictor.signature).fields + _input_variable_names = list(self._get_signature(predictor).input_fields.keys()) + example_string = self._create_example_string(fields_to_use, example) + example_set[example_set_i].append(example_string) + example_sets[id(predictor)] = example_set else: example_set[example_set_i] = [] example_sets[id(predictor)] = example_set @@ -223,7 +276,7 @@ def 
_generate_first_N_candidates(self, module, N, view_data, view_examples, demo with dspy.settings.context(lm=self.prompt_model): # Data & Examples if view_data and view_examples: - if 1 not in example_sets[id(predictor)].keys(): + if 1 not in example_sets[id(predictor)]: raise ValueError("No examples found for the given predictor") instruct = None for i in range(1, self.num_candidates): @@ -239,15 +292,21 @@ def _generate_first_N_candidates(self, module, N, view_data, view_examples, demo if not instruct: instruct = new_instruct else: - instruct.completions.proposed_instruction.extend(new_instruct.completions.proposed_instruction) - instruct.completions.proposed_prefix_for_output_field.extend(new_instruct.completions.proposed_prefix_for_output_field) + instruct.completions.proposed_instruction.extend( + new_instruct.completions.proposed_instruction + ) + instruct.completions.proposed_prefix_for_output_field.extend( + new_instruct.completions.proposed_prefix_for_output_field + ) # Just data - elif view_data: - instruct = dspy.Predict(BasicGenerateInstructionWithDataObservations, n=N-1, temperature=self.init_temperature)(basic_instruction=basic_instruction, observations=self.observations) + elif view_data: + instruct = dspy.Predict( + BasicGenerateInstructionWithDataObservations, n=N - 1, temperature=self.init_temperature + )(basic_instruction=basic_instruction, observations=self.observations) # Just examples - elif view_examples: + elif view_examples: instruct = None - for i in range(1,self.num_candidates): # Note: skip over the first example set which is empty + for i in range(1, self.num_candidates): # Note: skip over the first example set which is empty new_instruct = dspy.Predict( BasicGenerateInstructionWithExamples, n=1, @@ -259,33 +318,55 @@ def _generate_first_N_candidates(self, module, N, view_data, view_examples, demo if not instruct: instruct = new_instruct else: - instruct.completions.proposed_instruction.extend(new_instruct.completions.proposed_instruction) - instruct.completions.proposed_prefix_for_output_field.extend(new_instruct.completions.proposed_prefix_for_output_field) + instruct.completions.proposed_instruction.extend( + new_instruct.completions.proposed_instruction, + ) + instruct.completions.proposed_prefix_for_output_field.extend( + new_instruct.completions.proposed_prefix_for_output_field, + ) # Neither - else: - instruct = dspy.Predict(BasicGenerateInstruction, n=N-1, temperature=self.init_temperature)(basic_instruction=basic_instruction) - + else: + instruct = dspy.Predict(BasicGenerateInstruction, n=N - 1, temperature=self.init_temperature)( + basic_instruction=basic_instruction + ) + # Add in our initial prompt as a candidate as well instruct.completions.proposed_instruction.insert(0, basic_instruction) instruct.completions.proposed_prefix_for_output_field.insert(0, basic_prefix) candidates[id(predictor)] = instruct.completions evaluated_candidates[id(predictor)] = {} - - if self.verbose: self._print_model_history(self.prompt_model) - + + if self.verbose: + self._print_model_history(self.prompt_model) + return candidates, evaluated_candidates - def compile(self, student, *, trainset, num_trials, max_bootstrapped_demos, max_labeled_demos, eval_kwargs, seed=42, view_data=True, view_examples=True, requires_permission_to_run=True): + def compile( + self, + student: dspy.Program, + *, + trainset: list[dspy.Example], + num_trials: int, + max_bootstrapped_demos: int, + max_labeled_demos: int, + eval_kwargs: dict[str, Any], + seed=42, + view_data=True, + 
        view_examples=True,
+        requires_permission_to_run=True,
+    ) -> dspy.Program:
         # Define ANSI escape codes for colors
-        YELLOW = '\033[93m'
-        BLUE = '\033[94m'
-        BOLD = '\033[1m'
-        ENDC = '\033[0m' # Resets the color to default
+        YELLOW = "\033[93m"
+        BLUE = "\033[94m"
+        BOLD = "\033[1m"
+        ENDC = "\033[0m"  # Resets the color to default

         random.seed(seed)
-
+
         estimated_task_model_calls_wo_module_calls = len(trainset) * num_trials  # M * T * P
-        estimated_prompt_model_calls = 10 + self.num_candidates * len(student.predictors()) # num data summary calls + N * P
+        estimated_prompt_model_calls = 10 + self.num_candidates * len(
+            student.predictors()
+        )  # num data summary calls + N * P

         user_message = textwrap.dedent(f"""\
             {YELLOW}{BOLD}WARNING: Projected Language Model (LM) Calls{ENDC}
@@ -305,7 +386,7 @@ def compile(self, student, *, trainset, num_trials, max_bootstrapped_demos, max_
             {YELLOW}- Reducing the number of trials (`num_trials`), the size of the trainset, or the number of LM calls in your program.{ENDC}
             {YELLOW}- Using a cheaper task model to optimize the prompt.{ENDC}""")
-
+
         user_confirmation_message = textwrap.dedent(f"""\
             To proceed with the execution of this program, please confirm by typing {BLUE}'y'{ENDC} for yes or {BLUE}'n'{ENDC} for no.
@@ -315,47 +396,54 @@ def compile(self, student, *, trainset, num_trials, max_bootstrapped_demos, max_
             """)

         print(user_message)
-
        sys.stdout.flush()  # Flush the output buffer to force the message to print

        run = True
        if requires_permission_to_run:
            print(user_confirmation_message)
            user_input = input("Do you wish to continue? (y/n): ").strip().lower()
-            if user_input != 'y':
+            if user_input != "y":
                print("Compilation aborted by the user.")
-                run=False
+                run = False

        if run:
            # Set up program and evaluation function
            module = student.deepcopy()
            evaluate = Evaluate(devset=trainset, metric=self.metric, **eval_kwargs)
-
+
            # In the case where the bootstrapped and labeled demos are set to 0, we'll still bootstrap examples to use in our meta prompt
-            if max_bootstrapped_demos==0 and max_labeled_demos==0: #TODO: address case when max_bootstrapped alone is 0
-                max_bootstrapped_demos_for_candidate_gen = 1
-                max_labeled_demos_for_candidate_gen = 1 #TODO: this might only need to be 0
+            if (
+                max_bootstrapped_demos == 0 and max_labeled_demos == 0
+            ):  # TODO: address case when max_bootstrapped alone is 0
+                max_bootstrapped_demos_for_candidate_gen = 1
+                max_labeled_demos_for_candidate_gen = 1  # TODO: this might only need to be 0
            else:
-                max_bootstrapped_demos_for_candidate_gen = max_bootstrapped_demos
+                max_bootstrapped_demos_for_candidate_gen = max_bootstrapped_demos
                max_labeled_demos_for_candidate_gen = max_labeled_demos

            # Generate N few shot example sets
            demo_candidates = {}
            for i in range(self.num_candidates):
                if i == 0:  # Store empty set of demos as default for index 0
                    for module_p in module.predictors():
                        if id(module_p) not in demo_candidates:
                            demo_candidates[id(module_p)] = []
                        demo_candidates[id(module_p)].append([])
                else:
-                    if self.verbose: print(f"Creating basic bootstrap: {i}/{self.num_candidates-1}")
+                    if self.verbose:
+                        print(f"Creating basic bootstrap: {i}/{self.num_candidates-1}")

                    # Create a new basic bootstrap few-shot program.
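                    # The seeded shuffle below gives each candidate index its own
                    # reproducible ordering of the training set, so bootstrapped demo
                    # sets differ across candidates without losing determinism.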
rng = random.Random(i) shuffled_trainset = trainset[:] # Create a copy of devset rng.shuffle(shuffled_trainset) # Shuffle the copy - tp = BootstrapFewShot(metric = self.metric, max_bootstrapped_demos=max_bootstrapped_demos_for_candidate_gen, max_labeled_demos=max_labeled_demos_for_candidate_gen, teacher_settings=self.teacher_settings) + tp = BootstrapFewShot( + metric=self.metric, + max_bootstrapped_demos=max_bootstrapped_demos_for_candidate_gen, + max_labeled_demos=max_labeled_demos_for_candidate_gen, + teacher_settings=self.teacher_settings, + ) candidate_program = tp.compile(student=module.deepcopy(), trainset=shuffled_trainset) # Store the candidate demos @@ -363,16 +451,18 @@ def compile(self, student, *, trainset, num_trials, max_bootstrapped_demos, max_ if id(module_p) not in demo_candidates: demo_candidates[id(module_p)] = [] demo_candidates[id(module_p)].append(candidate_p.demos) - + # Generate N candidate prompts - instruction_candidates, _ = self._generate_first_N_candidates(module, self.num_candidates, view_data, view_examples, demo_candidates, trainset) + instruction_candidates, _ = self._generate_first_N_candidates( + module, self.num_candidates, view_data, view_examples, demo_candidates, trainset + ) # Reset demo_candidates to None for our optimization if the user asked for no fewshot examples - if max_bootstrapped_demos==0 and max_labeled_demos==0: + if max_bootstrapped_demos == 0 and max_labeled_demos == 0: demo_candidates = None # Initialize variables to store the best program and its score - best_score = float('-inf') + best_score = float("-inf") best_program = None trial_num = 0 @@ -384,40 +474,54 @@ def objective(trial): nonlocal best_program, best_score, trial_num, trial_logs # Allow access to the outer variables candidate_program = baseline_program.deepcopy() - # Suggest the instruction to use for our predictor + # Suggest the instruction to use for our predictor print(f"Starting trial #{trial_num}") trial_logs[trial_num] = {} for p_old, p_new in zip(baseline_program.predictors(), candidate_program.predictors()): - # Get instruction candidates for our given predictor p_instruction_candidates = instruction_candidates[id(p_old)] - if demo_candidates: p_demo_candidates = demo_candidates[id(p_old)] + if demo_candidates: + p_demo_candidates = demo_candidates[id(p_old)] # Suggest the index of the instruction candidate to use in our trial - instruction_idx = trial.suggest_categorical(f"{id(p_old)}_predictor_instruction",range(len(p_instruction_candidates))) - if demo_candidates: demos_idx = trial.suggest_categorical(f"{id(p_old)}_predictor_demos",range(len(p_demo_candidates))) + instruction_idx = trial.suggest_categorical( + f"{id(p_old)}_predictor_instruction", range(len(p_instruction_candidates)) + ) + if demo_candidates: + demos_idx = trial.suggest_categorical( + f"{id(p_old)}_predictor_demos", range(len(p_demo_candidates)) + ) trial_logs[trial_num][f"{id(p_old)}_predictor_instruction"] = instruction_idx - if demo_candidates: trial_logs[trial_num][f"{id(p_old)}_predictor_demos"] = demos_idx + if demo_candidates: + trial_logs[trial_num][f"{id(p_old)}_predictor_demos"] = demos_idx - # Get the selected instruction candidate + # Get the selected instruction candidate selected_candidate = p_instruction_candidates[instruction_idx] selected_instruction = selected_candidate.proposed_instruction.strip('"').strip() selected_prefix = selected_candidate.proposed_prefix_for_output_field.strip('"').strip() # Use this candidates in our program *_, last_field = 
self._get_signature(p_new).fields.keys() - updated_signature = self._get_signature(p_new).with_instructions(selected_instruction).with_updated_fields(last_field, prefix=selected_prefix) + updated_signature = ( + self._get_signature(p_new) + .with_instructions(selected_instruction) + .with_updated_fields(last_field, prefix=selected_prefix) + ) self._set_signature(p_new, updated_signature) # Get the selected demos - if demo_candidates: selected_demos = p_demo_candidates[demos_idx] + if demo_candidates: + selected_demos = p_demo_candidates[demos_idx] # Use these demos in our program - if demo_candidates: p_new.demos = selected_demos - - if self.verbose: print("Evaling the following program:") - if self.verbose: self._print_full_program(candidate_program) + if demo_candidates: + p_new.demos = selected_demos + + if self.verbose: + print("Evaling the following program:") + if self.verbose: + self._print_full_program(candidate_program) trial_logs[trial_num]["program"] = candidate_program # Evaluate with the new prompts @@ -430,11 +534,13 @@ def objective(trial): end_index = min((i + 1) * batch_size, len(trainset)) split_trainset = trainset[start_index:end_index] split_score = evaluate(candidate_program, devset=split_trainset, display_table=0) - if self.verbose: print(f"{i}st split score: {split_score}") + if self.verbose: + print(f"{i}st split score: {split_score}") total_score += split_score * len(split_trainset) - curr_weighted_avg_score = total_score / min((i+1)*100,len(trainset)) - if self.verbose: print(f"curr average score: {curr_weighted_avg_score}") + curr_weighted_avg_score = total_score / min((i + 1) * 100, len(trainset)) + if self.verbose: + print(f"curr average score: {curr_weighted_avg_score}") trial.report(curr_weighted_avg_score, i) @@ -443,35 +549,38 @@ def objective(trial): print("Trial pruned.") trial_logs[trial_num]["score"] = curr_weighted_avg_score trial_logs[trial_num]["pruned"] = True - trial_num += 1 + trial_num += 1 raise optuna.TrialPruned() - - if self.verbose: print(f"Fully evaled score: {curr_weighted_avg_score}") - if self.verbose: self._print_model_history(self.task_model, n=1) + + if self.verbose: + print(f"Fully evaled score: {curr_weighted_avg_score}") + if self.verbose: + self._print_model_history(self.task_model, n=1) score = curr_weighted_avg_score - + trial_logs[trial_num]["score"] = curr_weighted_avg_score trial_logs[trial_num]["pruned"] = False - + # Update the best program if the current score is better if score > best_score: best_score = score best_program = candidate_program.deepcopy() - - trial_num += 1 + + trial_num += 1 return score return objective - # Run the trial + # Run the trial objective_function = create_objective(module, instruction_candidates, demo_candidates, evaluate, trainset) sampler = optuna.samplers.TPESampler(seed=seed) study = optuna.create_study(direction="maximize", sampler=sampler) - score = study.optimize(objective_function, n_trials=num_trials) + _score = study.optimize(objective_function, n_trials=num_trials) if best_program is not None and self.track_stats: best_program.trial_logs = trial_logs print(f"Returning {best_program} from continue_program") - return best_program \ No newline at end of file + return best_program + return None diff --git a/dspy/teleprompt/random_search.py b/dspy/teleprompt/random_search.py index b95bd6e461..7958bc4717 100644 --- a/dspy/teleprompt/random_search.py +++ b/dspy/teleprompt/random_search.py @@ -24,7 +24,19 @@ class BootstrapFewShotWithRandomSearch(Teleprompter): - def __init__(self, metric, 
teacher_settings={}, max_bootstrapped_demos=4, max_labeled_demos=16, max_rounds=1, num_candidate_programs=16, num_threads=6, max_errors=10, stop_at_score=None, metric_threshold=None): + def __init__( + self, + metric, + teacher_settings={}, + max_bootstrapped_demos=4, + max_labeled_demos=16, + max_rounds=1, + num_candidate_programs=16, + num_threads=6, + max_errors=10, + stop_at_score=None, + metric_threshold=None, + ): self.metric = metric self.teacher_settings = teacher_settings self.max_rounds = max_rounds @@ -64,17 +76,22 @@ def compile(self, student, *, teacher=None, trainset, valset=None, restrict=None if seed == -3: # zero-shot program2 = student.reset_copy() - + elif seed == -2: # labels only teleprompter = LabeledFewShot(k=self.max_labeled_demos) program2 = teleprompter.compile(student, trainset=trainset2, sample=labeled_sample) - + elif seed == -1: # unshuffled few-shot - program = BootstrapFewShot(metric=self.metric, metric_threshold=self.metric_threshold, max_bootstrapped_demos=self.max_num_samples, - max_labeled_demos=self.max_labeled_demos, - teacher_settings=self.teacher_settings, max_rounds=self.max_rounds) + program = BootstrapFewShot( + metric=self.metric, + metric_threshold=self.metric_threshold, + max_bootstrapped_demos=self.max_num_samples, + max_labeled_demos=self.max_labeled_demos, + teacher_settings=self.teacher_settings, + max_rounds=self.max_rounds, + ) program2 = program.compile(student, teacher=teacher, trainset=trainset2) else: @@ -83,37 +100,47 @@ def compile(self, student, *, teacher=None, trainset, valset=None, restrict=None random.Random(seed).shuffle(trainset2) size = random.Random(seed).randint(self.min_num_samples, self.max_num_samples) - teleprompter = BootstrapFewShot(metric=self.metric, metric_threshold=self.metric_threshold, max_bootstrapped_demos=size, - max_labeled_demos=self.max_labeled_demos, - teacher_settings=self.teacher_settings, - max_rounds=self.max_rounds) + teleprompter = BootstrapFewShot( + metric=self.metric, + metric_threshold=self.metric_threshold, + max_bootstrapped_demos=size, + max_labeled_demos=self.max_labeled_demos, + teacher_settings=self.teacher_settings, + max_rounds=self.max_rounds, + ) program2 = teleprompter.compile(student, teacher=teacher, trainset=trainset2) - evaluate = Evaluate(devset=self.valset, metric=self.metric, num_threads=self.num_threads, - max_errors=self.max_errors, display_table=False, display_progress=True) + evaluate = Evaluate( + devset=self.valset, + metric=self.metric, + num_threads=self.num_threads, + max_errors=self.max_errors, + display_table=False, + display_progress=True, + ) score, subscores = evaluate(program2, return_all_scores=True) all_subscores.append(subscores) ############ Assertion-aware Optimization ############ - if hasattr(program2, '_suggest_failures'): + if hasattr(program2, "_suggest_failures"): score = score - program2._suggest_failures * 0.2 - if hasattr(program2, '_assert_failures'): + if hasattr(program2, "_assert_failures"): score = 0 if program2._assert_failures > 0 else score ###################################################### - print('Score:', score, 'for set:', [len(predictor.demos) for predictor in program2.predictors()]) + print("Score:", score, "for set:", [len(predictor.demos) for predictor in program2.predictors()]) if len(scores) == 0 or score > max(scores): - print('New best score:', score, 'for seed', seed) + print("New best score:", score, "for seed", seed) best_program = program2 scores.append(score) print(f"Scores so far: {scores}") - print('Best score:', 
max(scores)) + print("Best score:", max(scores)) score_data.append((score, subscores, seed, program2)) @@ -125,8 +152,8 @@ def compile(self, student, *, teacher=None, trainset, valset=None, restrict=None transposed_subscores = zip(*[subscores for _, subscores, *_ in top_3_scores if subscores]) avg_of_max_per_entry = sum(max(entry) for entry in transposed_subscores) / len(top_3_scores[0][1]) - print(f'Average of max per entry across top {k} scores: {avg_of_max_per_entry}') - + print(f"Average of max per entry across top {k} scores: {avg_of_max_per_entry}") + if self.stop_at_score is not None and score >= self.stop_at_score: print(f"Stopping early because score {score} is >= stop_at_score {self.stop_at_score}") break @@ -140,8 +167,6 @@ def compile(self, student, *, teacher=None, trainset, valset=None, restrict=None return best_program - - # sample between 4 and 10 examples from traces # TODO: FIXME: The max number of demos should be determined in part by the LM's tokenizer + max_length. # This does require executing the program, or at least the predictor. diff --git a/dspy/teleprompt/signature_opt.py b/dspy/teleprompt/signature_opt.py index 9ba1da92c9..2b41f8de41 100644 --- a/dspy/teleprompt/signature_opt.py +++ b/dspy/teleprompt/signature_opt.py @@ -1,4 +1,3 @@ - from .copro_optimizer import COPRO """ @@ -32,10 +31,22 @@ These statistics will be returned as attributes of the best program. """ + class SignatureOptimizer(COPRO): - def __init__(self, prompt_model=None, metric=None, breadth=10, depth=3, init_temperature=1.4, verbose=False, track_stats=False): - print("\u001b[31m[WARNING] SignatureOptimizer has been deprecated and replaced with COPRO. SignatureOptimizer will be removed in a future release. \u001b[31m") + def __init__( + self, + prompt_model=None, + metric=None, + breadth=10, + depth=3, + init_temperature=1.4, + verbose=False, + track_stats=False, + ): + print( + "\u001b[31m[WARNING] SignatureOptimizer has been deprecated and replaced with COPRO. SignatureOptimizer will be removed in a future release. \u001b[31m" + ) super().__init__(prompt_model, metric, breadth, depth, init_temperature, verbose, track_stats) def compile(self, student, *, devset, eval_kwargs): - return super().compile(student, trainset=devset, eval_kwargs=eval_kwargs) \ No newline at end of file + return super().compile(student, trainset=devset, eval_kwargs=eval_kwargs) diff --git a/dspy/teleprompt/signature_opt_bayesian.py b/dspy/teleprompt/signature_opt_bayesian.py index b74c961511..f338644dc2 100644 --- a/dspy/teleprompt/signature_opt_bayesian.py +++ b/dspy/teleprompt/signature_opt_bayesian.py @@ -1,4 +1,3 @@ - from dspy.teleprompt.mipro_optimizer import MIPRO """ @@ -35,11 +34,60 @@ This information will be returned as attributes of the best program. """ + class BayesianSignatureOptimizer(MIPRO): - def __init__(self, prompt_model=None, task_model=None, teacher_settings={}, n=10, metric=None, init_temperature=1.0, verbose=False, track_stats=True, view_data_batch_size=10): - print("\u001b[31m[WARNING] BayesianSignatureOptimizer has been deprecated and replaced with MIPRO. BayesianSignatureOptimizer will be removed in a future release. \u001b[31m") + def __init__( + self, + prompt_model=None, + task_model=None, + teacher_settings={}, + n=10, + metric=None, + init_temperature=1.0, + verbose=False, + track_stats=True, + view_data_batch_size=10, + ): + print( + "\u001b[31m[WARNING] BayesianSignatureOptimizer has been deprecated and replaced with MIPRO. 
BayesianSignatureOptimizer will be removed in a future release. \u001b[31m" + ) - super().__init__(metric=metric,prompt_model=prompt_model, task_model=task_model, teacher_settings=teacher_settings,num_candidates=n,init_temperature=init_temperature,verbose=verbose,track_stats=track_stats,view_data_batch_size=view_data_batch_size) + super().__init__( + metric=metric, + prompt_model=prompt_model, + task_model=task_model, + teacher_settings=teacher_settings, + num_candidates=n, + init_temperature=init_temperature, + verbose=verbose, + track_stats=track_stats, + view_data_batch_size=view_data_batch_size, + ) - def compile(self, student, *, devset, max_bootstrapped_demos, max_labeled_demos, eval_kwargs, seed=42, optuna_trials_num, view_data=True, view_examples=True, requires_permission_to_run=False, num_trials=None): - return super().compile(student, trainset=devset, max_bootstrapped_demos=max_bootstrapped_demos, max_labeled_demos=max_labeled_demos, eval_kwargs=eval_kwargs, seed=seed, view_data=view_data, view_examples=view_examples, requires_permission_to_run=requires_permission_to_run, num_trials=optuna_trials_num) + def compile( + self, + student, + *, + devset, + max_bootstrapped_demos, + max_labeled_demos, + eval_kwargs, + seed=42, + optuna_trials_num, + view_data=True, + view_examples=True, + requires_permission_to_run=False, + num_trials=None, + ): + return super().compile( + student, + trainset=devset, + max_bootstrapped_demos=max_bootstrapped_demos, + max_labeled_demos=max_labeled_demos, + eval_kwargs=eval_kwargs, + seed=seed, + view_data=view_data, + view_examples=view_examples, + requires_permission_to_run=requires_permission_to_run, + num_trials=optuna_trials_num, + ) diff --git a/dspy/teleprompt/teleprompt.py b/dspy/teleprompt/teleprompt.py index ab3ae060c4..fe5b0b9500 100644 --- a/dspy/teleprompt/teleprompt.py +++ b/dspy/teleprompt/teleprompt.py @@ -1,6 +1,3 @@ - - - class Teleprompter: def __init__(self): pass diff --git a/dspy/teleprompt/teleprompt_optuna.py b/dspy/teleprompt/teleprompt_optuna.py index 501bd71fde..9c5290c91e 100644 --- a/dspy/teleprompt/teleprompt_optuna.py +++ b/dspy/teleprompt/teleprompt_optuna.py @@ -7,7 +7,16 @@ class BootstrapFewShotWithOptuna(Teleprompter): - def __init__(self, metric, teacher_settings={}, max_bootstrapped_demos=4, max_labeled_demos=16, max_rounds=1, num_candidate_programs=16, num_threads=6): + def __init__( + self, + metric, + teacher_settings={}, + max_bootstrapped_demos=4, + max_labeled_demos=16, + max_rounds=1, + num_candidate_programs=16, + num_threads=6, + ): self.metric = metric self.teacher_settings = teacher_settings self.max_rounds = max_rounds @@ -29,31 +38,42 @@ def __init__(self, metric, teacher_settings={}, max_bootstrapped_demos=4, max_la def objective(self, trial): program2 = self.student.reset_copy() - for (name, compiled_predictor), (_, program2_predictor) in zip(self.compiled_teleprompter.named_predictors(), program2.named_predictors()): + for (name, compiled_predictor), (_, program2_predictor) in zip( + self.compiled_teleprompter.named_predictors(), program2.named_predictors() + ): all_demos = compiled_predictor.demos demo_index = trial.suggest_int(f"demo_index_for_{name}", 0, len(all_demos) - 1) selected_demo = dict(all_demos[demo_index]) program2_predictor.demos = [selected_demo] - evaluate = Evaluate(devset=self.valset, metric=self.metric, num_threads=self.num_threads, - display_table=False, display_progress=True) + evaluate = Evaluate( + devset=self.valset, + metric=self.metric, + num_threads=self.num_threads, + 
display_table=False, + display_progress=True, + ) score, _ = evaluate(program2, return_all_scores=True) trial.set_user_attr("program", program2) return score - def compile(self, student, *, teacher=None, max_demos, trainset, valset=None): self.trainset = trainset self.valset = valset or trainset self.student = student.reset_copy() self.teacher = teacher.deepcopy() if teacher is not None else student.reset_copy() - teleprompter_optimize = BootstrapFewShot(metric=self.metric, max_bootstrapped_demos=max_demos, - max_labeled_demos=self.max_labeled_demos, - teacher_settings=self.teacher_settings, - max_rounds=self.max_rounds) - self.compiled_teleprompter = teleprompter_optimize.compile(self.student, teacher=self.teacher, trainset=self.trainset) - study = optuna.create_study(direction='maximize') + teleprompter_optimize = BootstrapFewShot( + metric=self.metric, + max_bootstrapped_demos=max_demos, + max_labeled_demos=self.max_labeled_demos, + teacher_settings=self.teacher_settings, + max_rounds=self.max_rounds, + ) + self.compiled_teleprompter = teleprompter_optimize.compile( + self.student, teacher=self.teacher, trainset=self.trainset + ) + study = optuna.create_study(direction="maximize") study.optimize(self.objective, n_trials=self.num_candidate_sets) best_program = study.trials[study.best_trial.number].user_attrs["program"] - print('Best score:', study.best_value) - print('Best program:', best_program) - return best_program \ No newline at end of file + print("Best score:", study.best_value) + print("Best program:", best_program) + return best_program diff --git a/dspy/teleprompt/vanilla.py b/dspy/teleprompt/vanilla.py index ced8312116..4f437f9c15 100644 --- a/dspy/teleprompt/vanilla.py +++ b/dspy/teleprompt/vanilla.py @@ -20,10 +20,11 @@ def compile(self, student, *, trainset, sample=True): if sample: predictor.demos = rng.sample(self.trainset, min(self.k, len(self.trainset))) else: - predictor.demos = self.trainset[:min(self.k, len(self.trainset))] + predictor.demos = self.trainset[: min(self.k, len(self.trainset))] return self.student - + + # NOTE: I believe templatev2 keeps rdemos as long as they have the last field. # This may change later, especially with the introduction of required vs optional fields. # NOTE: Since we're relying on downstream code to handle the demos, this sampling may be sub-sampled. From 4244c786896ace6c89664968511bb756696d5eb1 Mon Sep 17 00:00:00 2001 From: Thomas D Ahle Date: Thu, 14 Mar 2024 22:21:29 -0700 Subject: [PATCH 220/243] formatting --- dspy/teleprompt/bootstrap.py | 8 ++--- dspy/teleprompt/copro_optimizer.py | 28 ++++++++++------ dspy/teleprompt/finetune.py | 4 +-- dspy/teleprompt/knn_fewshot.py | 5 ++- dspy/teleprompt/mipro_optimizer.py | 40 ++++++++++++++--------- dspy/teleprompt/signature_opt.py | 2 +- dspy/teleprompt/signature_opt_bayesian.py | 2 +- dspy/teleprompt/teleprompt_optuna.py | 4 +-- 8 files changed, 57 insertions(+), 36 deletions(-) diff --git a/dspy/teleprompt/bootstrap.py b/dspy/teleprompt/bootstrap.py index 86a1c0f362..de10a74a37 100644 --- a/dspy/teleprompt/bootstrap.py +++ b/dspy/teleprompt/bootstrap.py @@ -84,13 +84,13 @@ def _prepare_predictor_mappings(self): student, teacher = self.student, self.teacher assert len(student.predictors()) == len( - teacher.predictors() + teacher.predictors(), ), "Student and teacher must have the same number of predictors." 
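        # The pairwise checks below rely on matching order: predictor i of the
        # student must line up with predictor i of the teacher by both name and signature.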
for (name1, predictor1), (name2, predictor2) in zip(student.named_predictors(), teacher.named_predictors()): assert name1 == name2, "Student and teacher must have the same program structure." assert predictor1.signature.equals( - predictor2.signature + predictor2.signature, ), f"Student and teacher must have the same signatures. {type(predictor1.signature)} != {type(predictor2.signature)}" assert id(predictor1) != id(predictor2), "Student and teacher must be different objects." @@ -195,11 +195,11 @@ def _bootstrap_one_example(self, example, round_idx=0): # TODO: Look closer into this. It's a bit tricky to reproduce. print(f"Failed to find predictor {predictor} in {self.predictor2name}.") print( - "Are you doing this in a notebook (Jupyter)? This might be caused by redefining values by rerunning cells." + "Are you doing this in a notebook (Jupyter)? This might be caused by redefining values by rerunning cells.", ) print("Try restarting the notebook, or open an issue.") raise KeyError( - f"Failed to find predictor {id(predictor)} {predictor} in {self.predictor2name}." + f"Failed to find predictor {id(predictor)} {predictor} in {self.predictor2name}.", ) from e name2traces[predictor_name].append(demo) diff --git a/dspy/teleprompt/copro_optimizer.py b/dspy/teleprompt/copro_optimizer.py index 3523c73804..a1a2062c09 100644 --- a/dspy/teleprompt/copro_optimizer.py +++ b/dspy/teleprompt/copro_optimizer.py @@ -39,7 +39,7 @@ class BasicGenerateInstruction(Signature): basic_instruction = dspy.InputField(desc="The initial instructions before optimization") proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") proposed_prefix_for_output_field = dspy.OutputField( - desc="The string at the end of the prompt, which will help the model start solving the task" + desc="The string at the end of the prompt, which will help the model start solving the task", ) @@ -51,7 +51,7 @@ class GenerateInstructionGivenAttempts(dspy.Signature): attempted_instructions = dspy.InputField(format=dsp.passages2text) proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") proposed_prefix_for_output_field = dspy.OutputField( - desc="The string at the end of the prompt, which will help the model start solving the task" + desc="The string at the end of the prompt, which will help the model start solving the task", ) @@ -153,11 +153,15 @@ def compile(self, student, *, trainset, eval_kwargs): if self.prompt_model: with dspy.settings.context(lm=self.prompt_model): instruct = dspy.Predict( - BasicGenerateInstruction, n=self.breadth - 1, temperature=self.init_temperature + BasicGenerateInstruction, + n=self.breadth - 1, + temperature=self.init_temperature, )(basic_instruction=basic_instruction) else: instruct = dspy.Predict( - BasicGenerateInstruction, n=self.breadth - 1, temperature=self.init_temperature + BasicGenerateInstruction, + n=self.breadth - 1, + temperature=self.init_temperature, )(basic_instruction=basic_instruction) # Add in our initial prompt as a candidate as well instruct.completions.proposed_instruction.append(basic_instruction) @@ -175,7 +179,7 @@ def compile(self, student, *, trainset, eval_kwargs): # For each iteration in depth... 
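        # Each depth step evaluates the current pool of candidate (instruction, prefix)
        # pairs per predictor, keeps the best found so far, and then asks the prompt
        # model for a fresh breadth of candidates conditioned on those attempts.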
for d in range( - self.depth + self.depth, ): # TODO: fix this so that we eval the new batch of predictors with the new best followoing predictors print(f"Iteration Depth: {d+1}/{self.depth}.") @@ -214,7 +218,7 @@ def compile(self, student, *, trainset, eval_kwargs): print(f"Predictor {i+1}") self._print_signature(predictor) print( - f"At Depth {d+1}/{self.depth}, Evaluating Prompt Candidate #{c_i+1}/{len(candidates_)} for Predictor {p_i+1} of {len(module.predictors())}." + f"At Depth {d+1}/{self.depth}, Evaluating Prompt Candidate #{c_i+1}/{len(candidates_)} for Predictor {p_i+1} of {len(module.predictors())}.", ) score = evaluate(module_clone, devset=trainset, **eval_kwargs) if self.verbose and self.prompt_model: @@ -264,7 +268,7 @@ def compile(self, student, *, trainset, eval_kwargs): self._set_signature(p_new, updated_signature) if self.verbose: print( - f"Updating Predictor {id(p_old)} to:\ni: {best_candidate['instruction']}\np: {best_candidate['prefix']}" + f"Updating Predictor {id(p_old)} to:\ni: {best_candidate['instruction']}\np: {best_candidate['prefix']}", ) if self.verbose: print("Full predictor with update: ") @@ -305,11 +309,15 @@ def compile(self, student, *, trainset, eval_kwargs): if self.prompt_model: with dspy.settings.context(lm=self.prompt_model): instr = dspy.Predict( - GenerateInstructionGivenAttempts, n=self.breadth, temperature=self.init_temperature + GenerateInstructionGivenAttempts, + n=self.breadth, + temperature=self.init_temperature, )(attempted_instructions=attempts) else: instr = dspy.Predict( - GenerateInstructionGivenAttempts, n=self.breadth, temperature=self.init_temperature + GenerateInstructionGivenAttempts, + n=self.breadth, + temperature=self.init_temperature, )(attempted_instructions=attempts) if self.verbose and self.prompt_model: @@ -318,7 +326,7 @@ def compile(self, student, *, trainset, eval_kwargs): new_candidates[id(p_base)] = instr.completions all_candidates[id(p_base)].proposed_instruction.extend(instr.completions.proposed_instruction) all_candidates[id(p_base)].proposed_prefix_for_output_field.extend( - instr.completions.proposed_prefix_for_output_field + instr.completions.proposed_prefix_for_output_field, ) if self.verbose and self.prompt_model: diff --git a/dspy/teleprompt/finetune.py b/dspy/teleprompt/finetune.py index ca080a5c3d..ef79c1ca4a 100644 --- a/dspy/teleprompt/finetune.py +++ b/dspy/teleprompt/finetune.py @@ -82,7 +82,7 @@ def compile( if teacher is None: print( "WARNING: Using a vanilla teacher. " - "Are you sure you want to use BootstrapFinetune without a compiled teacher?" 
+ "Are you sure you want to use BootstrapFinetune without a compiled teacher?", ) teachers = teacher if isinstance(teacher, list) else [teacher] @@ -136,7 +136,7 @@ def compile( compiler_config = { "save": "".join( - random.Random(time.time()).choices(string.ascii_uppercase + string.digits, k=13) + random.Random(time.time()).choices(string.ascii_uppercase + string.digits, k=13), ), # https://stackoverflow.com/a/2257449/1493011 "peft": peft, "fp16": False, diff --git a/dspy/teleprompt/knn_fewshot.py b/dspy/teleprompt/knn_fewshot.py index fe0d961b8a..ca9405268a 100644 --- a/dspy/teleprompt/knn_fewshot.py +++ b/dspy/teleprompt/knn_fewshot.py @@ -18,7 +18,10 @@ def forward_pass(*args, **kwargs): knn_trainset = self.KNN(**kwargs) few_shot_bootstrap = BootstrapFewShot() compiled_program = few_shot_bootstrap.compile( - student, teacher=teacher, trainset=knn_trainset, valset=valset + student, + teacher=teacher, + trainset=knn_trainset, + valset=valset, ) return compiled_program(**kwargs) diff --git a/dspy/teleprompt/mipro_optimizer.py b/dspy/teleprompt/mipro_optimizer.py index 00f77fbccb..c9e34189fd 100644 --- a/dspy/teleprompt/mipro_optimizer.py +++ b/dspy/teleprompt/mipro_optimizer.py @@ -51,7 +51,7 @@ class BasicGenerateInstruction(Signature): basic_instruction = dspy.InputField(desc="The initial instructions before optimization") proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") proposed_prefix_for_output_field = dspy.OutputField( - desc="The string at the end of the prompt, which will help the model start solving the task" + desc="The string at the end of the prompt, which will help the model start solving the task", ) @@ -62,7 +62,7 @@ class BasicGenerateInstructionWithDataObservations(Signature): observations = dspy.InputField(desc="Observations about the dataset and task") proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") proposed_prefix_for_output_field = dspy.OutputField( - desc="The string at the end of the prompt, which will help the model start solving the task" + desc="The string at the end of the prompt, which will help the model start solving the task", ) @@ -77,7 +77,7 @@ class BasicGenerateInstructionWithExamples(dspy.Signature): examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task") proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") proposed_prefix_for_output_field = dspy.OutputField( - desc="The string at the end of the prompt, which will help the model start solving the task" + desc="The string at the end of the prompt, which will help the model start solving the task", ) @@ -91,7 +91,7 @@ class BasicGenerateInstructionWithExamplesAndDataObservations(dspy.Signature): basic_instruction = dspy.InputField(desc="The initial instructions before optimization") proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") proposed_prefix_for_output_field = dspy.OutputField( - desc="The string at the end of the prompt, which will help the model start solving the task" + desc="The string at the end of the prompt, which will help the model start solving the task", ) @@ -100,7 +100,7 @@ class ObservationSummarizer(dspy.Signature): observations = dspy.InputField(desc="Observations I have made about my dataset") summary = dspy.OutputField( - desc="Two to Three sentence summary of only the most significant highlights of my observations" + desc="Two to Three sentence summary of only the most 
significant highlights of my observations", ) @@ -126,7 +126,7 @@ class DatasetDescriptorWithPriorObservations(dspy.Signature): examples = dspy.InputField(desc="Sample data points from the dataset") prior_observations = dspy.InputField(desc="Some prior observations I made about the data") observations = dspy.OutputField( - desc="Somethings that holds true for most or all of the data you observed or COMPLETE if you have nothing to add" + desc="Somethings that holds true for most or all of the data you observed or COMPLETE if you have nothing to add", ) @@ -180,7 +180,8 @@ def _observe_data(self, trainset, max_iterations=10): for b in range(self.view_data_batch_size, len(trainset), self.view_data_batch_size): upper_lim = min(len(trainset), b + self.view_data_batch_size) output = dspy.Predict(DatasetDescriptorWithPriorObservations, n=1, temperature=1.0)( - prior_observations=observations, examples=(trainset[b:upper_lim].__repr__()) + prior_observations=observations, + examples=(trainset[b:upper_lim].__repr__()), ) iterations += 1 if len(output["observations"]) >= 8 and output["observations"][:8].upper() == "COMPLETE": @@ -293,15 +294,17 @@ def _generate_first_N_candidates( # noqa: N802 instruct = new_instruct else: instruct.completions.proposed_instruction.extend( - new_instruct.completions.proposed_instruction + new_instruct.completions.proposed_instruction, ) instruct.completions.proposed_prefix_for_output_field.extend( - new_instruct.completions.proposed_prefix_for_output_field + new_instruct.completions.proposed_prefix_for_output_field, ) # Just data elif view_data: instruct = dspy.Predict( - BasicGenerateInstructionWithDataObservations, n=N - 1, temperature=self.init_temperature + BasicGenerateInstructionWithDataObservations, + n=N - 1, + temperature=self.init_temperature, )(basic_instruction=basic_instruction, observations=self.observations) # Just examples elif view_examples: @@ -327,7 +330,7 @@ def _generate_first_N_candidates( # noqa: N802 # Neither else: instruct = dspy.Predict(BasicGenerateInstruction, n=N - 1, temperature=self.init_temperature)( - basic_instruction=basic_instruction + basic_instruction=basic_instruction, ) # Add in our initial prompt as a candidate as well @@ -365,7 +368,7 @@ def compile( estimated_task_model_calls_wo_module_calls = len(trainset) * num_trials # M * T * P estimated_prompt_model_calls = 10 + self.num_candidates * len( - student.predictors() + student.predictors(), ) # num data summary calls + N * P user_message = textwrap.dedent(f"""\ @@ -454,7 +457,12 @@ def compile( # Generate N candidate prompts instruction_candidates, _ = self._generate_first_N_candidates( - module, self.num_candidates, view_data, view_examples, demo_candidates, trainset + module, + self.num_candidates, + view_data, + view_examples, + demo_candidates, + trainset, ) # Reset demo_candidates to None for our optimization if the user asked for no fewshot examples @@ -486,11 +494,13 @@ def objective(trial): # Suggest the index of the instruction candidate to use in our trial instruction_idx = trial.suggest_categorical( - f"{id(p_old)}_predictor_instruction", range(len(p_instruction_candidates)) + f"{id(p_old)}_predictor_instruction", + range(len(p_instruction_candidates)), ) if demo_candidates: demos_idx = trial.suggest_categorical( - f"{id(p_old)}_predictor_demos", range(len(p_demo_candidates)) + f"{id(p_old)}_predictor_demos", + range(len(p_demo_candidates)), ) trial_logs[trial_num][f"{id(p_old)}_predictor_instruction"] = instruction_idx if demo_candidates: diff --git 
a/dspy/teleprompt/signature_opt.py b/dspy/teleprompt/signature_opt.py index 2b41f8de41..e5c364daae 100644 --- a/dspy/teleprompt/signature_opt.py +++ b/dspy/teleprompt/signature_opt.py @@ -44,7 +44,7 @@ def __init__( track_stats=False, ): print( - "\u001b[31m[WARNING] SignatureOptimizer has been deprecated and replaced with COPRO. SignatureOptimizer will be removed in a future release. \u001b[31m" + "\u001b[31m[WARNING] SignatureOptimizer has been deprecated and replaced with COPRO. SignatureOptimizer will be removed in a future release. \u001b[31m", ) super().__init__(prompt_model, metric, breadth, depth, init_temperature, verbose, track_stats) diff --git a/dspy/teleprompt/signature_opt_bayesian.py b/dspy/teleprompt/signature_opt_bayesian.py index f338644dc2..51ca108227 100644 --- a/dspy/teleprompt/signature_opt_bayesian.py +++ b/dspy/teleprompt/signature_opt_bayesian.py @@ -49,7 +49,7 @@ def __init__( view_data_batch_size=10, ): print( - "\u001b[31m[WARNING] BayesianSignatureOptimizer has been deprecated and replaced with MIPRO. BayesianSignatureOptimizer will be removed in a future release. \u001b[31m" + "\u001b[31m[WARNING] BayesianSignatureOptimizer has been deprecated and replaced with MIPRO. BayesianSignatureOptimizer will be removed in a future release. \u001b[31m", ) super().__init__( diff --git a/dspy/teleprompt/teleprompt_optuna.py b/dspy/teleprompt/teleprompt_optuna.py index 9c5290c91e..ad2f6fe4ad 100644 --- a/dspy/teleprompt/teleprompt_optuna.py +++ b/dspy/teleprompt/teleprompt_optuna.py @@ -39,7 +39,7 @@ def __init__( def objective(self, trial): program2 = self.student.reset_copy() for (name, compiled_predictor), (_, program2_predictor) in zip( - self.compiled_teleprompter.named_predictors(), program2.named_predictors() + self.compiled_teleprompter.named_predictors(), program2.named_predictors(), ): all_demos = compiled_predictor.demos demo_index = trial.suggest_int(f"demo_index_for_{name}", 0, len(all_demos) - 1) @@ -69,7 +69,7 @@ def compile(self, student, *, teacher=None, max_demos, trainset, valset=None): max_rounds=self.max_rounds, ) self.compiled_teleprompter = teleprompter_optimize.compile( - self.student, teacher=self.teacher, trainset=self.trainset + self.student, teacher=self.teacher, trainset=self.trainset, ) study = optuna.create_study(direction="maximize") study.optimize(self.objective, n_trials=self.num_candidate_sets) From e0d545c4653449071459718e826fc7a6040ff0be Mon Sep 17 00:00:00 2001 From: KCaverly Date: Mon, 18 Mar 2024 12:44:59 -0400 Subject: [PATCH 221/243] fix: updated predict tests with both lm and backend functionality --- dsp/utils/settings.py | 1 + dspy/predict/predict.py | 122 ++++++++++++++++++++++++++++------ dspy/primitives/prediction.py | 56 +--------------- tests/predict/test_predict.py | 107 +++++++++++++++++++++++------ 4 files changed, 190 insertions(+), 96 deletions(-) diff --git a/dsp/utils/settings.py b/dsp/utils/settings.py index f357773541..04c42e8892 100644 --- a/dsp/utils/settings.py +++ b/dsp/utils/settings.py @@ -45,6 +45,7 @@ def __new__(cls): suggest_failures=0, langchain_history=[], cache=True, + experimental=False, ) cls._instance.__append(config) diff --git a/dspy/predict/predict.py b/dspy/predict/predict.py index 7e0dea96fd..5f3dca7e38 100644 --- a/dspy/predict/predict.py +++ b/dspy/predict/predict.py @@ -3,7 +3,7 @@ import dsp from dspy.predict.parameter import Parameter -from dspy.primitives.prediction import Prediction +from dspy.primitives.prediction import Completions, Prediction from dspy.signatures.signature import 
ensure_signature, signature_to_template @@ -15,13 +15,20 @@ def __init__(self, signature, **config): self.reset() def reset(self): - self.backend = None + if dsp.settings.get("experimental", False): + self.backend = None + else: + self.lm = None self.traces = [] self.train = [] self.demos = [] def dump_state(self): - state_keys = ["backend", "traces", "train", "demos"] + if dsp.settings.get("experimental", False): + state_keys = ["backend", "traces", "train", "demos"] + else: + state_keys = ["lm", "traces", "train", "demos"] + state = {k: getattr(self, k) for k in state_keys} # Cache the signature instructions and the last field's name. @@ -57,31 +64,102 @@ def forward(self, **kwargs): demos = kwargs.pop("demos", self.demos) config = dict(**self.config, **kwargs.pop("config", {}), **kwargs) - # Get the right Backend to use. - backend = kwargs.pop("backend", self.backend) or dspy.settings.get( - "backend", None - ) - assert backend is not None, "No Backend is configured." - - if not all(k in kwargs for k in signature.input_fields): - present = [k for k in signature.input_fields if k in kwargs] - missing = [k for k in signature.input_fields if k not in kwargs] - print( - f"WARNING: Not all input fields were provided to module. Present: {present}. Missing: {missing}." + if dsp.settings.get("experimental", False): + # Get the right Backend to use. + backend = kwargs.pop("backend", self.backend) or dspy.settings.get( + "backend", None ) + assert backend is not None, "No Backend is configured." + + if not all(k in kwargs for k in signature.input_fields): + present = [k for k in signature.input_fields if k in kwargs] + missing = [k for k in signature.input_fields if k not in kwargs] + print( + f"WARNING: Not all input fields were provided to module. Present: {present}. Missing: {missing}." + ) + + completions = backend(signature, demos=demos, **config) + + pred = Prediction.from_completions(completions) + + trace = dspy.settings.get("trace") + if trace is not None and kwargs.pop("_trace", True): + trace.append((self, {**kwargs}, pred)) - completions = backend(signature, demos=demos, **config) + return pred - # TODO: What purpose does stage play here? - # assert self.stage in x, "The generated (input, output) example was not stored" + else: + lm = kwargs.pop("lm", self.lm) or dsp.settings.get("lm", None) + assert lm is not None, "No LM is loaded." + + # If temperature is 0.0 but its n > 1, set temperature to 0.7 + temperature = config.get("temperature") + temperature = ( + lm.kwargs["temperature"] if temperature is None else temperature + ) + + num_generations = config.get("n") + if num_generations is None: + num_generations = lm.kwargs.get( + "n", lm.kwargs.get("num_generations", None) + ) + + if (temperature is None or temperature <= 0.15) and num_generations > 1: + config["temperature"] = 0.7 + + # All of the other kwargs are presumed to fit a prefix of the signature. + # That is, they are input variables for the bottom most generation, so + # we place them inside the input - x - together with the demos. + x = dsp.Example(demos=demos, **kwargs) + + if not all(k in kwargs for k in signature.input_fields): + present = [k for k in signature.input_fields if k in kwargs] + missing = [k for k in signature.input_fields if k not in kwargs] + print( + f"WARNING: Not all input fields were provided to module. Present: {present}. Missing: {missing}." 
+ ) + + # Switch to legacy format for dsp.generate + template = signature_to_template(signature) + + if self.lm is None: + x, C = dsp.generate(template, **config)(x, stage=self.stage) + else: + # Note: query_only=True means the instructions and examples are not included. + # I'm not really sure why we'd want to do that, but it's there. + with dsp.settings.context(lm=self.lm, query_only=True): + x, C = dsp.generate(template, **config)(x, stage=self.stage) + + assert ( + self.stage in x + ), "The generated (input, output) example was not stored" + + examples = [] + for c in C: + example = dspy.Example() + for name in self.signature.input_fields: + if name in kwargs.keys(): + example[name] = kwargs[name] + + for name in self.signature.output_fields: + example[name] = getattr(c, name) + + examples.append(example) + + completions = Completions.new( + signature=self.signature, + examples=examples, + prompt=template(x), + kwargs=config, + ) - pred = Prediction.from_completions(completions) + pred = Prediction.from_completions(completions) - trace = dspy.settings.get("trace") - if trace is not None and kwargs.pop("_trace", True): - trace.append((self, {**kwargs}, pred)) + if kwargs.pop("_trace", True) and dsp.settings.trace is not None: + trace = dsp.settings.trace + trace.append((self, {**kwargs}, pred)) - return pred + return pred def update_config(self, **kwargs): self.config = {**self.config, **kwargs} diff --git a/dspy/primitives/prediction.py b/dspy/primitives/prediction.py index 2a980a013d..63c1357614 100644 --- a/dspy/primitives/prediction.py +++ b/dspy/primitives/prediction.py @@ -174,6 +174,7 @@ def get_majority( pred._completions.filter(field, majority_class) return pred + @classmethod def from_completions(cls, completions: Completions): obj = cls() @@ -197,58 +198,3 @@ def __str__(self): @property def completions(self): return self._completions - - -class Completions: - def __init__(self, list_or_dict, signature=None): - self.signature = signature - - if isinstance(list_or_dict, list): - kwargs = {} - for arg in list_or_dict: - for k, v in arg.items(): - kwargs.setdefault(k, []).append(v) - else: - kwargs = list_or_dict - - assert all(isinstance(v, list) for v in kwargs.values()), "All values must be lists" - - if kwargs: - length = len(next(iter(kwargs.values()))) - assert all(len(v) == length for v in kwargs.values()), "All lists must have the same length" - - self._completions = kwargs - - def items(self): - return self._completions.items() - - def __getitem__(self, key): - if isinstance(key, int): - if key < 0 or key >= len(self): - raise IndexError("Index out of range") - - return Prediction(**{k: v[key] for k, v in self._completions.items()}) - - return self._completions[key] - - def __getattr__(self, name): - if name in self._completions: - return self._completions[name] - - raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'") - - def __len__(self): - # Return the length of the list for one of the keys - # It assumes all lists have the same length - return len(next(iter(self._completions.values()))) - - def __contains__(self, key): - return key in self._completions - - def __repr__(self): - items_repr = ",\n ".join(f"{k}={repr(v)}" for k, v in self._completions.items()) - return f"Completions(\n {items_repr}\n)" - - def __str__(self): - # return str(self._completions) - return self.__repr__() diff --git a/tests/predict/test_predict.py b/tests/predict/test_predict.py index 71c373077e..972657e69f 100644 --- a/tests/predict/test_predict.py +++ 
b/tests/predict/test_predict.py @@ -1,9 +1,9 @@ +import dsp import dspy from dspy import Predict, Signature from dspy.backends.json import JSONBackend -from dspy.utils.dummies import DummyLanguageModel from dspy.backends import TemplateBackend -from dspy.utils.dummies import DummyLM +from dspy.utils.dummies import DummyLM, DummyLanguageModel import copy import textwrap @@ -11,19 +11,24 @@ def test_initialization_with_string_signature(): signature_string = "input1, input2 -> output" predict = Predict(signature_string) - expected_instruction = "Given the fields `input1`, `input2`, produce the fields `output`." + expected_instruction = ( + "Given the fields `input1`, `input2`, produce the fields `output`." + ) assert predict.signature.instructions == expected_instruction assert predict.signature.instructions == Signature(signature_string).instructions def test_reset_method(): + dsp.settings.get("experimental", False) + predict_instance = Predict("input -> output") - predict_instance.backend = "modified" + predict_instance.lm = "modified" + predict_instance.traces = ["trace"] predict_instance.train = ["train"] predict_instance.demos = ["demo"] predict_instance.reset() - assert predict_instance.backend is None + assert predict_instance.lm is None assert predict_instance.traces == [] assert predict_instance.train == [] assert predict_instance.demos == [] @@ -31,25 +36,46 @@ def test_reset_method(): def test_dump_and_load_state(): predict_instance = Predict("input -> output") - predict_instance.backend = "backend_state" + predict_instance.lm = "lm_state" dumped_state = predict_instance.dump_state() new_instance = Predict("input -> output") new_instance.load_state(dumped_state) - assert new_instance.backend == "backend_state" + assert new_instance.lm == "lm_state" def test_call_method(): predict_instance = Predict("input -> output") + lm = DummyLM(["test output"]) + dspy.settings.configure(lm=lm) + result = predict_instance(input="test input") + assert result.output == "test output" + assert lm.get_convo(-1) == ( + "Given the fields `input`, produce the fields `output`.\n" + "\n---\n\n" + "Follow the following format.\n\n" + "Input: ${input}\n" + "Output: ${output}\n" + "\n---\n\n" + "Input: test input\n" + "Output: test output" + ) + + +def test_call_method_experimental(): + dspy.settings.configure(experimental=True) + predict_instance = Predict("input -> output") lm = DummyLanguageModel(answers=[["test output"]]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend) + dspy.settings.configure(backend=backend, lm=None) result = predict_instance(input="test input") assert result.output == "test output" def test_dump_load_state(): + dspy.settings.configure(experimental=False) + predict_instance = Predict(Signature("input -> output", "original instructions")) dumped_state = predict_instance.dump_state() new_instance = Predict(Signature("input -> output", "new instructions")) @@ -58,9 +84,20 @@ def test_dump_load_state(): def test_forward_method(): + dspy.settings.configure(experimental=False) + + program = Predict("question -> answer") + dspy.settings.configure(lm=DummyLM([]), backend=None) + result = program(question="What is 1+1?").answer + assert result == "No more responses" + + +def test_forward_method_experimental(): + dspy.settings.configure(experimental=True) + lm = DummyLanguageModel(answers=[["No more responses"]]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend) + dspy.settings.configure(backend=backend, lm=None) program = Predict("question 
-> answer") result = program(question="What is 1+1?").answer @@ -68,11 +105,23 @@ def test_forward_method(): def test_forward_method2(): + dspy.settings.configure(experimental=False) + + program = Predict("question -> answer1, answer2") + dspy.settings.configure(lm=DummyLM(["my first answer", "my second answer"])) + result = program(question="What is 1+1?") + assert result.answer1 == "my first answer" + assert result.answer2 == "my second answer" + + +def test_forward_method2_experimental(): + dspy.settings.configure(experimental=True) + lm = DummyLanguageModel( answers=[[" my first answer\n\nAnswer 2: my second answer"]] ) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend) + dspy.settings.configure(backend=backend, lm=None) program = Predict("question -> answer1, answer2") result = program(question="What is 1+1?") @@ -88,18 +137,34 @@ def test_config_management(): def test_multi_output(): + dspy.settings.configure(experimental=False) + + program = Predict("question -> answer", n=2) + dspy.settings.configure( + lm=DummyLM(["my first answer", "my second answer"]), backend=False + ) + results = program(question="What is 1+1?") + assert results.completions[0].answer == "my first answer" + assert results.completions[1].answer == "my second answer" + + +def test_multi_output_experimental(): + dspy.settings.configure(experimental=True) + program = Predict("question -> answer", n=2) lm = DummyLanguageModel(answers=[["my first answer", "my second answer"]]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend) + dspy.settings.configure(backend=backend, lm=None) results = program(question="What is 1+1?") assert results.completions[0].answer == "my first answer" assert results.completions[1].answer == "my second answer" -def test_multi_output_json(): +def test_multi_output_json_experimental(): + dspy.settings.configure(experimental=True) + program = Predict("question -> answer", n=2) lm = DummyLanguageModel( @@ -111,13 +176,15 @@ def test_multi_output_json(): ] ) backend = JSONBackend(lm=lm) - dspy.settings.configure(backend=backend) + dspy.settings.configure(backend=backend, lm=None) results = program(question="What is 1+1?") assert results.completions[1].answer == "my second answer" def test_multi_output2(): + dspy.settings.configure(experimental=False) + program = Predict("question -> answer1, answer2", n=2) dspy.settings.configure( lm=DummyLM( @@ -128,10 +195,10 @@ def test_multi_output2(): ) ) results = program(question="What is 1+1?") - assert results.completions.answer1[0] == "my 0 answer" - assert results.completions.answer1[1] == "my 1 answer" - assert results.completions.answer2[0] == "my 2 answer" - assert results.completions.answer2[1] == "my 3 answer" + assert results.completions[0].answer1 == "my 0 answer" + assert results.completions[1].answer1 == "my 1 answer" + assert results.completions[0].answer2 == "my 2 answer" + assert results.completions[1].answer2 == "my 3 answer" def test_named_predictors(): @@ -158,7 +225,8 @@ class OutputOnlySignature(dspy.Signature): dspy.settings.configure(lm=lm) assert predictor().output == "short answer" - assert lm.get_convo(-1) == textwrap.dedent("""\ + assert lm.get_convo(-1) == textwrap.dedent( + """\ Given the fields , produce the fields `output`. 
--- @@ -169,4 +237,5 @@ class OutputOnlySignature(dspy.Signature): --- - Output: short answer""") + Output: short answer""" + ) From 5a502de24d50345821ccb2d33738ef106a8ca01a Mon Sep 17 00:00:00 2001 From: KCaverly Date: Mon, 18 Mar 2024 12:48:11 -0400 Subject: [PATCH 222/243] fix: updated test_program for experimental flag --- tests/primitives/test_program.py | 43 ++++++++++++++++++++++++++------ 1 file changed, 35 insertions(+), 8 deletions(-) diff --git a/tests/primitives/test_program.py b/tests/primitives/test_program.py index 5daa643dce..461fba6c43 100644 --- a/tests/primitives/test_program.py +++ b/tests/primitives/test_program.py @@ -8,7 +8,6 @@ from dspy.utils import DummyLM - class HopModule(dspy.Module): def __init__(self): super().__init__() @@ -22,7 +21,9 @@ def forward(self, question): def test_module_initialization(): module = Module() - assert module._compiled is False, "Module _compiled attribute should be False upon initialization" + assert ( + module._compiled is False + ), "Module _compiled attribute should be False upon initialization" def test_named_predictors(): @@ -30,20 +31,38 @@ def test_named_predictors(): named_preds = module.named_predictors() assert len(named_preds) == 2, "Should identify correct number of Predict instances" names, preds = zip(*named_preds) - assert "predict1" in names and "predict2" in names, "Named predictors should include 'predict1' and 'predict2'" + assert ( + "predict1" in names and "predict2" in names + ), "Named predictors should include 'predict1' and 'predict2'" def test_predictors(): module = HopModule() preds = module.predictors() assert len(preds) == 2, "Should return correct number of Predict instances" - assert all(isinstance(p, dspy.Predict) for p in preds), "All returned items should be instances of PredictMock" + assert all( + isinstance(p, dspy.Predict) for p in preds + ), "All returned items should be instances of PredictMock" def test_forward(): + dspy.settings.configure(experimental=False) + + program = HopModule() + dspy.settings.configure( + lm=DummyLM({"What is 1+1?": "let me check", "let me check": "2"}) + ) + result = program(question="What is 1+1?").answer + assert result == "2" + + +def test_forward_experimental(): + dspy.settings.configure(experimental=True) + program = HopModule() - lm = DummyLanguageModel(answers=[["let me check"], ["2"]]) - backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend, cache=False) + lm = DummyLanguageModel(answers=[["let me check"], ["2"]]) + backend = TemplateBackend(lm=lm) + dspy.settings.configure(backend=backend, cache=False, lm=None) result = program(question="What is 1+1?").answer assert result == "2" @@ -78,7 +97,11 @@ def test_multiple_levels(): module = Module() module.sub = Module() module.sub.subsub = Module() - expected = [("self", module), ("self.sub", module.sub), ("self.sub.subsub", module.sub.subsub)] + expected = [ + ("self", module), + ("self.sub", module.sub), + ("self.sub.subsub", module.sub.subsub), + ] assert list(module.named_sub_modules()) == expected def test_multiple_sub_modules(): module = Module() module.sub1 = Module() module.sub2 = Module() - expected = [("self", module), ("self.sub1", module.sub1), ("self.sub2", module.sub2)] + expected = [ + ("self", module), + ("self.sub1", module.sub1), + ("self.sub2", module.sub2), + ] assert sorted(list(module.named_sub_modules())) == sorted(expected) From f7cacbc98d5d68961f712d79f478060f30a5e55c Mon Sep 17 00:00:00 2001 From: KCaverly Date: Mon, 18 Mar 2024 12:50:19 -0400
Subject: [PATCH 223/243] fix: updated test_chain_of_thought for experimental flag --- tests/predict/test_chain_of_thought.py | 37 +++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/tests/predict/test_chain_of_thought.py b/tests/predict/test_chain_of_thought.py index 3b7c61b6ad..acf2912905 100644 --- a/tests/predict/test_chain_of_thought.py +++ b/tests/predict/test_chain_of_thought.py @@ -7,9 +7,44 @@ def test_initialization_with_string_signature(): + dspy.settings.configure(experimental=False) + + lm = DummyLM(["find the number after 1", "2"]) + dspy.settings.configure(lm=lm) + predict = ChainOfThought("question -> answer") + assert list(predict.extended_signature.output_fields.keys()) == [ + "rationale", + "answer", + ] + assert predict(question="What is 1+1?").answer == "2" + + print(lm.get_convo(-1)) + assert lm.get_convo(-1) == textwrap.dedent( + """\ + Given the fields `question`, produce the fields `answer`. + + --- + + Follow the following format. + + Question: ${question} + Reasoning: Let's think step by step in order to ${produce the answer}. We ... + Answer: ${answer} + + --- + + Question: What is 1+1? + Reasoning: Let's think step by step in order to find the number after 1 + Answer: 2""" + ) + + +def test_initialization_with_string_signature_experimental(): + dspy.settings.configure(experimental=True) + lm = DummyLanguageModel(answers=[["find the number after 1\n\nAnswer: 2"]]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend) + dspy.settings.configure(backend=backend, lm=None, cache=False) predict = ChainOfThought("question -> answer") assert list(predict.extended_signature.output_fields.keys()) == [ "rationale", From 5da3780ff9bda51672208edc052f21a117d4bd6d Mon Sep 17 00:00:00 2001 From: KCaverly Date: Mon, 18 Mar 2024 15:06:18 -0400 Subject: [PATCH 224/243] chore: update formatting for functional.py --- dspy/functional/functional.py | 106 ++++++++++++++++++++++++++-------- 1 file changed, 82 insertions(+), 24 deletions(-) diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py index dab08eb43f..9c23074b70 100644 --- a/dspy/functional/functional.py +++ b/dspy/functional/functional.py @@ -138,17 +138,33 @@ def parse(x): signature = signature.with_updated_fields( name, desc=field.json_schema_extra.get("desc", "") - + (f" (Respond with a single {type_.__name__} value)" if type_ != str else ""), + + ( + f" (Respond with a single {type_.__name__} value)" + if type_ != str + else "" + ), format=lambda x: x if isinstance(x, str) else str(x), parser=type_, ) elif False: # TODO: I don't like forcing the model to write "value" in the output. 
- if not (inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel)): - type_ = pydantic.create_model("Output", value=(type_, ...), __base__=pydantic.BaseModel) - to_json = lambda x, type_=type_: type_(value=x).model_dump_json()[9:-1] # {"value":"123"} - from_json = lambda x, type_=type_: type_.model_validate_json('{"value":' + x + "}").value - schema = json.dumps(type_.model_json_schema()["properties"]["value"]) + if not ( + inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel) + ): + type_ = pydantic.create_model( + "Output", value=(type_, ...), __base__=pydantic.BaseModel + ) + to_json = lambda x, type_=type_: type_( + value=x + ).model_dump_json()[ + 9:-1 + ] # {"value":"123"} + from_json = lambda x, type_=type_: type_.model_validate_json( + '{"value":' + x + "}" + ).value + schema = json.dumps( + type_.model_json_schema()["properties"]["value"] + ) else: to_json = lambda x: x.model_dump_json() from_json = lambda x, type_=type_: type_.model_validate_json(x) @@ -157,26 +173,42 @@ def parse(x): # Anything else we wrap in a pydantic object if not ( inspect.isclass(type_) - and typing.get_origin(type_) not in (list, tuple) # To support Python 3.9 + and typing.get_origin(type_) + not in (list, tuple) # To support Python 3.9 and issubclass(type_, pydantic.BaseModel) ): - type_ = pydantic.create_model("Output", value=(type_, ...), __base__=pydantic.BaseModel) - to_json = lambda x, type_=type_: type_(value=x).model_dump_json() - from_json = lambda x, type_=type_: type_.model_validate_json(x).value + type_ = pydantic.create_model( + "Output", value=(type_, ...), __base__=pydantic.BaseModel + ) + to_json = lambda x, type_=type_: type_( + value=x + ).model_dump_json() + from_json = lambda x, type_=type_: type_.model_validate_json( + x + ).value schema = json.dumps(type_.model_json_schema()) else: to_json = lambda x: x.model_dump_json() from_json = lambda x, type_=type_: type_.model_validate_json(x) schema = json.dumps(type_.model_json_schema()) if self.wrap_json: - to_json = lambda x, inner=to_json: "```json\n" + inner(x) + "\n```\n" + to_json = ( + lambda x, inner=to_json: "```json\n" + inner(x) + "\n```\n" + ) schema = "```json\n" + schema + "\n```" signature = signature.with_updated_fields( name, desc=field.json_schema_extra.get("desc", "") - + (". Respond with a single JSON object. JSON Schema: " + schema), - format=lambda x, to_json=to_json: (x if isinstance(x, str) else to_json(x)), - parser=lambda x, from_json=from_json: from_json(_unwrap_json(x)), + + ( + ". Respond with a single JSON object. JSON Schema: " + + schema + ), + format=lambda x, to_json=to_json: ( + x if isinstance(x, str) else to_json(x) + ), + parser=lambda x, from_json=from_json: from_json( + _unwrap_json(x) + ), type_=type_, ) else: # If input field @@ -187,9 +219,13 @@ def parse(x): # Special formatting for lists of known types. Maybe the output fields sohuld have this too? 
elif typing.get_origin(type_) in (List, list, Tuple, tuple): (inner_type,) = typing.get_args(type_) - if inspect.isclass(inner_type) and issubclass(inner_type, pydantic.BaseModel): + if inspect.isclass(inner_type) and issubclass( + inner_type, pydantic.BaseModel + ): format_ = ( - lambda x: x if isinstance(x, str) else "[" + ",".join(i.model_dump_json() for i in x) + "]" + lambda x: x + if isinstance(x, str) + else "[" + ",".join(i.model_dump_json() for i in x) + "]" ) else: format_ = lambda x: x if isinstance(x, str) else json.dumps(x) @@ -198,7 +234,11 @@ def parse(x): format_ = lambda x: x if isinstance(x, str) else x.model_dump_json() is_json = True if self.wrap_json and is_json: - format_ = lambda x, inner=format_: x if isinstance(x, str) else "```json\n" + inner(x) + "\n```\n" + format_ = ( + lambda x, inner=format_: x + if isinstance(x, str) + else "```json\n" + inner(x) + "\n```\n" + ) signature = signature.with_updated_fields(name, format=format_) return signature @@ -236,7 +276,12 @@ def forward(self, **kwargs) -> dspy.Prediction: ): signature = signature.with_updated_fields( name, - desc=current_desc + "\n" + prefix + example + "\n" + suffix, + desc=current_desc + + "\n" + + prefix + + example + + "\n" + + suffix, ) # No reason trying to parse the general signature, or run more completions, if we already have errors if errors: @@ -255,7 +300,9 @@ def forward(self, **kwargs) -> dspy.Prediction: if name == "general": error_prefix = "General:" else: - error_prefix = signature.output_fields[name].json_schema_extra["prefix"] + error_prefix = signature.output_fields[name].json_schema_extra[ + "prefix" + ] number = "" if try_i == 0 else f" ({try_i+1})" signature = signature.append( f"error_{name}_{try_i}", @@ -267,10 +314,14 @@ def forward(self, **kwargs) -> dspy.Prediction: else: # If there are no errors, we return the parsed results return Prediction.from_completions( - {key: [r[key] for r in parsed_results] for key in signature.output_fields}, + { + key: [r[key] for r in parsed_results] + for key in signature.output_fields + }, ) raise ValueError( - "Too many retries trying to get the correct output format. " + "Try simplifying the requirements.", + "Too many retries trying to get the correct output format. " + + "Try simplifying the requirements.", errors, ) @@ -301,7 +352,10 @@ def _func_to_signature(func): annotation = annotations.get(param.name, str) kwargs = {} if typing.get_origin(annotation) is Annotated: - desc = next((arg for arg in typing.get_args(annotation) if isinstance(arg, str)), None) + desc = next( + (arg for arg in typing.get_args(annotation) if isinstance(arg, str)), + None, + ) if desc is not None: kwargs["desc"] = desc fields[param.name] = (annotation, dspy.InputField(**kwargs)) @@ -310,7 +364,9 @@ def _func_to_signature(func): kwargs = {} annotation = annotations.get("return", str) if typing.get_origin(annotation) is Annotated: - desc = next((arg for arg in typing.get_args(annotation) if isinstance(arg, str)), None) + desc = next( + (arg for arg in typing.get_args(annotation) if isinstance(arg, str)), None + ) if desc is not None: kwargs["desc"] = desc fields[output_key] = (annotation, dspy.OutputField(**kwargs)) @@ -328,4 +384,6 @@ def _unwrap_json(output): output = output[7:-3].strip() if not output.startswith("{") or not output.endswith("}"): raise ValueError("json output should start and end with { and }") - return ujson.dumps(ujson.loads(output)) # ujson is a bit more robust than the standard json + return ujson.dumps( + ujson.loads(output) + ) # ujson is a bit more robust than the standard json From dab963d8d74c13bb6a40dd8f58303572a3062eef Mon Sep 17 00:00:00 2001 From: KCaverly Date: Mon, 18 Mar 2024 18:48:43 -0400 Subject: [PATCH 225/243] fix: update backend arguments to ensure that parameter clashes between lm and input arguments are accommodated --- dspy/backends/base.py | 5 ++++- dspy/backends/json.py | 3 ++- dspy/backends/template.py | 5 +++-- dspy/predict/predict.py | 18 ++++++++---------- 4 files changed, 17 insertions(+), 14 deletions(-) diff --git a/dspy/backends/base.py b/dspy/backends/base.py index c05dbc8f6f..4ec35f3e94 100644 --- a/dspy/backends/base.py +++ b/dspy/backends/base.py @@ -1,3 +1,4 @@ +import typing as t from abc import ABC, abstractmethod from pydantic import BaseModel, Field @@ -15,6 +16,7 @@ class BaseBackend(BaseModel, ABC): def __call__( self, signature: Signature, + config: dict[str, t.Any] = {}, attempts: int = 1, **kwargs, ) -> Completions: @@ -30,7 +32,7 @@ def __call__( while i < attempts: # Returns a List of Completions # which may or may not be complete - completions = self.generate(signature, **kwargs) + completions = self.generate(signature=signature, config=config, **kwargs) # If 1 or more complete generations exist, simple return all complete if completions.has_complete_example(): @@ -66,6 +68,7 @@ def __call__( def generate( self, signature: Signature, + config: dict[str, t.Any], **kwargs, ) -> Completions: """Generates `n` predictions (complete/partial) for the signature output.""" diff --git a/dspy/backends/json.py b/dspy/backends/json.py index c1b64bbf24..c80bdf2328 100644 --- a/dspy/backends/json.py +++ b/dspy/backends/json.py @@ -24,6 +24,7 @@ class JSONBackend(BaseBackend): def generate( self, signature: Signature, + config: dict[str, t.Any] = {}, demos: t.List[str] = [], **kwargs, ) -> Completions: @@ -42,7 +43,7 @@ pred = self.lm( template(example, is_json=True), response_format={"type": "json_object"}, - **kwargs, + **config, ) extracted = [ json.loads(prediction["message"]["content"]) diff --git a/dspy/backends/template.py b/dspy/backends/template.py index 049ff39545..e021cee70c 100644 --- a/dspy/backends/template.py +++ b/dspy/backends/template.py @@ -17,7 +17,8 @@ class
TemplateBackend(BaseBackend): def generate( self, signature: Signature, - demos: t.List[str] = [], + demos: list[str] = [], + config: dict[str, t.Any] = {}, **kwargs, ) -> Completions: """Wrap the signature and demos into an example, and pass through the Language Model, returning Signature compliant output""" @@ -39,7 +40,7 @@ def generate( for input in signature.input_fields: del kwargs[input] - pred = self.lm(template(example), **kwargs) + pred = self.lm(template(example), **config) # This returns a list of Examples extracted_examples = [ diff --git a/dspy/predict/predict.py b/dspy/predict/predict.py index 5f3dca7e38..35db8cd817 100644 --- a/dspy/predict/predict.py +++ b/dspy/predict/predict.py @@ -15,16 +15,17 @@ def __init__(self, signature, **config): self.reset() def reset(self): - if dsp.settings.get("experimental", False): + if dspy.settings.get("backend", None) is not None: self.backend = None else: self.lm = None + self.traces = [] self.train = [] self.demos = [] def dump_state(self): - if dsp.settings.get("experimental", False): + if dspy.settings.get("backend", None) is not None: state_keys = ["backend", "traces", "train", "demos"] else: state_keys = ["lm", "traces", "train", "demos"] @@ -62,15 +63,12 @@ def forward(self, **kwargs): # Extract the three privileged keyword arguments. signature = kwargs.pop("new_signature", kwargs.pop("signature", self.signature)) demos = kwargs.pop("demos", self.demos) - config = dict(**self.config, **kwargs.pop("config", {}), **kwargs) + config = dict(**self.config, **kwargs.pop("config", {})) - if dsp.settings.get("experimental", False): - # Get the right Backend to use. - backend = kwargs.pop("backend", self.backend) or dspy.settings.get( - "backend", None - ) - assert backend is not None, "No Backend is configured." + # Check if we should use backend + backend = dspy.settings.get("backend", None) + if backend is not None: if not all(k in kwargs for k in signature.input_fields): present = [k for k in signature.input_fields if k in kwargs] missing = [k for k in signature.input_fields if k not in kwargs] @@ -78,7 +76,7 @@ def forward(self, **kwargs): f"WARNING: Not all input fields were provided to module. Present: {present}. Missing: {missing}." 
) - completions = backend(signature, demos=demos, **config) + completions = backend(signature, demos=demos, config=config, **kwargs) pred = Prediction.from_completions(completions) From 740c3823c0f97113e67d01fb0b750b4bae2cb9ca Mon Sep 17 00:00:00 2001 From: KCaverly Date: Mon, 18 Mar 2024 18:50:18 -0400 Subject: [PATCH 226/243] fix: update functional for completion/prediction changes --- dspy/functional/functional.py | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py index 9c23074b70..2fc3ada9a8 100644 --- a/dspy/functional/functional.py +++ b/dspy/functional/functional.py @@ -8,8 +8,8 @@ import dspy from dsp.templates import passages2text -from dspy.primitives.prediction import Prediction from dspy.signatures.signature import ensure_signature, make_signature +from dspy.primitives.prediction import Completions, Prediction def predictor(func) -> dspy.Module: @@ -293,6 +293,7 @@ def forward(self, **kwargs) -> dspy.Prediction: parsed_results.append(parsed) except pydantic.ValidationError as e: errors["general"] = _format_error(e) + if errors: # Add new fields for each error for name, error in errors.items(): @@ -313,12 +314,21 @@ def forward(self, **kwargs) -> dspy.Prediction: ) else: # If there are no errors, we return the parsed results - return Prediction.from_completions( - { - key: [r[key] for r in parsed_results] - for key in signature.output_fields - }, + examples = [] + for r in parsed_results: + example = dspy.Example() + for key in signature.output_fields: + example[key] = r[key] + + examples.append(example) + + completions = Completions.new( + signature=signature, examples=examples, prompt="unknown", kwargs={} ) + + pred = Prediction.from_completions(completions) + return pred + raise ValueError( "Too many retries trying to get the correct output format. " + "Try simplifying the requirements.", From 32324218440315870c63d9c7c53342cda9157784 Mon Sep 17 00:00:00 2001 From: KCaverly Date: Mon, 18 Mar 2024 18:59:34 -0400 Subject: [PATCH 227/243] chore: update formatting for copro optimizer --- dspy/teleprompt/copro_optimizer.py | 88 +++++++++++++++++++++++------- 1 file changed, 67 insertions(+), 21 deletions(-) diff --git a/dspy/teleprompt/copro_optimizer.py b/dspy/teleprompt/copro_optimizer.py index a1a2062c09..e328dd9005 100644 --- a/dspy/teleprompt/copro_optimizer.py +++ b/dspy/teleprompt/copro_optimizer.py @@ -36,8 +36,12 @@ class BasicGenerateInstruction(Signature): """You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. Your task is to propose an instruction that will lead a good language model to perform the task well. Don't be afraid to be creative.""" - basic_instruction = dspy.InputField(desc="The initial instructions before optimization") - proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") + basic_instruction = dspy.InputField( + desc="The initial instructions before optimization" + ) + proposed_instruction = dspy.OutputField( + desc="The improved instructions for the language model" + ) proposed_prefix_for_output_field = dspy.OutputField( desc="The string at the end of the prompt, which will help the model start solving the task", ) @@ -46,10 +50,13 @@ class BasicGenerateInstruction(Signature): class GenerateInstructionGivenAttempts(dspy.Signature): """You are an instruction optimizer for large language models. 
I will give some task instructions I've tried, along with their corresponding validation scores. The instructions are arranged in increasing order based on their scores, where higher scores indicate better quality. - Your task is to propose a new instruction that will lead a good language model to perform the task even better. Don't be afraid to be creative.""" + Your task is to propose a new instruction that will lead a good language model to perform the task even better. Don't be afraid to be creative. + """ attempted_instructions = dspy.InputField(format=dsp.passages2text) - proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") + proposed_instruction = dspy.OutputField( + desc="The improved instructions for the language model" + ) proposed_prefix_for_output_field = dspy.OutputField( desc="The string at the end of the prompt, which will help the model start solving the task", ) @@ -77,8 +84,13 @@ def __init__( self.track_stats = track_stats def _check_candidates_equal(self, candidate1, candidate2): - for p1, p2 in zip(candidate1["program"].predictors(), candidate2["program"].predictors()): - if self._get_signature(p1).instructions != self._get_signature(p2).instructions: + for p1, p2 in zip( + candidate1["program"].predictors(), candidate2["program"].predictors() + ): + if ( + self._get_signature(p1).instructions + != self._get_signature(p2).instructions + ): return False *_, p1_last_field = self._get_signature(p1).fields.values() *_, p2_last_field = self._get_signature(p2).fields.values() @@ -110,7 +122,9 @@ def _print_signature(self, predictor): if self.verbose: signature = self._get_signature(predictor) print(f"i: {signature.instructions}") - print(f"p: {list(signature.fields.values())[-1].json_schema_extra['prefix']}") + print( + f"p: {list(signature.fields.values())[-1].json_schema_extra['prefix']}" + ) print() def _get_signature(self, predictor): @@ -131,10 +145,12 @@ def compile(self, student, *, trainset, eval_kwargs): evaluate = Evaluate(devset=trainset, metric=self.metric, **eval_kwargs) total_calls = 0 results_best = { - id(p): {"depth": [], "max": [], "average": [], "min": [], "std": []} for p in module.predictors() + id(p): {"depth": [], "max": [], "average": [], "min": [], "std": []} + for p in module.predictors() } results_latest = { - id(p): {"depth": [], "max": [], "average": [], "min": [], "std": []} for p in module.predictors() + id(p): {"depth": [], "max": [], "average": [], "min": [], "std": []} + for p in module.predictors() } if self.track_stats: @@ -149,7 +165,11 @@ def compile(self, student, *, trainset, eval_kwargs): basic_prefix = None *_, last_key = self._get_signature(predictor).fields.keys() basic_instruction = self._get_signature(predictor).instructions - basic_prefix = self._get_signature(predictor).fields[last_key].json_schema_extra["prefix"] + basic_prefix = ( + self._get_signature(predictor) + .fields[last_key] + .json_schema_extra["prefix"] + ) if self.prompt_model: with dspy.settings.context(lm=self.prompt_model): instruct = dspy.Predict( @@ -186,8 +206,12 @@ def compile(self, student, *, trainset, eval_kwargs): latest_scores = [] # Go through our module's predictors - for p_i, (p_old, p_new) in enumerate(zip(module.predictors(), module_clone.predictors())): - candidates_ = latest_candidates[id(p_old)] # Use the most recently generated candidates for evaluation + for p_i, (p_old, p_new) in enumerate( + zip(module.predictors(), module_clone.predictors()) + ): + candidates_ = latest_candidates[ + id(p_old) + ] 
# Use the most recently generated candidates for evaluation if len(module.predictors()) > 1: candidates_ = all_candidates[ id(p_old) @@ -222,7 +246,9 @@ def compile(self, student, *, trainset, eval_kwargs): ) score = evaluate(module_clone, devset=trainset, **eval_kwargs) if self.verbose and self.prompt_model: - print(f"prompt_model.inspect_history(n=1) {self.prompt_model.inspect_history(n=1)}") + print( + f"prompt_model.inspect_history(n=1) {self.prompt_model.inspect_history(n=1)}" + ) total_calls += 1 if self.verbose: print("----------------") @@ -233,7 +259,12 @@ def compile(self, student, *, trainset, eval_kwargs): # if verbose: print(f"evaluated_candidates[id(p_old)] {evaluated_candidates[id(p_old)]}") if (instruction, prefix) in evaluated_candidates[id(p_old)]: # if verbose: print(f"if evaluated_candidates[id(p_old)][(instruction, prefix)] {evaluated_candidates[id(p_old)][(instruction, prefix)]}") - if evaluated_candidates[id(p_old)][(instruction, prefix)]["score"] >= score: + if ( + evaluated_candidates[id(p_old)][(instruction, prefix)][ + "score" + ] + >= score + ): replace_entry = False if replace_entry: @@ -252,13 +283,18 @@ def compile(self, student, *, trainset, eval_kwargs): if self.track_stats: results_latest[id(p_old)]["depth"].append(d) results_latest[id(p_old)]["max"].append(max(latest_scores)) - results_latest[id(p_old)]["average"].append(sum(latest_scores) / len(latest_scores)) + results_latest[id(p_old)]["average"].append( + sum(latest_scores) / len(latest_scores) + ) results_latest[id(p_old)]["min"].append(min(latest_scores)) results_latest[id(p_old)]["std"].append(np.std(latest_scores)) # Now that we've evaluated the candidates, set this predictor to the best performing version # to ensure the next round of scores reflect the best possible version - best_candidate = max(evaluated_candidates[id(p_old)].values(), key=lambda candidate: candidate["score"]) + best_candidate = max( + evaluated_candidates[id(p_old)].values(), + key=lambda candidate: candidate["score"], + ) *_, last_key = self._get_signature(p_old).fields.keys() updated_signature = ( self._get_signature(p_new) @@ -295,15 +331,23 @@ def compile(self, student, *, trainset, eval_kwargs): scores = [x["score"] for x in best_predictors][:10] results_best[id(p_base)]["depth"].append(d) results_best[id(p_base)]["max"].append(max(scores)) - results_best[id(p_base)]["average"].append(sum(scores) / len(scores)) + results_best[id(p_base)]["average"].append( + sum(scores) / len(scores) + ) results_best[id(p_base)]["min"].append(min(scores)) results_best[id(p_base)]["std"].append(np.std(scores)) for i in range(shortest_len - 1, -1, -1): # breakpoint() - attempts.append(f'Instruction #{shortest_len-i}: {best_predictors[i]["instruction"]}') - attempts.append(f'Prefix #{shortest_len-i}: {best_predictors[i]["prefix"]}') - attempts.append(f'Resulting Score #{shortest_len-i}: {best_predictors[i]["score"]}') + attempts.append( + f'Instruction #{shortest_len-i}: {best_predictors[i]["instruction"]}' + ) + attempts.append( + f'Prefix #{shortest_len-i}: {best_predictors[i]["prefix"]}' + ) + attempts.append( + f'Resulting Score #{shortest_len-i}: {best_predictors[i]["score"]}' + ) # Generate next batch of potential prompts to optimize, with previous attempts as input if self.prompt_model: @@ -324,7 +368,9 @@ def compile(self, student, *, trainset, eval_kwargs): print(f"{self.prompt_model.inspect_history(n=1)}") # Get candidates for each predictor new_candidates[id(p_base)] = instr.completions - 
all_candidates[id(p_base)].proposed_instruction.extend(instr.completions.proposed_instruction) + all_candidates[id(p_base)].proposed_instruction.extend( + instr.completions.proposed_instruction + ) all_candidates[id(p_base)].proposed_prefix_for_output_field.extend( instr.completions.proposed_prefix_for_output_field, ) From d787bfd3a5239b5bd2d82aa4176d308537646ad4 Mon Sep 17 00:00:00 2001 From: KCaverly Date: Mon, 18 Mar 2024 19:42:34 -0400 Subject: [PATCH 228/243] fix: aligned copro_optimizer with new prediction and completions api --- dspy/teleprompt/copro_optimizer.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/dspy/teleprompt/copro_optimizer.py b/dspy/teleprompt/copro_optimizer.py index e328dd9005..a440c8bb1d 100644 --- a/dspy/teleprompt/copro_optimizer.py +++ b/dspy/teleprompt/copro_optimizer.py @@ -183,9 +183,6 @@ def compile(self, student, *, trainset, eval_kwargs): n=self.breadth - 1, temperature=self.init_temperature, )(basic_instruction=basic_instruction) - # Add in our initial prompt as a candidate as well - instruct.completions.proposed_instruction.append(basic_instruction) - instruct.completions.proposed_prefix_for_output_field.append(basic_prefix) candidates[id(predictor)] = instruct.completions evaluated_candidates[id(predictor)] = {} From 370474075953c00ed5bd679ff5a0f5e61a969ff0 Mon Sep 17 00:00:00 2001 From: KCaverly Date: Mon, 18 Mar 2024 20:04:01 -0400 Subject: [PATCH 229/243] chore: format mipro_optimizer --- dspy/teleprompt/mipro_optimizer.py | 186 ++++++++++++++++++++++------- 1 file changed, 141 insertions(+), 45 deletions(-) diff --git a/dspy/teleprompt/mipro_optimizer.py b/dspy/teleprompt/mipro_optimizer.py index c9e34189fd..dd399df2fb 100644 --- a/dspy/teleprompt/mipro_optimizer.py +++ b/dspy/teleprompt/mipro_optimizer.py @@ -48,8 +48,12 @@ class BasicGenerateInstruction(Signature): """You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. Your task is to propose an instruction that will lead a good language model to perform the task well. Don't be afraid to be creative.""" - basic_instruction = dspy.InputField(desc="The initial instructions before optimization") - proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") + basic_instruction = dspy.InputField( + desc="The initial instructions before optimization" + ) + proposed_instruction = dspy.OutputField( + desc="The improved instructions for the language model" + ) proposed_prefix_for_output_field = dspy.OutputField( desc="The string at the end of the prompt, which will help the model start solving the task", ) @@ -58,9 +62,13 @@ class BasicGenerateInstruction(Signature): class BasicGenerateInstructionWithDataObservations(Signature): """You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. I will also give you some ``observations`` I have made about the dataset and task. Your task is to propose an instruction that will lead a good language model to perform the task well. 
Don't be afraid to be creative.""" - basic_instruction = dspy.InputField(desc="The initial instructions before optimization") + basic_instruction = dspy.InputField( + desc="The initial instructions before optimization" + ) observations = dspy.InputField(desc="Observations about the dataset and task") - proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") + proposed_instruction = dspy.OutputField( + desc="The improved instructions for the language model" + ) proposed_prefix_for_output_field = dspy.OutputField( desc="The string at the end of the prompt, which will help the model start solving the task", ) @@ -69,13 +77,18 @@ class BasicGenerateInstructionWithDataObservations(Signature): class BasicGenerateInstructionWithExamples(dspy.Signature): """You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. Specifically, I will also provide you with the current ``basic instruction`` that is being used for this task. I will also provide you with some ``examples`` of the expected inputs and outputs. - Your task is to propose an instruction that will lead a good language model to perform the task well. Don't be afraid to be creative.""" + Your task is to propose an instruction that will lead a good language model to perform the task well. Don't be afraid to be creative. + """ # attempted_instructions = dspy.InputField(format=str, desc="Previously attempted task instructions, along with their resulting validation score, and an example of the instruction in use on a sample from our dataset.") - basic_instruction = dspy.InputField(desc="The initial instructions before optimization") + basic_instruction = dspy.InputField( + desc="The initial instructions before optimization" + ) # examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task") examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task") - proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") + proposed_instruction = dspy.OutputField( + desc="The improved instructions for the language model" + ) proposed_prefix_for_output_field = dspy.OutputField( desc="The string at the end of the prompt, which will help the model start solving the task", ) @@ -84,12 +97,17 @@ class BasicGenerateInstructionWithExamples(dspy.Signature): class BasicGenerateInstructionWithExamplesAndDataObservations(dspy.Signature): """You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. Specifically, I will give you some ``observations`` I have made about the dataset and task, along with some ``examples`` of the expected inputs and outputs. I will also provide you with the current ``basic instruction`` that is being used for this task. - Your task is to propose a new improved instruction and prefix for the output field that will lead a good language model to perform the task well. Don't be afraid to be creative.""" + Your task is to propose a new improved instruction and prefix for the output field that will lead a good language model to perform the task well. Don't be afraid to be creative. 
+ """ observations = dspy.InputField(desc="Observations about the dataset and task") examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task") - basic_instruction = dspy.InputField(desc="The initial instructions before optimization") - proposed_instruction = dspy.OutputField(desc="The improved instructions for the language model") + basic_instruction = dspy.InputField( + desc="The initial instructions before optimization" + ) + proposed_instruction = dspy.OutputField( + desc="The improved instructions for the language model" + ) proposed_prefix_for_output_field = dspy.OutputField( desc="The string at the end of the prompt, which will help the model start solving the task", ) @@ -112,7 +130,9 @@ class DatasetDescriptor(dspy.Signature): ) examples = dspy.InputField(desc="Sample data points from the dataset") - observations = dspy.OutputField(desc="Somethings that holds true for most or all of the data you observed") + observations = dspy.OutputField( + desc="Somethings that holds true for most or all of the data you observed" + ) class DatasetDescriptorWithPriorObservations(dspy.Signature): @@ -124,7 +144,9 @@ class DatasetDescriptorWithPriorObservations(dspy.Signature): ) examples = dspy.InputField(desc="Sample data points from the dataset") - prior_observations = dspy.InputField(desc="Some prior observations I made about the data") + prior_observations = dspy.InputField( + desc="Some prior observations I made about the data" + ) observations = dspy.OutputField( desc="Somethings that holds true for most or all of the data you observed or COMPLETE if you have nothing to add", ) @@ -146,7 +168,9 @@ def __init__( self.num_candidates = num_candidates self.metric = metric self.init_temperature = init_temperature - self.prompt_model = prompt_model if prompt_model is not None else dspy.settings.lm + self.prompt_model = ( + prompt_model if prompt_model is not None else dspy.settings.lm + ) self.task_model = task_model if task_model is not None else dspy.settings.lm self.verbose = verbose self.track_stats = track_stats @@ -172,19 +196,28 @@ def _print_model_history(self, model, n=1): def _observe_data(self, trainset, max_iterations=10): upper_lim = min(len(trainset), self.view_data_batch_size) - observation = dspy.Predict(DatasetDescriptor, n=1, temperature=1.0)(examples=(trainset[0:upper_lim].__repr__())) + observation = dspy.Predict(DatasetDescriptor, n=1, temperature=1.0)( + examples=(trainset[0:upper_lim].__repr__()) + ) observations = observation["observations"] skips = 0 iterations = 0 - for b in range(self.view_data_batch_size, len(trainset), self.view_data_batch_size): + for b in range( + self.view_data_batch_size, len(trainset), self.view_data_batch_size + ): upper_lim = min(len(trainset), b + self.view_data_batch_size) - output = dspy.Predict(DatasetDescriptorWithPriorObservations, n=1, temperature=1.0)( + output = dspy.Predict( + DatasetDescriptorWithPriorObservations, n=1, temperature=1.0 + )( prior_observations=observations, examples=(trainset[b:upper_lim].__repr__()), ) iterations += 1 - if len(output["observations"]) >= 8 and output["observations"][:8].upper() == "COMPLETE": + if ( + len(output["observations"]) >= 8 + and output["observations"][:8].upper() == "COMPLETE" + ): skips += 1 if skips >= 5: break @@ -193,7 +226,9 @@ def _observe_data(self, trainset, max_iterations=10): break observations += output["observations"] - summary = dspy.Predict(ObservationSummarizer, n=1, temperature=1.0)(observations=observations) + summary = 
dspy.Predict(ObservationSummarizer, n=1, temperature=1.0)( + observations=observations + ) return summary.summary @@ -244,23 +279,37 @@ def _generate_first_N_candidates( # noqa: N802 # Create data observations self.observations = None with dspy.settings.context(lm=self.prompt_model): - self.observations = self._observe_data(devset).replace("Observations:", "").replace("Summary:", "") + self.observations = ( + self._observe_data(devset) + .replace("Observations:", "") + .replace("Summary:", "") + ) if view_examples: example_sets = {} for predictor in module.predictors(): # Get all augmented examples example_set = {} - all_sets_of_examples = demo_candidates[id(predictor)] # Get all generated sets of examples + all_sets_of_examples = demo_candidates[ + id(predictor) + ] # Get all generated sets of examples for example_set_i, set_of_examples in enumerate(all_sets_of_examples): if example_set_i != 0: # Skip the no examples case - for example in set_of_examples: # Get each individual example in the set + for ( + example + ) in set_of_examples: # Get each individual example in the set if "augmented" in example and example["augmented"]: if example_set_i not in example_set: example_set[example_set_i] = [] - fields_to_use = signature_to_template(predictor.signature).fields - _input_variable_names = list(self._get_signature(predictor).input_fields.keys()) - example_string = self._create_example_string(fields_to_use, example) + fields_to_use = signature_to_template( + predictor.signature + ).fields + _input_variable_names = list( + self._get_signature(predictor).input_fields.keys() + ) + example_string = self._create_example_string( + fields_to_use, example + ) example_set[example_set_i].append(example_string) example_sets[id(predictor)] = example_set else: @@ -305,11 +354,16 @@ def _generate_first_N_candidates( # noqa: N802 BasicGenerateInstructionWithDataObservations, n=N - 1, temperature=self.init_temperature, - )(basic_instruction=basic_instruction, observations=self.observations) + )( + basic_instruction=basic_instruction, + observations=self.observations, + ) # Just examples elif view_examples: instruct = None - for i in range(1, self.num_candidates): # Note: skip over the first example set which is empty + for i in range( + 1, self.num_candidates + ): # Note: skip over the first example set which is empty new_instruct = dspy.Predict( BasicGenerateInstructionWithExamples, n=1, @@ -329,13 +383,19 @@ def _generate_first_N_candidates( # noqa: N802 ) # Neither else: - instruct = dspy.Predict(BasicGenerateInstruction, n=N - 1, temperature=self.init_temperature)( + instruct = dspy.Predict( + BasicGenerateInstruction, + n=N - 1, + temperature=self.init_temperature, + )( basic_instruction=basic_instruction, ) # Add in our initial prompt as a candidate as well instruct.completions.proposed_instruction.insert(0, basic_instruction) - instruct.completions.proposed_prefix_for_output_field.insert(0, basic_prefix) + instruct.completions.proposed_prefix_for_output_field.insert( + 0, basic_prefix + ) candidates[id(predictor)] = instruct.completions evaluated_candidates[id(predictor)] = {} @@ -366,12 +426,15 @@ def compile( random.seed(seed) - estimated_task_model_calls_wo_module_calls = len(trainset) * num_trials # M * T * P + estimated_task_model_calls_wo_module_calls = ( + len(trainset) * num_trials + ) # M * T * P estimated_prompt_model_calls = 10 + self.num_candidates * len( student.predictors(), ) # num data summary calls + N * P - user_message = textwrap.dedent(f"""\ + user_message = textwrap.dedent( + 
f"""\ {YELLOW}{BOLD}WARNING: Projected Language Model (LM) Calls{ENDC} Please be advised that based on the parameters you have set, the maximum number of LM calls is projected as follows: @@ -388,15 +451,18 @@ def compile( and prompt models you intend to use. If the projected costs exceed your budget or expectations, you may consider: {YELLOW}- Reducing the number of trials (`num_trials`), the size of the trainset, or the number of LM calls in your program.{ENDC} - {YELLOW}- Using a cheaper task model to optimize the prompt.{ENDC}""") + {YELLOW}- Using a cheaper task model to optimize the prompt.{ENDC}""" + ) - user_confirmation_message = textwrap.dedent(f"""\ + user_confirmation_message = textwrap.dedent( + f"""\ To proceed with the execution of this program, please confirm by typing {BLUE}'y'{ENDC} for yes or {BLUE}'n'{ENDC} for no. If you would like to bypass this confirmation step in future executions, set the {YELLOW}`requires_permission_to_run`{ENDC} flag to {YELLOW}`False`.{ENDC} {YELLOW}Awaiting your input...{ENDC} - """) + """ + ) print(user_message) @@ -420,7 +486,9 @@ def compile( max_bootstrapped_demos == 0 and max_labeled_demos == 0 ): # TODO: address case when max_bootstrapped alone is 0 max_bootstrapped_demos_for_candidate_gen = 1 - max_labeled_demos_for_candidate_gen = 1 # TODO: this might only need to be 0 + max_labeled_demos_for_candidate_gen = ( + 1 # TODO: this might only need to be 0 + ) else: max_bootstrapped_demos_for_candidate_gen = max_bootstrapped_demos max_labeled_demos_for_candidate_gen = max_labeled_demos @@ -447,10 +515,14 @@ def compile( max_labeled_demos=max_labeled_demos_for_candidate_gen, teacher_settings=self.teacher_settings, ) - candidate_program = tp.compile(student=module.deepcopy(), trainset=shuffled_trainset) + candidate_program = tp.compile( + student=module.deepcopy(), trainset=shuffled_trainset + ) # Store the candidate demos - for module_p, candidate_p in zip(module.predictors(), candidate_program.predictors()): + for module_p, candidate_p in zip( + module.predictors(), candidate_program.predictors() + ): if id(module_p) not in demo_candidates: demo_candidates[id(module_p)] = [] demo_candidates[id(module_p)].append(candidate_p.demos) @@ -477,7 +549,13 @@ def compile( trial_logs = {} # Define our trial objective - def create_objective(baseline_program, instruction_candidates, demo_candidates, evaluate, trainset): + def create_objective( + baseline_program, + instruction_candidates, + demo_candidates, + evaluate, + trainset, + ): def objective(trial): nonlocal best_program, best_score, trial_num, trial_logs # Allow access to the outer variables candidate_program = baseline_program.deepcopy() @@ -486,7 +564,9 @@ def objective(trial): print(f"Starting trial #{trial_num}") trial_logs[trial_num] = {} - for p_old, p_new in zip(baseline_program.predictors(), candidate_program.predictors()): + for p_old, p_new in zip( + baseline_program.predictors(), candidate_program.predictors() + ): # Get instruction candidates for our given predictor p_instruction_candidates = instruction_candidates[id(p_old)] if demo_candidates: @@ -502,14 +582,24 @@ def objective(trial): f"{id(p_old)}_predictor_demos", range(len(p_demo_candidates)), ) - trial_logs[trial_num][f"{id(p_old)}_predictor_instruction"] = instruction_idx + trial_logs[trial_num][ + f"{id(p_old)}_predictor_instruction" + ] = instruction_idx if demo_candidates: - trial_logs[trial_num][f"{id(p_old)}_predictor_demos"] = demos_idx + trial_logs[trial_num][ + f"{id(p_old)}_predictor_demos" + ] = demos_idx # Get 
the selected instruction candidate selected_candidate = p_instruction_candidates[instruction_idx] - selected_instruction = selected_candidate.proposed_instruction.strip('"').strip() - selected_prefix = selected_candidate.proposed_prefix_for_output_field.strip('"').strip() + selected_instruction = ( + selected_candidate.proposed_instruction.strip('"').strip() + ) + selected_prefix = ( + selected_candidate.proposed_prefix_for_output_field.strip( + '"' + ).strip() + ) # Use this candidates in our program *_, last_field = self._get_signature(p_new).fields.keys() @@ -543,12 +633,16 @@ def objective(trial): start_index = i * batch_size end_index = min((i + 1) * batch_size, len(trainset)) split_trainset = trainset[start_index:end_index] - split_score = evaluate(candidate_program, devset=split_trainset, display_table=0) + split_score = evaluate( + candidate_program, devset=split_trainset, display_table=0 + ) if self.verbose: print(f"{i}st split score: {split_score}") total_score += split_score * len(split_trainset) - curr_weighted_avg_score = total_score / min((i + 1) * 100, len(trainset)) + curr_weighted_avg_score = total_score / min( + (i + 1) * 100, len(trainset) + ) if self.verbose: print(f"curr average score: {curr_weighted_avg_score}") @@ -583,7 +677,9 @@ def objective(trial): return objective # Run the trial - objective_function = create_objective(module, instruction_candidates, demo_candidates, evaluate, trainset) + objective_function = create_objective( + module, instruction_candidates, demo_candidates, evaluate, trainset + ) sampler = optuna.samplers.TPESampler(seed=seed) study = optuna.create_study(direction="maximize", sampler=sampler) _score = study.optimize(objective_function, n_trials=num_trials) From 0c2541186d94f90b876e52dfb7cb3c93925ba671 Mon Sep 17 00:00:00 2001 From: KCaverly Date: Mon, 18 Mar 2024 20:39:04 -0400 Subject: [PATCH 230/243] fix: passing all tests --- dspy/primitives/template.py | 3 + dspy/teleprompt/copro_optimizer.py | 7 +- dspy/teleprompt/mipro_optimizer.py | 15 +- dspy/utils/__init__.py | 1 + dspy/utils/dummies.py | 7 +- dspy/utils/testing.py | 11 + tests/functional/test_functional.py | 387 ++++++++++++++++-- tests/predict/test_chain_of_thought.py | 5 +- .../test_chain_of_thought_with_hint.py | 48 ++- tests/predict/test_multi_chain_comparison.py | 40 +- tests/predict/test_predict.py | 48 +-- tests/predict/test_program_of_thought.py | 116 +----- tests/primitives/test_program.py | 2 + tests/signatures/test_signature.py | 30 +- tests/teleprompt/test_copro_optimizer.py | 101 +++-- tests/teleprompt/test_mipro_optimizer.py | 110 ++--- 16 files changed, 616 insertions(+), 315 deletions(-) create mode 100644 dspy/utils/testing.py diff --git a/dspy/primitives/template.py b/dspy/primitives/template.py index afa8f61dcb..3801cd54be 100644 --- a/dspy/primitives/template.py +++ b/dspy/primitives/template.py @@ -122,6 +122,8 @@ def extract(self, example: Example, raw_pred: str) -> Example: if not full_text.endswith("\n\n---"): full_text = full_text + "\n\n---" + print(full_text) + # Generate Search Strings prefixes = ( [ @@ -141,6 +143,7 @@ def extract(self, example: Example, raw_pred: str) -> Example: for prefix in prefixes ] for idx, (name, field) in enumerate(self.signature.output_fields.items()): + print(f"NAME: {name}") stop_prefixes = "|".join(prefixes[idx:]) target_prefix = ( diff --git a/dspy/teleprompt/copro_optimizer.py b/dspy/teleprompt/copro_optimizer.py index a440c8bb1d..eceac84ce5 100644 --- a/dspy/teleprompt/copro_optimizer.py +++ 
b/dspy/teleprompt/copro_optimizer.py @@ -365,11 +365,8 @@ def compile(self, student, *, trainset, eval_kwargs): print(f"{self.prompt_model.inspect_history(n=1)}") # Get candidates for each predictor new_candidates[id(p_base)] = instr.completions - all_candidates[id(p_base)].proposed_instruction.extend( - instr.completions.proposed_instruction - ) - all_candidates[id(p_base)].proposed_prefix_for_output_field.extend( - instr.completions.proposed_prefix_for_output_field, + all_candidates[id(p_base)].completions.extend_examples( + instr.completions ) if self.verbose and self.prompt_model: diff --git a/dspy/teleprompt/mipro_optimizer.py b/dspy/teleprompt/mipro_optimizer.py index dd399df2fb..808c9f7103 100644 --- a/dspy/teleprompt/mipro_optimizer.py +++ b/dspy/teleprompt/mipro_optimizer.py @@ -342,11 +342,8 @@ def _generate_first_N_candidates( # noqa: N802 if not instruct: instruct = new_instruct else: - instruct.completions.proposed_instruction.extend( - new_instruct.completions.proposed_instruction, - ) - instruct.completions.proposed_prefix_for_output_field.extend( - new_instruct.completions.proposed_prefix_for_output_field, + instruct.completions.extend_examples( + new_instruct.completions.examples ) # Just data elif view_data: @@ -392,10 +389,12 @@ def _generate_first_N_candidates( # noqa: N802 ) # Add in our initial prompt as a candidate as well - instruct.completions.proposed_instruction.insert(0, basic_instruction) - instruct.completions.proposed_prefix_for_output_field.insert( - 0, basic_prefix + new_example = dspy.Example( + basic_instruction=basic_instruction, + proposed_instruction=basic_instruction, + proposed_prefix_for_output_field=basic_prefix, ) + instruct.completions.add_example(new_example, 0) candidates[id(predictor)] = instruct.completions evaluated_candidates[id(predictor)] = {} diff --git a/dspy/utils/__init__.py b/dspy/utils/__init__.py index 9f8b201f6b..4460f50907 100644 --- a/dspy/utils/__init__.py +++ b/dspy/utils/__init__.py @@ -1 +1,2 @@ from .dummies import * +from .testing import * diff --git a/dspy/utils/dummies.py b/dspy/utils/dummies.py index 17854d3e56..0d5e232ef0 100644 --- a/dspy/utils/dummies.py +++ b/dspy/utils/dummies.py @@ -104,7 +104,11 @@ def __call__(self, prompt, _only_completed=True, _return_sorted=False, **kwargs) def get_convo(self, index) -> str: """Get the prompt + anwer from the ith message.""" - return self.history[index]["prompt"] + " " + self.history[index]["response"]["choices"][0]["text"] + return ( + self.history[index]["prompt"] + + " " + + self.history[index]["response"]["choices"][0]["text"] + ) def dummy_rm(passages=()) -> callable: @@ -131,6 +135,7 @@ def inner(query: str, *, k: int, **kwargs): class DummyVectorizer: """Simple vectorizer based on n-grams.""" + def __init__(self, max_length=100, n_gram=2): self.max_length = max_length self.n_gram = n_gram diff --git a/dspy/utils/testing.py b/dspy/utils/testing.py new file mode 100644 index 0000000000..eb642dfb74 --- /dev/null +++ b/dspy/utils/testing.py @@ -0,0 +1,11 @@ +import dspy +import decorator + + +def clean_up_lm_test(func): + def wrapper(func, *args, **kwargs): + dspy.settings.configure(lm=None, backend=None, cache=False) + func(*args, **kwargs) + dspy.settings.configure(lm=None, backend=None, cache=False) + + return decorator.decorator(wrapper, func) diff --git a/tests/functional/test_functional.py b/tests/functional/test_functional.py index a7d5fda668..6c2e0d7c6c 100644 --- a/tests/functional/test_functional.py +++ b/tests/functional/test_functional.py @@ -9,19 +9,42 @@ 
import dspy from dspy.backends.template import TemplateBackend -from dspy.functional import predictor, cot, FunctionalModule, TypedPredictor, TypedChainOfThought +from dspy.functional import ( + predictor, + cot, + FunctionalModule, + TypedPredictor, + TypedChainOfThought, +) from dspy.predict.predict import Predict from dspy.primitives.example import Example from dspy.teleprompt.bootstrap import BootstrapFewShot from dspy.teleprompt.vanilla import LabeledFewShot -from dspy.utils.dummies import DummyLanguageModel +from dspy.utils import DummyLanguageModel, DummyLM, clean_up_lm_test +@clean_up_lm_test def test_simple(): @predictor def hard_question(topic: str) -> str: """Think of a hard factual question about a topic.""" + expected = "What is the speed of light?" + lm = DummyLM([expected]) + dspy.settings.configure(lm=lm) + + question = hard_question(topic="Physics") + lm.inspect_history(n=2) + + assert question == expected + + +@clean_up_lm_test +def test_simple_with_backend(): + @predictor + def hard_question(topic: str) -> str: + """Think of a hard factual question about a topic.""" + lm = DummyLanguageModel(answers=[["What is the speed of light?"]]) backend = TemplateBackend(lm=lm) dspy.settings.configure(backend=backend, cache=False) @@ -29,18 +52,20 @@ def hard_question(topic: str) -> str: expected = "What is the speed of light?" question = hard_question(topic="Physics") - lm.inspect_history(n=2) assert question == expected +@clean_up_lm_test def test_list_output(): @predictor def hard_questions(topics: List[str]) -> List[str]: pass expected = ["What is the speed of light?", "What is the speed of sound?"] - lm = DummyLM(['{"value": ["What is the speed of light?", "What is the speed of sound?"]}']) + lm = DummyLM( + ['{"value": ["What is the speed of light?", "What is the speed of sound?"]}'] + ) dspy.settings.configure(lm=lm) question = hard_questions(topics=["Physics", "Music"]) @@ -49,6 +74,7 @@ def hard_questions(topics: List[str]) -> List[str]: assert question == expected +@clean_up_lm_test def test_simple_type(): class Question(pydantic.BaseModel): value: str @@ -57,6 +83,25 @@ class Question(pydantic.BaseModel): def hard_question(topic: str) -> Question: """Think of a hard factual question about a topic.""" + expected = "What is the speed of light?" + lm = DummyLM([f'{{"value": "{expected}"}}']) + dspy.settings.configure(lm=lm) + + question = hard_question(topic="Physics") + + assert isinstance(question, Question) + assert question.value == expected + + +@clean_up_lm_test +def test_simple_type_with_backend(): + class Question(pydantic.BaseModel): + value: str + + @predictor + def hard_question(topic: str) -> Question: + """Think of a hard factual question about a topic.""" + expected = "What is the speed of light?" 
lm = DummyLanguageModel(answers=[[f'{{"value": "{expected}"}}']]) backend = TemplateBackend(lm=lm) @@ -68,6 +113,7 @@ def hard_question(topic: str) -> Question: assert question.value == expected +@clean_up_lm_test def test_simple_type_input(): class Question(pydantic.BaseModel): value: str @@ -79,6 +125,27 @@ class Answer(pydantic.BaseModel): def answer(question: Question) -> Answer: pass + question = Question(value="What is the speed of light?") + lm = DummyLM([f'{{"value": "3e8"}}']) + dspy.settings.configure(lm=lm) + + result = answer(question=question) + + assert result == Answer(value="3e8") + + +@clean_up_lm_test +def test_simple_type_input_with_backend(): + class Question(pydantic.BaseModel): + value: str + + class Answer(pydantic.BaseModel): + value: str + + @predictor + def answer(question: Question) -> Answer: + pass + question = Question(value="What is the speed of light?") lm = DummyLanguageModel(answers=[[f'{{"value": "3e8"}}']]) @@ -90,11 +157,66 @@ def answer(question: Question) -> Answer: assert result == Answer(value="3e8") +@clean_up_lm_test def test_simple_class(): class Answer(pydantic.BaseModel): value: float certainty: float - comments: List[str] = pydantic.Field(description="At least two comments about the answer") + comments: List[str] = pydantic.Field( + description="At least two comments about the answer" + ) + + class QA(FunctionalModule): + @predictor + def hard_question(self, topic: str) -> str: + """Think of a hard factual question about a topic. It should be answerable with a number.""" + + @cot + def answer(self, question: Annotated[str, "Question to answer"]) -> Answer: + pass + + def forward(self, **kwargs): + question = self.hard_question(**kwargs) + return (question, self.answer(question=question)) + + expected = Answer( + value=3e8, + certainty=0.9, + comments=["It is the speed of light", "It is a constant"], + ) + + lm = DummyLM( + [ + "What is the speed of light?", + "Some bad reasoning, 3e8 m/s.", + "3e8", # Bad answer 1 + "{...}", # Model is asked to create an example + "Some good reasoning...", + expected.model_dump_json(), # Good answer + ] + ) + dspy.settings.configure(lm=lm) + + qa = QA() + assert isinstance(qa, FunctionalModule) + assert isinstance(qa.answer, dspy.Module) + + question, answer = qa(topic="Physics") + + print(qa.answer) + + assert question == "What is the speed of light?" 
+ assert answer == expected + + +@clean_up_lm_test +def test_simple_class_with_backend(): + class Answer(pydantic.BaseModel): + value: float + certainty: float + comments: List[str] = pydantic.Field( + description="At least two comments about the answer" + ) class QA(FunctionalModule): @predictor @@ -119,12 +241,14 @@ def forward(self, **kwargs): answers=[ ["What is the speed of light?"], ["Some bad reasoning, 3e8 m/s\n\nAnswer: 3e8"], - [f"{...}", # Model is asked to create an example - "Some good reasoning...\n\nAnswer: {expected.model_dump_json()}"], + [ # Model is asked to create an example + f"{...}", + ], + [f"Some good reasoning...\n\nAnswer: {expected.model_dump_json()}"], ] ) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend) + dspy.settings.configure(backend=backend, cache=False, lm=None) qa = QA() assert isinstance(qa, FunctionalModule) @@ -136,7 +260,8 @@ def forward(self, **kwargs): assert answer == expected -def test_simple_oop(): +@clean_up_lm_test +def test_simple_oop_with_backend(): class Question(pydantic.BaseModel): value: str @@ -194,7 +319,8 @@ def answer(self, question: str) -> str: } -def test_bootstrap_effectiveness(): +@clean_up_lm_test +def test_bootstrap_effectiveness_with_backend(): class SimpleModule(FunctionalModule): @predictor def output(self, input: str) -> str: @@ -230,7 +356,9 @@ def simple_metric(example, prediction, trace=None): backend = TemplateBackend(lm=lm) dspy.settings.configure(backend=backend, cache=False) - bootstrap = BootstrapFewShot(metric=simple_metric, max_bootstrapped_demos=1, max_labeled_demos=1) + bootstrap = BootstrapFewShot( + metric=simple_metric, max_bootstrapped_demos=1, max_labeled_demos=1 + ) compiled_student = bootstrap.compile(student, teacher=teacher, trainset=trainset) # lm.inspect_history(n=2) @@ -272,6 +400,7 @@ def simple_metric(example, prediction, trace=None): ) +@clean_up_lm_test def test_regex(): class TravelInformation(BaseModel): origin: str = Field(pattern=r"^[A-Z]{3}$") @@ -282,6 +411,41 @@ class TravelInformation(BaseModel): def flight_information(email: str) -> TravelInformation: pass + email = textwrap.dedent( + """\ + We're excited to welcome you aboard your upcoming flight from + John F. Kennedy International Airport (JFK) to Los Angeles International Airport (LAX) + on December 25, 2022. Here's everything you need to know before you take off: ... + """ + ) + lm = DummyLM( + [ + # Example with a bad origin code. 
+ '{"origin": "JF0", "destination": "LAX", "date": "2022-12-25"}', + # Example to help the model understand + "{...}", + # Fixed + '{"origin": "JFK", "destination": "LAX", "date": "2022-12-25"}', + ] + ) + dspy.settings.configure(lm=lm) + + assert flight_information(email=email) == TravelInformation( + origin="JFK", destination="LAX", date=datetime.date(2022, 12, 25) + ) + + +@clean_up_lm_test +def test_regex_with_backend(): + class TravelInformation(BaseModel): + origin: str = Field(pattern=r"^[A-Z]{3}$") + destination: str = Field(pattern=r"^[A-Z]{3}$") + date: datetime.date + + @predictor + def flight_information(email: str) -> TravelInformation: + pass + email = textwrap.dedent( """\ We're excited to welcome you aboard your upcoming flight from @@ -293,6 +457,7 @@ def flight_information(email: str) -> TravelInformation: lm = DummyLanguageModel( answers=[ ['{"origin": "JF0", "destination": "LAX", "date": "2022-12-25"}'], + ["{...}"], ['{"origin": "JFK", "destination": "LAX", "date": "2022-12-25"}'], ] ) @@ -305,6 +470,7 @@ def flight_information(email: str) -> TravelInformation: ) +@clean_up_lm_test def test_raises(): class TravelInformation(BaseModel): origin: str = Field(pattern=r"^[A-Z]{3}$") @@ -315,20 +481,47 @@ class TravelInformation(BaseModel): def flight_information(email: str) -> TravelInformation: pass + lm = DummyLM( + [ + "A list of bad inputs", + '{"origin": "JF0", "destination": "LAX", "date": "2022-12-25"}', + '{"origin": "JFK", "destination": "LAX", "date": "bad date"}', + ] + ) + dspy.settings.configure(lm=lm) + + with pytest.raises(ValueError): + flight_information(email="Some email") + + +@clean_up_lm_test +def test_raises_with_backend(): + class TravelInformation(BaseModel): + origin: str = Field(pattern=r"^[A-Z]{3}$") + destination: str = Field(pattern=r"^[A-Z]{3}$") + date: datetime.date + + @predictor + def flight_information(email: str) -> TravelInformation: + pass + lm = DummyLanguageModel( answers=[ ["A list of bad inputs"], ['{"origin": "JF0", "destination": "LAX", "date": "2022-12-25"}'], ['{"origin": "JFK", "destination": "LAX", "date": "bad date"}'], + ["..."], + ["..."], ] ) - backend = TemplateBackend(lm=lm) + backend = TemplateBackend(lm=lm, attempts=1) dspy.settings.configure(backend=backend) with pytest.raises(ValueError): flight_information(email="Some email") +@clean_up_lm_test def test_multi_errors(): class TravelInformation(BaseModel): origin: str = Field(pattern=r"^[A-Z]{3}$") @@ -339,10 +532,66 @@ class TravelInformation(BaseModel): def flight_information(email: str) -> TravelInformation: pass + lm = DummyLM( + [ + # First origin is wrong, then destination, then all is good + '{"origin": "JF0", "destination": "LAX", "date": "2022-12-25"}', + "{...}", # Example to help the model understand + '{"origin": "JFK", "destination": "LA0", "date": "2022-12-25"}', + "{...}", # Example to help the model understand + '{"origin": "JFK", "destination": "LAX", "date": "2022-12-25"}', + ] + ) + dspy.settings.configure(lm=lm) + + assert flight_information(email="Some email") == TravelInformation( + origin="JFK", destination="LAX", date=datetime.date(2022, 12, 25) + ) + assert lm.get_convo(-1) == textwrap.dedent( + """\ + Given the fields `email`, produce the fields `flight_information`. + + --- + + Follow the following format. + + Email: ${email} + + Past Error in Flight Information: An error to avoid in the future + + Past Error (2) in Flight Information: An error to avoid in the future + + Flight Information: ${flight_information}. 
Respond with a single JSON object. JSON Schema: {"properties": {"origin": {"pattern": "^[A-Z]{3}$", "title": "Origin", "type": "string"}, "destination": {"pattern": "^[A-Z]{3}$", "title": "Destination", "type": "string"}, "date": {"format": "date", "title": "Date", "type": "string"}}, "required": ["origin", "destination", "date"], "title": "TravelInformation", "type": "object"} + + --- + + Email: Some email + + Past Error in Flight Information: String should match pattern '^[A-Z]{3}$': origin (error type: string_pattern_mismatch) + + Past Error (2) in Flight Information: String should match pattern '^[A-Z]{3}$': destination (error type: string_pattern_mismatch) + + Flight Information: {"origin": "JFK", "destination": "LAX", "date": "2022-12-25"}""" + ) + + +@clean_up_lm_test +def test_multi_errors_with_backend(): + class TravelInformation(BaseModel): + origin: str = Field(pattern=r"^[A-Z]{3}$") + destination: str = Field(pattern=r"^[A-Z]{3}$") + date: datetime.date + + @predictor + def flight_information(email: str) -> TravelInformation: + pass + lm = DummyLanguageModel( answers=[ ['{"origin": "JF0", "destination": "LAX", "date": "2022-12-25"}'], + ["{...}"], ['{"origin": "JFK", "destination": "LA0", "date": "2022-12-25"}'], + ["{...}"], ['{"origin": "JFK", "destination": "LAX", "date": "2022-12-25"}'], ] ) @@ -354,7 +603,6 @@ def flight_information(email: str) -> TravelInformation: origin="JFK", destination="LAX", date=datetime.date(2022, 12, 25) ) - assert backend.history[-1].prompt == textwrap.dedent( """\ Given the fields `email`, produce the fields `flight_information`. @@ -401,14 +649,64 @@ def get_user_details() -> UserDetails: # Keep making the mistake (lower case name) until we run # out of retries. - lm = DummyLanguageModel(answers=[['{"name": "lower case name", "age": 25}']]) + lm = DummyLM( + [ + '{"name": "lower case name", "age": 25}', + ] + * 10 + ) + dspy.settings.configure(lm=lm) + + with pytest.raises(ValueError): + get_user_details() + + print(lm.get_convo(-1)) + assert lm.get_convo(-1) == textwrap.dedent( + """\ + Given the fields , produce the fields `get_user_details`. + + --- + + Follow the following format. + + Past Error in Get User Details: An error to avoid in the future + Past Error (2) in Get User Details: An error to avoid in the future + Get User Details: ${get_user_details}. Respond with a single JSON object. JSON Schema: {"properties": {"name": {"title": "Name", "type": "string"}, "age": {"title": "Age", "type": "integer"}}, "required": ["name", "age"], "title": "UserDetails", "type": "object"} + + --- + + Past Error in Get User Details: Value error, Name must be in uppercase.: name (error type: value_error) + Past Error (2) in Get User Details: Value error, Name must be in uppercase.: name (error type: value_error) + Get User Details: {"name": "lower case name", "age": 25}""" + ) + + +@clean_up_lm_test +def test_field_validator_with_backend(): + class UserDetails(BaseModel): + name: str + age: int + + @field_validator("name") + @classmethod + def validate_name(cls, v): + if v.upper() != v: + raise ValueError("Name must be in uppercase.") + return v + + @predictor + def get_user_details() -> UserDetails: + pass + + # Keep making the mistake (lower case name) until we run + # out of retries. 
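+    # The bad completion is scripted ten times over so that however many
+    # retries the typed predictor attempts, each one parses the same invalid
+    # (lowercase) name and validation can never succeed; the call is then
+    # expected to surface a ValueError. (A reading of this fixture, not a
+    # statement about the exact retry contract.)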
+ lm = DummyLanguageModel(answers=[['{"name": "lower case name", "age": 25}'] * 10]) backend = TemplateBackend(lm=lm) dspy.settings.configure(backend=backend, cache=True) with pytest.raises(ValueError): get_user_details() - print(lm.get_convo(-1)) assert backend.history[-1].prompt == textwrap.dedent( """\ Given the fields , produce the fields `get_user_details`. @@ -433,9 +731,12 @@ def get_user_details() -> UserDetails: ) +@clean_up_lm_test def test_annotated_field(): @predictor - def test(input: Annotated[str, Field(description="description")]) -> Annotated[float, Field(gt=0, lt=1)]: + def test( + input: Annotated[str, Field(description="description")] + ) -> Annotated[float, Field(gt=0, lt=1)]: pass # First try 0, which fails, then try 0.5, which passes @@ -447,13 +748,15 @@ def test(input: Annotated[str, Field(description="description")]) -> Annotated[f assert output == 0.5 +@clean_up_lm_test def test_multiple_outputs(): lm = DummyLM([str(i) for i in range(100)]) dspy.settings.configure(lm=lm) test = TypedPredictor("input -> output") - output = test(input="input", config=dict(n=3)).completions.output - assert output == ["0", "1", "2"] + result = test(input="input", config=dict(n=3)) + + assert [completion.output for completion in result.completions] == ["0", "1", "2"] def test_multiple_outputs_int(): @@ -466,10 +769,11 @@ class TestSignature(dspy.Signature): test = TypedPredictor(TestSignature) - output = test(input=8, config=dict(n=3)).completions.output - assert output == [0, 1, 2] + result = test(input=8, config=dict(n=3)) + assert [completion.output for completion in result.completions] == [0, 1, 2] +@clean_up_lm_test def test_multiple_outputs_int_cot(): # Note: Multiple outputs only work when the language model "speculatively" generates all the outputs in one go. 
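+    # Under that assumption, one canned string carries the rationale and the
+    # integer answer together, and `config=dict(n=3)` surfaces one parsed
+    # completion per generation. Expected shape (inferred from the sibling
+    # test_parse_type_string below, since this assert only checks truthiness):
+    #
+    #     results = test(input="8", config=dict(n=3))
+    #     [c.output for c in results.completions]  # -> [0, 1, 2]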
lm = DummyLM( @@ -483,8 +787,8 @@ def test_multiple_outputs_int_cot(): test = TypedChainOfThought("input:str -> output:int") - output = test(input="8", config=dict(n=3)).completions.output - assert output == [0, 1, 2] + results = test(input="8", config=dict(n=3)) + assert [completion.output for completion in results.completions] def test_parse_type_string(): @@ -493,8 +797,8 @@ def test_parse_type_string(): test = TypedPredictor("input:int -> output:int") - output = test(input=8, config=dict(n=3)).completions.output - assert output == [0, 1, 2] + results = test(input=8, config=dict(n=3)) + assert [completion.output for completion in results.completions] == [0, 1, 2] def test_literal(): @@ -560,9 +864,11 @@ class ExampleSignature(dspy.Signature): generator = TypedPredictor(ExampleSignature) examples = generator(config=dict(n=3)) - for ex in examples.completions.fact: - assert isinstance(ex, SyntheticFact) - assert examples.completions.fact[0] == SyntheticFact(fact="The sky is blue", varacity=True) + for completion in examples.completions: + assert isinstance(completion.fact, SyntheticFact), type(completion.fact) + assert examples.completions[0].fact == SyntheticFact( + fact="The sky is blue", varacity=True + ) # If you have examples and want more existing_examples = [ @@ -572,8 +878,8 @@ class ExampleSignature(dspy.Signature): trained = LabeledFewShot().compile(student=generator, trainset=existing_examples) augmented_examples = trained(config=dict(n=3)) - for ex in augmented_examples.completions.fact: - assert isinstance(ex, SyntheticFact) + for completion in augmented_examples.completions: + assert isinstance(completion.fact, SyntheticFact) def test_list_input2(): @@ -604,7 +910,8 @@ class ScoredSignature(dspy.Signature): assert output == "Output" - assert lm.get_convo(-1) == textwrap.dedent("""\ + assert lm.get_convo(-1) == textwrap.dedent( + """\ Given the fields `attempted_signatures`, produce the fields `proposed_signature`. --- @@ -619,7 +926,8 @@ class ScoredSignature(dspy.Signature): Attempted Signatures: [{"string":"string 1","score":0.5},{"string":"string 2","score":0.4},{"string":"string 3","score":0.3}] Reasoning: Let's think step by step in order to Thoughts - Proposed Signature: Output""") + Proposed Signature: Output""" + ) def test_generic_signature(): @@ -656,10 +964,11 @@ def space_in_a(cls, a: str) -> str: _ = ValidatedSignature(a="with space") +@clean_up_lm_test def test_lm_as_validator(): @predictor def is_square(n: int) -> bool: - """Is n a square number?""" + """Is x a square number?""" def check_square(n): assert is_square(n=n) @@ -674,10 +983,10 @@ def next_square(n: int) -> Annotated[int, AfterValidator(check_square)]: m = next_square(n=2) lm.inspect_history(n=2) - assert m == 4 +@clean_up_lm_test def test_annotated_validator(): def is_square(n: int) -> int: root = n**0.5 @@ -733,7 +1042,8 @@ def test_demos(): assert program(input="What is the capital of France?").output == "Paris" - assert lm.get_convo(-1) == textwrap.dedent("""\ + assert lm.get_convo(-1) == textwrap.dedent( + """\ Given the fields `input`, produce the fields `output`. --- @@ -751,7 +1061,8 @@ def test_demos(): --- Input: What is the capital of France? 
- Output: Paris""") + Output: Paris""" + ) def _test_demos_missing_input(): @@ -763,7 +1074,8 @@ def _test_demos_missing_input(): dspy.settings.configure(lm=DummyLM(["My thoughts", "Paris"])) assert program(input="What is the capital of France?").output == "Paris" - assert dspy.settings.lm.get_convo(-1) == textwrap.dedent("""\ + assert dspy.settings.lm.get_convo(-1) == textwrap.dedent( + """\ Given the fields `input`, produce the fields `output`. --- @@ -783,4 +1095,5 @@ def _test_demos_missing_input(): Input: What is the capital of France? Thoughts: My thoughts - Output: Paris""") + Output: Paris""" + ) diff --git a/tests/predict/test_chain_of_thought.py b/tests/predict/test_chain_of_thought.py index acf2912905..10ebe6995f 100644 --- a/tests/predict/test_chain_of_thought.py +++ b/tests/predict/test_chain_of_thought.py @@ -1,8 +1,7 @@ import textwrap import dspy from dspy import ChainOfThought -from dspy.utils import DummyLM -from dspy.utils.dummies import DummyLanguageModel +from dspy.utils import DummyLM, DummyLanguageModel from dspy.backends import TemplateBackend @@ -40,8 +39,6 @@ def test_initialization_with_string_signature(): def test_initialization_with_string_signature_experimental(): - dspy.settings.configure(experimental=True) - lm = DummyLanguageModel(answers=[["find the number after 1\n\nAnswer: 2"]]) backend = TemplateBackend(lm=lm) dspy.settings.configure(backend=backend, lm=None, cache=False) diff --git a/tests/predict/test_chain_of_thought_with_hint.py b/tests/predict/test_chain_of_thought_with_hint.py index ff33ccb659..ce77217a42 100644 --- a/tests/predict/test_chain_of_thought_with_hint.py +++ b/tests/predict/test_chain_of_thought_with_hint.py @@ -1,10 +1,53 @@ import dspy from dspy import ChainOfThoughtWithHint -from dspy.utils import DummyLanguageModel +from dspy.utils import DummyLanguageModel, DummyLM, clean_up_lm_test from dspy.backends import TemplateBackend +@clean_up_lm_test def test_cot_with_no_hint(): + lm = DummyLM(["find the number after 1", "2"]) + dspy.settings.configure(lm=lm) + predict = ChainOfThoughtWithHint("question -> answer") + # Check output fields have the right order + assert list(predict.extended_signature2.output_fields.keys()) == [ + "rationale", + "hint", + "answer", + ] + assert predict(question="What is 1+1?").answer == "2" + + final_convo = lm.get_convo(-1) + assert final_convo.endswith( + "Question: What is 1+1?\n" + "Reasoning: Let's think step by step in order to find the number after 1\n" + "Answer: 2" + ) + + +@clean_up_lm_test +def test_cot_with_hint(): + lm = DummyLM(["find the number after 1", "2"]) + dspy.settings.configure(lm=lm) + predict = ChainOfThoughtWithHint("question -> answer") + assert list(predict.extended_signature2.output_fields.keys()) == [ + "rationale", + "hint", + "answer", + ] + assert predict(question="What is 1+1?", hint="think small").answer == "2" + + final_convo = lm.get_convo(-1) + assert final_convo.endswith( + "Question: What is 1+1?\n\n" + "Reasoning: Let's think step by step in order to find the number after 1\n\n" + "Hint: think small\n\n" + "Answer: 2" + ) + + +@clean_up_lm_test +def test_cot_with_no_hint_with_backend(): lm = DummyLanguageModel(answers=[["find the number after 1\n\nAnswer: 2"]]) backend = TemplateBackend(lm=lm) dspy.settings.configure(backend=backend) @@ -18,7 +61,8 @@ def test_cot_with_no_hint(): assert predict(question="What is 1+1?").answer == "2" -def test_cot_with_hint(): +@clean_up_lm_test +def test_cot_with_hint_with_backend(): lm = DummyLanguageModel(answers=[["find the number 
after 1\n\nAnswer: 2"]]) backend = TemplateBackend(lm=lm) dspy.settings.configure(backend=backend) diff --git a/tests/predict/test_multi_chain_comparison.py b/tests/predict/test_multi_chain_comparison.py index 2adb6c8270..9f6fab863f 100644 --- a/tests/predict/test_multi_chain_comparison.py +++ b/tests/predict/test_multi_chain_comparison.py @@ -1,8 +1,9 @@ import dspy -from dspy.utils.dummies import DummyLM, DummyLanguageModel +from dspy.utils import DummyLM, DummyLanguageModel, clean_up_lm_test from dspy.backends import TemplateBackend +@clean_up_lm_test def test_basic_example(): class BasicQA(dspy.Signature): """Answer questions with short factoid answers.""" @@ -29,6 +30,43 @@ class BasicQA(dspy.Signature): # Pass signature to MultiChainComparison module compare_answers = dspy.MultiChainComparison(BasicQA) + # Call the MultiChainComparison on the completions + question = "What is the color of the sky?" + lm = DummyLM(["my rationale", "blue"]) + dspy.settings.configure(lm=lm) + final_pred = compare_answers(completions, question=question) + + assert final_pred.rationale == "my rationale" + assert final_pred.answer == "blue" + + +@clean_up_lm_test +def test_basic_example_with_backend(): + class BasicQA(dspy.Signature): + """Answer questions with short factoid answers.""" + + question = dspy.InputField() + answer = dspy.OutputField(desc="often between 1 and 5 words") + + # Example completions generated by a model for reference + completions = [ + dspy.Prediction( + rationale="I recall that during clear days, the sky often appears this color.", + answer="blue", + ), + dspy.Prediction( + rationale="Based on common knowledge, I believe the sky is typically seen as this color.", + answer="green", + ), + dspy.Prediction( + rationale="From images and depictions in media, the sky is frequently represented with this hue.", + answer="blue", + ), + ] + + # Pass signature to MultiChainComparison module + compare_answers = dspy.MultiChainComparison(BasicQA) + # Call the MultiChainComparison on the completions question = "What is the color of the sky?" 
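+    # MultiChainComparison folds the three candidate rationales above into a
+    # single comparison prompt, so one canned completion is enough for the
+    # backend to produce the final prediction. Approximate parsed shape
+    # (inferred from this test's asserts):
+    #
+    #     final_pred.rationale  # -> "my rationale"
+    #     final_pred.answer     # -> "blue"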
lm = DummyLanguageModel(answers=[["my rationale\n\nAnswer: blue"]]) diff --git a/tests/predict/test_predict.py b/tests/predict/test_predict.py index 972657e69f..261573398d 100644 --- a/tests/predict/test_predict.py +++ b/tests/predict/test_predict.py @@ -3,7 +3,7 @@ from dspy import Predict, Signature from dspy.backends.json import JSONBackend from dspy.backends import TemplateBackend -from dspy.utils.dummies import DummyLM, DummyLanguageModel +from dspy.utils import DummyLM, DummyLanguageModel, clean_up_lm_test import copy import textwrap @@ -18,9 +18,8 @@ def test_initialization_with_string_signature(): assert predict.signature.instructions == Signature(signature_string).instructions +@clean_up_lm_test def test_reset_method(): - dsp.settings.get("experimental", False) - predict_instance = Predict("input -> output") predict_instance.lm = "modified" @@ -34,6 +33,7 @@ def test_reset_method(): assert predict_instance.demos == [] +@clean_up_lm_test def test_dump_and_load_state(): predict_instance = Predict("input -> output") predict_instance.lm = "lm_state" @@ -43,6 +43,7 @@ def test_dump_and_load_state(): assert new_instance.lm == "lm_state" +@clean_up_lm_test def test_call_method(): predict_instance = Predict("input -> output") lm = DummyLM(["test output"]) @@ -61,8 +62,8 @@ def test_call_method(): ) -def test_call_method_experimental(): - dspy.settings.configure(experimental=True) +@clean_up_lm_test +def test_call_method_with_backend(): predict_instance = Predict("input -> output") lm = DummyLanguageModel(answers=[["test output"]]) @@ -74,8 +75,6 @@ def test_call_method_experimental(): def test_dump_load_state(): - dspy.settings.configure(experimental=False) - predict_instance = Predict(Signature("input -> output", "original instructions")) dumped_state = predict_instance.dump_state() new_instance = Predict(Signature("input -> output", "new instructions")) @@ -83,18 +82,16 @@ def test_dump_load_state(): assert new_instance.signature.instructions == "original instructions" +@clean_up_lm_test def test_forward_method(): - dspy.settings.configure(experimental=False) - program = Predict("question -> answer") dspy.settings.configure(lm=DummyLM([]), backend=None) result = program(question="What is 1+1?").answer assert result == "No more responses" -def test_forward_method_experimental(): - dspy.settings.configure(experimental=True) - +@clean_up_lm_test +def test_forward_method_with_backend(): lm = DummyLanguageModel(answers=[["No more responses"]]) backend = TemplateBackend(lm=lm) dspy.settings.configure(backend=backend, lm=None) @@ -104,9 +101,8 @@ def test_forward_method_experimental(): assert result == "No more responses" +@clean_up_lm_test def test_forward_method2(): - dspy.settings.configure(experimental=False) - program = Predict("question -> answer1, answer2") dspy.settings.configure(lm=DummyLM(["my first answer", "my second answer"])) result = program(question="What is 1+1?") @@ -114,9 +110,8 @@ def test_forward_method2(): assert result.answer2 == "my second answer" -def test_forward_method2_experimental(): - dspy.settings.configure(experimental=True) - +@clean_up_lm_test +def test_forward_method2_with_backend(): lm = DummyLanguageModel( answers=[[" my first answer\n\nAnswer 2: my second answer"]] ) @@ -136,21 +131,19 @@ def test_config_management(): assert "new_key" in config and config["new_key"] == "value" +@clean_up_lm_test def test_multi_output(): - dspy.settings.configure(experimental=False) - program = Predict("question -> answer", n=2) dspy.settings.configure( - lm=DummyLM(["my 
first answer", "my second answer"]), backend=False + lm=DummyLM(["my first answer", "my second answer"]), backend=None ) results = program(question="What is 1+1?") assert results.completions[0].answer == "my first answer" assert results.completions[1].answer == "my second answer" -def test_multi_output_experimental(): - dspy.settings.configure(experimental=True) - +@clean_up_lm_test +def test_multi_output_with_backend(): program = Predict("question -> answer", n=2) lm = DummyLanguageModel(answers=[["my first answer", "my second answer"]]) @@ -162,9 +155,8 @@ def test_multi_output_experimental(): assert results.completions[1].answer == "my second answer" -def test_multi_output_json_experimental(): - dspy.settings.configure(experimental=True) - +@clean_up_lm_test +def test_multi_output_json_with_backend(): program = Predict("question -> answer", n=2) lm = DummyLanguageModel( @@ -182,9 +174,8 @@ def test_multi_output_json_experimental(): assert results.completions[1].answer == "my second answer" +@clean_up_lm_test def test_multi_output2(): - dspy.settings.configure(experimental=False) - program = Predict("question -> answer1, answer2", n=2) dspy.settings.configure( lm=DummyLM( @@ -215,6 +206,7 @@ def __init__(self): assert program2.named_predictors() == [("inner", program2.inner)] +@clean_up_lm_test def test_output_only(): class OutputOnlySignature(dspy.Signature): output = dspy.OutputField() diff --git a/tests/predict/test_program_of_thought.py b/tests/predict/test_program_of_thought.py index 558ccf1a6f..5feeb26b08 100644 --- a/tests/predict/test_program_of_thought.py +++ b/tests/predict/test_program_of_thought.py @@ -1,9 +1,8 @@ from dspy import Signature, ProgramOfThought import dspy -from dspy.utils import DummyLM +from dspy.utils import DummyLM, DummyLanguageModel import textwrap -from dspy.utils.dummies import DummyLanguageModel from dspy.backends import TemplateBackend @@ -23,62 +22,12 @@ def test_pot_code_generation(): ) backend = TemplateBackend(lm=lm) dspy.settings.configure(backend=backend, cache=False) - # lm = DummyLM( - # [ - # "Reason_A", - # "```python\nresult = 1+1\n```", - # "Reason_B", - # "2", - # ] - # ) - dspy.settings.configure(lm=lm) res = pot(question="What is 1+1?") assert res.answer == "2" - # assert lm.get_convo(index=-1) == textwrap.dedent( - # """\ - # Given the final code `question`, `final_generated_code`, `code_output`, provide the final `answer`. - # - # --- - # - # Follow the following format. - # - # Question: ${question} - # - # Code: python code that answers the question - # - # Code Output: output of previously-generated python code - # - # Reasoning: Let's think step by step in order to ${produce the answer}. We ... - # - # Answer: often between 1 and 5 words - # - # --- - # - # Question: What is 1+1? 
- # - # Code: result = 1+1 - # - # Code Output: 2 - # - # Reasoning: Let's think step by step in order to Reason_B - # - # Answer: 2""" - # ) - # def test_pot_code_generation_with_error(): pot = ProgramOfThought(BasicQA) - lm = DummyLM( - [ - "Reason_A", - "```python\nresult = 1+0/0\n```", - "Reason_B", # Error: division by zero - "```python\nresult = 1+1\n```", - "Reason_C", - "2", - ] - ) lm = DummyLanguageModel( answers=[ @@ -91,66 +40,3 @@ def test_pot_code_generation_with_error(): dspy.settings.configure(backend=backend, cache=False) res = pot(question="What is 1+1?") assert res.answer == "2" - - # The first code example failed - # assert lm.get_convo(index=2) == textwrap.dedent( - # """\ - # You are given `question`, `previous_code`, `error` due to an error in previous code. - # Your task is to correct the error and provide the new `generated_code`. - # - # --- - # - # Follow the following format. - # - # Question: ${question} - # - # Previous Code: previously-generated python code that errored - # - # Error: error message from previously-generated python code - # - # Reasoning: Let's think step by step in order to ${produce the generated_code}. We ... - # - # Code: python code that answers the question - # - # --- - # - # Question: What is 1+1? - # - # Previous Code: result = 1+0/0 - # - # Error: division by zero - # - # Reasoning: Let's think step by step in order to Reason_B""" - # ) - # - # # The second code example succeeded - # assert lm.get_convo(-1) == textwrap.dedent( - # """\ - # Given the final code `question`, `final_generated_code`, `code_output`, provide the final `answer`. - # - # --- - # - # Follow the following format. - # - # Question: ${question} - # - # Code: python code that answers the question - # - # Code Output: output of previously-generated python code - # - # Reasoning: Let's think step by step in order to ${produce the answer}. We ... - # - # Answer: often between 1 and 5 words - # - # --- - # - # Question: What is 1+1? 
- # - # Code: result = 1+1 - # - # Code Output: 2 - # - # Reasoning: Let's think step by step in order to Reason_C - # - # Answer: 2""" - # ) diff --git a/tests/primitives/test_program.py b/tests/primitives/test_program.py index 461fba6c43..92c26961ac 100644 --- a/tests/primitives/test_program.py +++ b/tests/primitives/test_program.py @@ -6,6 +6,7 @@ from dspy.utils import DummyLanguageModel from dspy.backends import TemplateBackend from dspy.utils import DummyLM +from dspy.utils.testing import clean_up_lm_test class HopModule(dspy.Module): @@ -45,6 +46,7 @@ def test_predictors(): ), "All returned items should be instances of PredictMock" +@clean_up_lm_test def test_forward(): dspy.settings.configure(experimental=False) diff --git a/tests/signatures/test_signature.py b/tests/signatures/test_signature.py index d0eb899d13..4984e6d8d9 100644 --- a/tests/signatures/test_signature.py +++ b/tests/signatures/test_signature.py @@ -5,7 +5,7 @@ from typing import List import dspy -from dspy.utils.dummies import DummyLM +from dspy.utils import DummyLM, clean_up_lm_test def test_field_types_and_custom_attributes(): @@ -43,8 +43,12 @@ class TestSignature(Signature): input = InputField(prefix="Modified:") output = OutputField() - assert TestSignature.input_fields["input"].json_schema_extra["prefix"] == "Modified:" - assert TestSignature.output_fields["output"].json_schema_extra["prefix"] == "Output:" + assert ( + TestSignature.input_fields["input"].json_schema_extra["prefix"] == "Modified:" + ) + assert ( + TestSignature.output_fields["output"].json_schema_extra["prefix"] == "Output:" + ) def test_signature_parsing(): @@ -69,7 +73,10 @@ def test_with_updated_field(): assert signature1 is not signature2, "The type should be immutable" for key in signature1.fields.keys(): if key != "input1": - assert signature1.fields[key].json_schema_extra == signature2.fields[key].json_schema_extra + assert ( + signature1.fields[key].json_schema_extra + == signature2.fields[key].json_schema_extra + ) assert signature1.instructions == signature2.instructions @@ -96,14 +103,18 @@ def test_signature_instructions_none(): def test_signature_from_dict(): - signature = Signature({"input1": InputField(), "input2": InputField(), "output": OutputField()}) + signature = Signature( + {"input1": InputField(), "input2": InputField(), "output": OutputField()} + ) for k in ["input1", "input2", "output"]: assert k in signature.fields assert signature.fields[k].annotation == str def test_signature_from_dict(): - signature = Signature({"input1": InputField(), "input2": InputField(), "output": OutputField()}) + signature = Signature( + {"input1": InputField(), "input2": InputField(), "output": OutputField()} + ) assert "input1" in signature.input_fields assert "input2" in signature.input_fields assert "output" in signature.output_fields @@ -180,6 +191,7 @@ class SubSignature(Signature): assert isinstance(value, SubSignature) +@clean_up_lm_test def test_multiline_instructions(): class MySignature(Signature): """First line @@ -193,7 +205,8 @@ class MySignature(Signature): dspy.settings.configure(lm=lm) assert predictor().output == "short answer" - assert lm.get_convo(-1) == textwrap.dedent("""\ + assert lm.get_convo(-1) == textwrap.dedent( + """\ First line Second line @@ -205,4 +218,5 @@ class MySignature(Signature): --- - Output: short answer""") + Output: short answer""" + ) diff --git a/tests/teleprompt/test_copro_optimizer.py b/tests/teleprompt/test_copro_optimizer.py index 64da88b6b1..cf1bc985d6 100644 --- 
a/tests/teleprompt/test_copro_optimizer.py +++ b/tests/teleprompt/test_copro_optimizer.py @@ -2,7 +2,7 @@ import dspy from dspy.backends.template import TemplateBackend from dspy.teleprompt.signature_opt import COPRO -from dspy.utils.dummies import DummyLM, DummyLanguageModel +from dspy.utils import DummyLM, DummyLanguageModel, clean_up_lm_test from dspy import Example @@ -25,9 +25,7 @@ def simple_metric(example, prediction): def test_signature_optimizer_initialization(): - optimizer = COPRO( - metric=simple_metric, breadth=2, depth=1, init_temperature=1.4 - ) + optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) assert optimizer.metric == simple_metric, "Metric not correctly initialized" assert optimizer.breadth == 2, "Breadth not correctly initialized" assert optimizer.depth == 1, "Depth not correctly initialized" @@ -46,11 +44,34 @@ def forward(self, **kwargs): return self.predictor(**kwargs) +@clean_up_lm_test def test_signature_optimizer_optimization_process(): - optimizer = COPRO( - metric=simple_metric, breadth=2, depth=1, init_temperature=1.4 + optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) + dspy.settings.configure( + lm=DummyLM(["Optimized instruction 1", "Optimized instruction 2"]) ) + student = SimpleModule("input -> output") + + # Assuming the compile method of COPRO requires a student module, a development set, and evaluation kwargs + optimized_student = optimizer.compile( + student, + trainset=trainset, + eval_kwargs={"num_threads": 1, "display_progress": False}, + ) + + # Check that the optimized student has been modified from the original + # This check can be more specific based on how the optimization modifies the student + assert optimized_student is not student, "Optimization did not modify the student" + + # Further tests can be added to verify the specifics of the optimization process, + # such as checking the instructions of the optimized student's predictors. + + +@clean_up_lm_test +def test_signature_optimizer_optimization_process_with_backend(): + optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) + lm = DummyLanguageModel( answers=[ [ @@ -79,10 +100,34 @@ def test_signature_optimizer_optimization_process(): # such as checking the instructions of the optimized student's predictors. 
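+# Note: `clean_up_lm_test` is imported from dspy.utils throughout these test
+# diffs, but its definition is not part of this excerpt. A plausible minimal
+# implementation would restore the global settings a test mutates, along
+# these lines (a sketch of the intent, not the actual helper):
+#
+#     def clean_up_lm_test(fn):
+#         def wrapper(*args, **kwargs):
+#             try:
+#                 return fn(*args, **kwargs)
+#             finally:
+#                 dspy.settings.configure(lm=None, backend=None)
+#         return wrapper
+#
+# The real decorator may reset more state (e.g. cache or experimental flags).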
+@clean_up_lm_test def test_signature_optimizer_statistics_tracking(): - optimizer = COPRO( - metric=simple_metric, breadth=2, depth=1, init_temperature=1.4 + optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) + optimizer.track_stats = True # Enable statistics tracking + + dspy.settings.configure(lm=DummyLM(["Optimized instruction"])) + student = SimpleModule("input -> output") + optimized_student = optimizer.compile( + student, + trainset=trainset, + eval_kwargs={"num_threads": 1, "display_progress": False}, ) + + # Verify that statistics have been tracked and attached to the optimized student + assert hasattr( + optimized_student, "total_calls" + ), "Total calls statistic not tracked" + assert hasattr( + optimized_student, "results_best" + ), "Best results statistics not tracked" + + +# Assuming the setup_signature_optimizer fixture and simple_metric function are defined as before + + +@clean_up_lm_test +def test_signature_optimizer_statistics_tracking_with_backend(): + optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) optimizer.track_stats = True # Enable statistics tracking lm = DummyLanguageModel( @@ -112,24 +157,17 @@ def test_signature_optimizer_statistics_tracking(): # Assuming the setup_signature_optimizer fixture and simple_metric function are defined as before +@clean_up_lm_test def test_optimization_and_output_verification(): - lm = DummyLanguageModel( - answers=[ - [ - "Optimized Prompt\n\nProposed Prefix For Output Field: Optimized Prefix: " - ], - ["The color of the sky\n\Output: blue"], - ["What the fox says\n\nOutput: Ring-ding-ding-ding-dingeringeding"], - ["the color of the sky\n\nOutput: blue"], - ["What the fox says\n\nOutput: Ring-ding-ding-ding-dingeringeding"], - ["Generate the capital of France\n\nOptimized Prefix: No more responses"], + lm = DummyLM( + [ + "Optimized Prompt", + "Optimized Prefix", ] ) - backend = TemplateBackend(lm=lm, attempts=1) - dspy.settings.configure(backend=backend, cache=False) - optimizer = COPRO( - metric=simple_metric, breadth=2, depth=1, init_temperature=1.4 - ) + dspy.settings.configure(lm=lm) + optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) + student = SimpleModule("input -> output") # Compile the student with the optimizer @@ -138,13 +176,16 @@ def test_optimization_and_output_verification(): trainset=trainset, eval_kwargs={"num_threads": 1, "display_progress": False}, ) + # Simulate calling the optimized student with a new input test_input = "What is the capital of France?" prediction = optimized_student(input=test_input) + print(lm.get_convo(-1)) + assert prediction.output == "No more responses" - assert backend.history[-1].prompt == textwrap.dedent( + assert lm.get_convo(-1) == textwrap.dedent( """\ Optimized Prompt @@ -153,16 +194,14 @@ def test_optimization_and_output_verification(): Follow the following format. Input: ${input} - Reasoning: Let's think step by step in order to ${produce the output}. We ... - - Optimized Prefix: ${output} + Optimized Prefix ${output} --- Input: What is the capital of France? 
- - Reasoning: Let's think step by step in order to""" + Reasoning: Let's think step by step in order to No more responses + Optimized Prefix No more responses""" ) @@ -177,9 +216,7 @@ def test_statistics_tracking_during_optimization(): backend = TemplateBackend(lm=lm, attempts=5) dspy.settings.configure(backend=backend, cache=False) - optimizer = COPRO( - metric=simple_metric, breadth=2, depth=1, init_temperature=1.4 - ) + optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) optimizer.track_stats = True # Enable statistics tracking student = SimpleModule("input -> output") diff --git a/tests/teleprompt/test_mipro_optimizer.py b/tests/teleprompt/test_mipro_optimizer.py index 506aaa555f..09d66cd949 100644 --- a/tests/teleprompt/test_mipro_optimizer.py +++ b/tests/teleprompt/test_mipro_optimizer.py @@ -3,11 +3,9 @@ import re import dspy from dsp.modules import LM -from dspy.backends.lm.base import BaseLM, GeneratedContent from dspy.teleprompt.signature_opt_bayesian import MIPRO -from dspy.utils.dummies import DummyLM, DummyLanguageModel +from dspy.utils import DummyLM, clean_up_lm_test from dspy import Example -from dspy.backends import TemplateBackend # Define a simple metric function for testing @@ -45,45 +43,6 @@ def simple_metric(example, prediction, trace=None): ] -class ConditionalLanguageModel(BaseLM): - def generate(self, prompt: str, n: int = 1, **kwargs) -> list[GeneratedContent]: - if prompt.endswith("Observations:"): - answer = "(*silence*)" - elif prompt.endswith("Proposed Instruction:"): - answer = " Input: " - elif prompt.endswith("Proposed Prefix For Output Field:"): - answer = " Output: " - elif prompt.endswith("Summary:"): - answer = " summarizing..." - else: - pairs = re.findall(r"Input: (.*)\n\nOutput: (.*)", prompt) - - last = re.search(r"Input: (.*)\n\nReasoning: (.*)$", prompt) - current_question = last.group(1) - - if match := re.match(r"What is the capital of (.*?)\?", current_question): - country = match.group(1) - # If we had a previous example of a question about a capital, the model - # has learned the format, and will answer with question correctly. - if any("capital" in question for question, _ in pairs): - answer = (capitals | extra_capitals)[country] - # Otherwise, it is confused and will answer with the country's name. - else: - answer = country - - # For other questions, the model will answer with the last word of the question. - else: - answer = current_question.split()[-1] - - answer = "think deeply.\n\nOutput: " + answer - - dummy_response = [{"message": {"content": answer}} for _ in range(n)] - return dummy_response - - def count_tokens(self, prompt: str) -> int: - return len(prompt) - - class ConditionalLM(LM): def __init__(self): super().__init__("conditional-lm") @@ -99,13 +58,15 @@ def basic_request(self, prompt, num_candidates=1, **kwargs): elif prompt.endswith("Summary:"): answer = " summarizing..." 
else: - pairs = re.findall(r"Input: (.*?)\n(?:Reasoning:.*?\n)?Output: (.*?)\n", prompt, re.DOTALL) + pairs = re.findall( + r"Input: (.*?)\n(?:Reasoning:.*?\n)?Output: (.*?)\n", prompt, re.DOTALL + ) # breakpoint() print("PROMPT:", prompt) print("PAIRS:", pairs) - last = re.search(r"Input: (.*)\nReasoning: (.*)$", prompt) + last = re.search(r"Input: (.*)\nReasoning:(.*)$", prompt) current_question = last.group(1) if match := re.match(r"What is the capital of (.*?)\?", current_question): @@ -121,13 +82,13 @@ def basic_request(self, prompt, num_candidates=1, **kwargs): else: answer = current_question.split()[-1] - answer = "think deeply.\nOutput: " + answer + answer = " think deeply.\nOutput: " + answer RED, GREEN, RESET = "\033[91m", "\033[92m", "\033[0m" - print("=== DummyLM ===") - print(prompt, end="") - print(f"{RED}{answer}{RESET}") - print("===") + # print("=== DummyLM ===") + # print(prompt, end="") + # print(f"{RED}{answer}{RESET}") + # print("===") dummy_response = {"choices": []} for _ in range(num_candidates): @@ -157,17 +118,23 @@ def get_convo(self, index): """get the prompt + anwer from the ith message""" return ( self.history[index]["prompt"] - + " " + # + " " + self.history[index]["response"]["choices"][0]["text"] ) def test_bayesian_signature_optimizer_initialization(): optimizer = MIPRO( - metric=simple_metric, num_candidates=10, init_temperature=1.4, verbose=True, track_stats=True + metric=simple_metric, + num_candidates=10, + init_temperature=1.4, + verbose=True, + track_stats=True, ) assert optimizer.metric == simple_metric, "Metric not correctly initialized" - assert optimizer.num_candidates == 10, "Incorrect 'num_candidates' parameter initialization" + assert ( + optimizer.num_candidates == 10 + ), "Incorrect 'num_candidates' parameter initialization" assert ( optimizer.init_temperature == 1.4 ), "Initial temperature not correctly initialized" @@ -185,10 +152,10 @@ def forward(self, **kwargs): return self.predictor(**kwargs) +@clean_up_lm_test def test_signature_optimizer_optimization_process(): - lm = ConditionalLanguageModel() - backend = TemplateBackend(lm=lm, attempts=5) - dspy.settings.configure(backend=backend, cache=False) + lm = ConditionalLM() + dspy.settings.configure(lm=lm) student = SimpleModule(signature="input -> output") @@ -214,12 +181,11 @@ def test_signature_optimizer_optimization_process(): assert len(optimized_student.predictor.demos) == 5 +@clean_up_lm_test def test_signature_optimizer_bad_lm(): - lm = DummyLanguageModel( - answers=[[f"Optimized instruction {i}\n\nOutput: a"] for i in range(65)] + dspy.settings.configure( + lm=DummyLM([f"Optimized instruction {i}" for i in range(30)]) ) - backend = TemplateBackend(lm=lm, attempts=5) - dspy.settings.configure(backend=backend, cache=False) student = SimpleModule(signature="input -> output") optimizer = MIPRO( metric=simple_metric, @@ -244,12 +210,12 @@ def test_signature_optimizer_bad_lm(): ) +@clean_up_lm_test def test_optimization_and_output_verification(): # Make a language model that is always right, except on the last # example in the train set. - lm = ConditionalLanguageModel() - backend = TemplateBackend(lm=lm, attempts=5) - dspy.settings.configure(backend=backend, cache=False) + lm = ConditionalLM() + dspy.settings.configure(lm=lm) optimizer = MIPRO( metric=simple_metric, @@ -276,9 +242,12 @@ def test_optimization_and_output_verification(): test_input = "What is the capital of Spain?" 
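+    # Spain is deliberately absent from the training questions, and the fake
+    # ConditionalLM only answers a capital question correctly once a solved
+    # capital Q/A pair already appears in the prompt; a correct "Madrid"
+    # below therefore indirectly verifies that the compiled program carries
+    # useful demos. (Inferred from the fake LM's behavior above, not from
+    # MIPRO internals.)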
prediction = optimized_student(input=test_input) + print("CORRECT ANSWER") + print(lm.get_convo(-1)) + assert prediction.output == "Madrid" - print(backend.history[-1].prompt) - assert backend.history[-1].prompt == textwrap.dedent( + + expected_lm_output = textwrap.dedent( """\ Input: @@ -287,36 +256,29 @@ def test_optimization_and_output_verification(): Follow the following format. Input: ${input} - Reasoning: Let's think step by step in order to ${produce the output}. We ... - Output: ${output} --- Input: What is the capital of France? - Reasoning: Let's think step by step in order to think deeply. Output: Paris --- Input: What is the capital of Norway? - - Reasoning: Let's think step by step in order to think deeply. - Output: Oslo --- Input: What does the fox say? - Output: Ring-ding-ding-ding-dingeringeding! --- Input: What is the capital of Spain? - - Reasoning: Let's think step by step in order to""" + Reasoning: Let's think step by step in order to think deeply. + Output: Madrid""" ) - assert lm.get_convo(-1) == expected_lm_output \ No newline at end of file + assert lm.get_convo(-1) == expected_lm_output, lm.get_convo(-1) From 64093bb43a712c7d5c36627c685e4dc6576d2ae7 Mon Sep 17 00:00:00 2001 From: KCaverly Date: Mon, 18 Mar 2024 21:14:23 -0400 Subject: [PATCH 231/243] chore: ruff fixes for backends --- dspy/backends/base.py | 12 ++++++---- dspy/backends/lm/__init__.py | 2 +- dspy/backends/lm/base.py | 11 ++++----- dspy/backends/lm/litellm.py | 8 +++---- dspy/backends/template.py | 43 ++++++++++++++++++++---------------- 5 files changed, 43 insertions(+), 33 deletions(-) diff --git a/dspy/backends/base.py b/dspy/backends/base.py index 4ec35f3e94..b4b73f47d8 100644 --- a/dspy/backends/base.py +++ b/dspy/backends/base.py @@ -3,8 +3,8 @@ from pydantic import BaseModel, Field -from dspy.signatures.signature import Signature, ensure_signature from dspy.primitives.prediction import Completions +from dspy.signatures.signature import Signature, ensure_signature class BaseBackend(BaseModel, ABC): @@ -16,12 +16,17 @@ class BaseBackend(BaseModel, ABC): def __call__( self, signature: Signature, - config: dict[str, t.Any] = {}, + config: dict[str, t.Any] = None, attempts: int = 1, **kwargs, ) -> Completions: + if config is None: + config = {} + # Allow overriding the attempts at the Backend Initialization Step attempts = max(attempts, self.attempts) + if attempts < 1: + raise ValueError("'attempts' argument passed must be greater than 0.") # Recursively complete generation, until at least one complete completion is available. signature = ensure_signature(signature) @@ -53,11 +58,10 @@ def __call__( i += 1 - assert completions is not None completions.remove_incomplete() if len(completions) == 0: raise Exception( - "Generation failed, recursively attempts to complete did not succeed." 
+ "Generation failed, recursively attempts to complete did not succeed.", ) self.history.append(completions) diff --git a/dspy/backends/lm/__init__.py b/dspy/backends/lm/__init__.py index 6638854db5..c0368b203f 100644 --- a/dspy/backends/lm/__init__.py +++ b/dspy/backends/lm/__init__.py @@ -1 +1 @@ -from .litellm import * +from .litellm import LiteLM diff --git a/dspy/backends/lm/base.py b/dspy/backends/lm/base.py index 979e9645af..bbcf117fc2 100644 --- a/dspy/backends/lm/base.py +++ b/dspy/backends/lm/base.py @@ -1,12 +1,12 @@ -import dspy import os -from pathlib import Path import typing as t from abc import ABC, abstractmethod +from pathlib import Path -from pydantic import BaseModel, Field from joblib import Memory +from pydantic import BaseModel, Field +import dspy _cachedir = os.environ.get("DSP_CACHEDIR") or str(Path.home() / ".joblib_cache") _cache_memory = Memory(_cachedir, verbose=0) @@ -23,7 +23,7 @@ class LMOutput(BaseModel): class BaseLM(BaseModel, ABC): history: list[LMOutput] = Field(default_factory=list) - def __init__(self, *args, **kwargs): + def __init__(self, *args: t.Any, **kwargs): super().__init__(*args, **kwargs) self._generate_with_cache = _cache_memory.cache(self.generate) @@ -33,7 +33,8 @@ def __call__(self, prompt: str, **kwargs) -> LMOutput: generations = generator(prompt, **kwargs) # This is necessary to satisfy the type checked for memoized functions - assert generations is not None + if generations is None: + raise ValueError("Generator failed to create generations.") output = LMOutput(prompt=prompt, generations=generations, kwargs=kwargs) self.history.append(output) diff --git a/dspy/backends/lm/litellm.py b/dspy/backends/lm/litellm.py index 8781053fc4..332159afc8 100644 --- a/dspy/backends/lm/litellm.py +++ b/dspy/backends/lm/litellm.py @@ -3,7 +3,6 @@ from litellm import ModelResponse, completion, token_counter from pydantic import Field - from .base import BaseLM, GeneratedContent @@ -32,10 +31,11 @@ def generate( messages=[{"role": "user", "content": prompt}], **options, ) - assert type(response) == ModelResponse - choices = [dict(c) for c in response.choices if c["finish_reason"] != "length"] - return choices + if type(response) != ModelResponse: + raise AssertionError("Response from completion incorrect type/format") + + return [dict(c) for c in response.choices if c["finish_reason"] != "length"] def count_tokens(self, prompt: str) -> int: """Counts the number of tokens for a specific prompt.""" diff --git a/dspy/backends/template.py b/dspy/backends/template.py index e021cee70c..65e6917d3c 100644 --- a/dspy/backends/template.py +++ b/dspy/backends/template.py @@ -1,10 +1,10 @@ -from dspy.signatures.signature import Signature, SignatureMeta +import typing as t + from dspy.primitives.example import Example -from dspy.primitives.template import Template from dspy.primitives.prediction import Completions +from dspy.primitives.template import Template +from dspy.signatures.signature import Signature, SignatureMeta - -import typing as t from .base import BaseBackend from .lm.litellm import BaseLM @@ -17,18 +17,24 @@ class TemplateBackend(BaseBackend): def generate( self, signature: Signature, - demos: list[str] = [], - config: dict[str, t.Any] = {}, + demos: list[str] = None, + config: dict[str, t.Any] = None, **kwargs, ) -> Completions: - """Wrap the signature and demos into an example, and pass through the Language Model, returning Signature compliant output""" + """Wrap the signature and demos into an example, and pass through the Language Model, 
returning Signature compliant output.""" + if config is None: + config = {} - if not all(k in kwargs for k in signature.input_fields): - present = [k for k in signature.input_fields if k in kwargs] - missing = [k for k in signature.input_fields if k not in kwargs] - print( - f"WARNING: Not all input fields were provided to module. Present: {present}. Missing: {missing}." - ) + if demos is None: + demos = [] + + # TODO: Move this check to logging + # if not all(k in kwargs for k in signature.input_fields): + # present = [k for k in signature.input_fields if k in kwargs] + # missing = [k for k in signature.input_fields if k not in kwargs] + # print( + # f"WARNING: Not all input fields were provided to module. Present: {present}. Missing: {missing}.", + # ) # Generate Example example = Example(demos=demos, **kwargs) @@ -37,8 +43,8 @@ def generate( template = Template(signature) # Clean Up Kwargs Before Sending Through Language Model - for input in signature.input_fields: - del kwargs[input] + for field in signature.input_fields: + del kwargs[field] pred = self.lm(template(example), **config) @@ -48,13 +54,12 @@ def generate( for prediction in pred.generations ] - assert type(signature) == SignatureMeta, type(signature) + if type(signature) != SignatureMeta: + raise AssertionError("Signature not provided appropriately.") - completions = Completions.new( + return Completions.new( signature=signature, examples=extracted_examples, prompt=pred.prompt, kwargs=pred.kwargs, ) - - return completions From d007233a148b2daeceeb0dab38e50de852ad22d5 Mon Sep 17 00:00:00 2001 From: KCaverly Date: Tue, 19 Mar 2024 11:02:04 -0400 Subject: [PATCH 232/243] chore: ruff check and fix --- dspy/backends/instructor.py | 1 - dspy/backends/json.py | 6 ++-- dspy/backends/lm/__init__.py | 1 + dspy/evaluate/loss.py | 2 +- dspy/functional/functional.py | 26 +++++++------- dspy/predict/aggregation.py | 2 +- dspy/predict/predict.py | 8 ++--- dspy/primitives/prediction.py | 12 +++---- dspy/primitives/template.py | 16 +++++---- dspy/teleprompt/bootstrap.py | 10 +++--- dspy/teleprompt/copro_optimizer.py | 26 +++++++------- dspy/teleprompt/mipro_optimizer.py | 56 +++++++++++++++--------------- dspy/teleprompt/signature_opt.py | 9 ++--- dspy/utils/dummies.py | 11 +++--- dspy/utils/testing.py | 3 +- 15 files changed, 93 insertions(+), 96 deletions(-) diff --git a/dspy/backends/instructor.py b/dspy/backends/instructor.py index 1606ea06e2..5ec48851ee 100644 --- a/dspy/backends/instructor.py +++ b/dspy/backends/instructor.py @@ -1,4 +1,3 @@ -import typing as t from dspy.backends.lm.base import MinimalLM from dspy.signatures.signature import Signature diff --git a/dspy/backends/json.py b/dspy/backends/json.py index c80bdf2328..7271cef833 100644 --- a/dspy/backends/json.py +++ b/dspy/backends/json.py @@ -1,10 +1,10 @@ import json import typing as t -from dspy.signatures.signature import Signature + from dspy.primitives.example import Example -from dspy.primitives.template import Template from dspy.primitives.prediction import Completions - +from dspy.primitives.template import Template +from dspy.signatures.signature import Signature from .base import BaseBackend from .lm import BaseLM diff --git a/dspy/backends/lm/__init__.py b/dspy/backends/lm/__init__.py index c0368b203f..6f0908e8bf 100644 --- a/dspy/backends/lm/__init__.py +++ b/dspy/backends/lm/__init__.py @@ -1 +1,2 @@ +from .base import BaseLM from .litellm import LiteLM diff --git a/dspy/evaluate/loss.py b/dspy/evaluate/loss.py index 5f7d56aca0..d2731b03e5 100644 --- 
a/dspy/evaluate/loss.py +++ b/dspy/evaluate/loss.py @@ -1,5 +1,5 @@ from dataclasses import dataclass, field -from datetime import datetime, UTC +from datetime import UTC, datetime from typing import Callable from snoop import snoop diff --git a/dspy/functional/functional.py b/dspy/functional/functional.py index 2fc3ada9a8..044bf6f9ce 100644 --- a/dspy/functional/functional.py +++ b/dspy/functional/functional.py @@ -8,8 +8,8 @@ import dspy from dsp.templates import passages2text -from dspy.signatures.signature import ensure_signature, make_signature from dspy.primitives.prediction import Completions, Prediction +from dspy.signatures.signature import ensure_signature, make_signature def predictor(func) -> dspy.Module: @@ -152,18 +152,18 @@ def parse(x): inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel) ): type_ = pydantic.create_model( - "Output", value=(type_, ...), __base__=pydantic.BaseModel + "Output", value=(type_, ...), __base__=pydantic.BaseModel, ) to_json = lambda x, type_=type_: type_( - value=x + value=x, ).model_dump_json()[ 9:-1 ] # {"value":"123"} from_json = lambda x, type_=type_: type_.model_validate_json( - '{"value":' + x + "}" + '{"value":' + x + "}", ).value schema = json.dumps( - type_.model_json_schema()["properties"]["value"] + type_.model_json_schema()["properties"]["value"], ) else: to_json = lambda x: x.model_dump_json() @@ -178,13 +178,13 @@ def parse(x): and issubclass(type_, pydantic.BaseModel) ): type_ = pydantic.create_model( - "Output", value=(type_, ...), __base__=pydantic.BaseModel + "Output", value=(type_, ...), __base__=pydantic.BaseModel, ) to_json = lambda x, type_=type_: type_( - value=x + value=x, ).model_dump_json() from_json = lambda x, type_=type_: type_.model_validate_json( - x + x, ).value schema = json.dumps(type_.model_json_schema()) else: @@ -207,7 +207,7 @@ def parse(x): x if isinstance(x, str) else to_json(x) ), parser=lambda x, from_json=from_json: from_json( - _unwrap_json(x) + _unwrap_json(x), ), type_=type_, ) @@ -220,7 +220,7 @@ def parse(x): elif typing.get_origin(type_) in (List, list, Tuple, tuple): (inner_type,) = typing.get_args(type_) if inspect.isclass(inner_type) and issubclass( - inner_type, pydantic.BaseModel + inner_type, pydantic.BaseModel, ): format_ = ( lambda x: x @@ -323,7 +323,7 @@ def forward(self, **kwargs) -> dspy.Prediction: examples.append(example) completions = Completions.new( - signature=signature, examples=examples, prompt="unknown", kwargs={} + signature=signature, examples=examples, prompt="unknown", kwargs={}, ) pred = Prediction.from_completions(completions) @@ -375,7 +375,7 @@ def _func_to_signature(func): annotation = annotations.get("return", str) if typing.get_origin(annotation) is Annotated: desc = next( - (arg for arg in typing.get_args(annotation) if isinstance(arg, str)), None + (arg for arg in typing.get_args(annotation) if isinstance(arg, str)), None, ) if desc is not None: kwargs["desc"] = desc @@ -395,5 +395,5 @@ def _unwrap_json(output): if not output.startswith("{") or not output.endswith("}"): raise ValueError("json output should start and end with { and }") return ujson.dumps( - ujson.loads(output) + ujson.loads(output), ) # ujson is a bit more robust than the standard json diff --git a/dspy/predict/aggregation.py b/dspy/predict/aggregation.py index 45e25def06..38843e59ac 100644 --- a/dspy/predict/aggregation.py +++ b/dspy/predict/aggregation.py @@ -1,5 +1,5 @@ from dsp.utils import normalize_text -from dspy.primitives.prediction import Completions, Prediction +from 
dspy.primitives.prediction import Prediction default_normalize = lambda s: normalize_text(s) or None diff --git a/dspy/predict/predict.py b/dspy/predict/predict.py index 35db8cd817..70c61ff5cc 100644 --- a/dspy/predict/predict.py +++ b/dspy/predict/predict.py @@ -1,7 +1,7 @@ -import dspy import random import dsp +import dspy from dspy.predict.parameter import Parameter from dspy.primitives.prediction import Completions, Prediction from dspy.signatures.signature import ensure_signature, signature_to_template @@ -73,7 +73,7 @@ def forward(self, **kwargs): present = [k for k in signature.input_fields if k in kwargs] missing = [k for k in signature.input_fields if k not in kwargs] print( - f"WARNING: Not all input fields were provided to module. Present: {present}. Missing: {missing}." + f"WARNING: Not all input fields were provided to module. Present: {present}. Missing: {missing}.", ) completions = backend(signature, demos=demos, config=config, **kwargs) @@ -99,7 +99,7 @@ def forward(self, **kwargs): num_generations = config.get("n") if num_generations is None: num_generations = lm.kwargs.get( - "n", lm.kwargs.get("num_generations", None) + "n", lm.kwargs.get("num_generations", None), ) if (temperature is None or temperature <= 0.15) and num_generations > 1: @@ -114,7 +114,7 @@ def forward(self, **kwargs): present = [k for k in signature.input_fields if k in kwargs] missing = [k for k in signature.input_fields if k not in kwargs] print( - f"WARNING: Not all input fields were provided to module. Present: {present}. Missing: {missing}." + f"WARNING: Not all input fields were provided to module. Present: {present}. Missing: {missing}.", ) # Switch to legacy format for dsp.generate diff --git a/dspy/primitives/prediction.py b/dspy/primitives/prediction.py index 63c1357614..811d586c26 100644 --- a/dspy/primitives/prediction.py +++ b/dspy/primitives/prediction.py @@ -1,9 +1,11 @@ import typing as t +from collections import Counter + from pydantic import BaseModel, ConfigDict -from dspy.primitives.example import Example -from dspy.signatures.signature import SignatureMeta, Signature + from dsp.utils import normalize_text -from collections import Counter +from dspy.primitives.example import Example +from dspy.signatures.signature import Signature, SignatureMeta default_normalize = lambda s: normalize_text(s) or None @@ -81,9 +83,7 @@ def get_farthest_example(self) -> Example: def filter(self, field: str, value: str): i = 0 while i < len(self.examples): - if field not in self.examples[i]: - del self.examples[i] - elif normalize_text(self.examples[i][field]) != value: + if field not in self.examples[i] or normalize_text(self.examples[i][field]) != value: del self.examples[i] else: i += 1 diff --git a/dspy/primitives/template.py b/dspy/primitives/template.py index 3801cd54be..ebaab58300 100644 --- a/dspy/primitives/template.py +++ b/dspy/primitives/template.py @@ -1,14 +1,16 @@ -import regex import typing as t -from dspy.signatures.signature import Signature + +import regex + from dspy.primitives.example import Example +from dspy.signatures.signature import Signature def passages_to_text(passages: t.Iterable[str]) -> str: assert len(passages) > 0 if len(passages) > 1: return "\n".join( - [f"[{idx + 1}] <<{text}>>" for idx, text in enumerate(passages)] + [f"[{idx + 1}] <<{text}>>" for idx, text in enumerate(passages)], ) else: return passages[0] @@ -68,7 +70,7 @@ def query(self, example: Example, is_demo: bool) -> str: format_handler = self._get_format_handler(name) result.append( - 
f"{field.json_schema_extra['prefix']} {format_handler(example[name])}" + f"{field.json_schema_extra['prefix']} {format_handler(example[name])}", ) for name, field in self.signature.output_fields.items(): @@ -79,7 +81,7 @@ def query(self, example: Example, is_demo: bool) -> str: break elif name in example: result.append( - f"{field.json_schema_extra['prefix']} {format_handler(example[name])}" + f"{field.json_schema_extra['prefix']} {format_handler(example[name])}", ) return "\n\n".join(result) @@ -97,7 +99,7 @@ def guidelines(self, is_json: bool = False) -> str: field_strings.append(f"{name}: {field.json_schema_extra['desc']}") else: field_strings.append( - f"{field.json_schema_extra['prefix']} {field.json_schema_extra['desc']}" + f"{field.json_schema_extra['prefix']} {field.json_schema_extra['desc']}", ) return result + "\n\n".join(field_strings) @@ -173,7 +175,7 @@ def extract(self, example: Example, raw_pred: str) -> Example: return example def __call__( - self, example: Example, show_guidelines: bool = True, is_json: bool = False + self, example: Example, show_guidelines: bool = True, is_json: bool = False, ) -> str: prompt_spans = [] diff --git a/dspy/teleprompt/bootstrap.py b/dspy/teleprompt/bootstrap.py index 40eca81601..b2735664ea 100644 --- a/dspy/teleprompt/bootstrap.py +++ b/dspy/teleprompt/bootstrap.py @@ -85,7 +85,7 @@ def _prepare_student_and_teacher(self, student, teacher): ): teleprompter = LabeledFewShot(k=self.max_labeled_demos) self.teacher = teleprompter.compile( - self.teacher.reset_copy(), trainset=self.trainset + self.teacher.reset_copy(), trainset=self.trainset, ) def _prepare_predictor_mappings(self): @@ -98,7 +98,7 @@ def _prepare_predictor_mappings(self): ), "Student and teacher must have the same number of predictors." for (name1, predictor1), (name2, predictor2) in zip( - student.named_predictors(), teacher.named_predictors() + student.named_predictors(), teacher.named_predictors(), ): assert ( name1 == name2 @@ -107,7 +107,7 @@ def _prepare_predictor_mappings(self): predictor2.signature ,), f"Student and teacher must have the same signatures. {type(predictor1.signature)} != {type(predictor2.signature)}" assert id(predictor1) != id( - predictor2 + predictor2, ), "Student and teacher must be different objects." name2predictor[name1] = None # dict(student=predictor1, teacher=predictor2) @@ -205,7 +205,7 @@ def _bootstrap_one_example(self, example, round_idx=0): if "dspy_uuid" in example: demo = Example( - augmented=True, dspy_uuid=example.dspy_uuid, **inputs, **outputs + augmented=True, dspy_uuid=example.dspy_uuid, **inputs, **outputs, ) else: # TODO: FIXME: This is a hack. RandomSearch will complain for now in this edge case. @@ -241,7 +241,7 @@ def _train(self): augmented_demos = self.name2traces[name][: self.max_bootstrapped_demos] sample_size = min( - self.max_labeled_demos - len(augmented_demos), len(raw_demos) + self.max_labeled_demos - len(augmented_demos), len(raw_demos), ) sample_size = max(0, sample_size) diff --git a/dspy/teleprompt/copro_optimizer.py b/dspy/teleprompt/copro_optimizer.py index eceac84ce5..e95d400dd2 100644 --- a/dspy/teleprompt/copro_optimizer.py +++ b/dspy/teleprompt/copro_optimizer.py @@ -37,10 +37,10 @@ class BasicGenerateInstruction(Signature): """You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. Your task is to propose an instruction that will lead a good language model to perform the task well. 
Don't be afraid to be creative.""" basic_instruction = dspy.InputField( - desc="The initial instructions before optimization" + desc="The initial instructions before optimization", ) proposed_instruction = dspy.OutputField( - desc="The improved instructions for the language model" + desc="The improved instructions for the language model", ) proposed_prefix_for_output_field = dspy.OutputField( desc="The string at the end of the prompt, which will help the model start solving the task", @@ -55,7 +55,7 @@ class GenerateInstructionGivenAttempts(dspy.Signature): attempted_instructions = dspy.InputField(format=dsp.passages2text) proposed_instruction = dspy.OutputField( - desc="The improved instructions for the language model" + desc="The improved instructions for the language model", ) proposed_prefix_for_output_field = dspy.OutputField( desc="The string at the end of the prompt, which will help the model start solving the task", @@ -85,7 +85,7 @@ def __init__( def _check_candidates_equal(self, candidate1, candidate2): for p1, p2 in zip( - candidate1["program"].predictors(), candidate2["program"].predictors() + candidate1["program"].predictors(), candidate2["program"].predictors(), ): if ( self._get_signature(p1).instructions @@ -123,7 +123,7 @@ def _print_signature(self, predictor): signature = self._get_signature(predictor) print(f"i: {signature.instructions}") print( - f"p: {list(signature.fields.values())[-1].json_schema_extra['prefix']}" + f"p: {list(signature.fields.values())[-1].json_schema_extra['prefix']}", ) print() @@ -204,7 +204,7 @@ def compile(self, student, *, trainset, eval_kwargs): # Go through our module's predictors for p_i, (p_old, p_new) in enumerate( - zip(module.predictors(), module_clone.predictors()) + zip(module.predictors(), module_clone.predictors()), ): candidates_ = latest_candidates[ id(p_old) @@ -244,7 +244,7 @@ def compile(self, student, *, trainset, eval_kwargs): score = evaluate(module_clone, devset=trainset, **eval_kwargs) if self.verbose and self.prompt_model: print( - f"prompt_model.inspect_history(n=1) {self.prompt_model.inspect_history(n=1)}" + f"prompt_model.inspect_history(n=1) {self.prompt_model.inspect_history(n=1)}", ) total_calls += 1 if self.verbose: @@ -281,7 +281,7 @@ def compile(self, student, *, trainset, eval_kwargs): results_latest[id(p_old)]["depth"].append(d) results_latest[id(p_old)]["max"].append(max(latest_scores)) results_latest[id(p_old)]["average"].append( - sum(latest_scores) / len(latest_scores) + sum(latest_scores) / len(latest_scores), ) results_latest[id(p_old)]["min"].append(min(latest_scores)) results_latest[id(p_old)]["std"].append(np.std(latest_scores)) @@ -329,7 +329,7 @@ def compile(self, student, *, trainset, eval_kwargs): results_best[id(p_base)]["depth"].append(d) results_best[id(p_base)]["max"].append(max(scores)) results_best[id(p_base)]["average"].append( - sum(scores) / len(scores) + sum(scores) / len(scores), ) results_best[id(p_base)]["min"].append(min(scores)) results_best[id(p_base)]["std"].append(np.std(scores)) @@ -337,13 +337,13 @@ def compile(self, student, *, trainset, eval_kwargs): for i in range(shortest_len - 1, -1, -1): # breakpoint() attempts.append( - f'Instruction #{shortest_len-i}: {best_predictors[i]["instruction"]}' + f'Instruction #{shortest_len-i}: {best_predictors[i]["instruction"]}', ) attempts.append( - f'Prefix #{shortest_len-i}: {best_predictors[i]["prefix"]}' + f'Prefix #{shortest_len-i}: {best_predictors[i]["prefix"]}', ) attempts.append( - f'Resulting Score #{shortest_len-i}: 
{best_predictors[i]["score"]}' + f'Resulting Score #{shortest_len-i}: {best_predictors[i]["score"]}', ) # Generate next batch of potential prompts to optimize, with previous attempts as input @@ -366,7 +366,7 @@ def compile(self, student, *, trainset, eval_kwargs): # Get candidates for each predictor new_candidates[id(p_base)] = instr.completions all_candidates[id(p_base)].completions.extend_examples( - instr.completions + instr.completions, ) if self.verbose and self.prompt_model: diff --git a/dspy/teleprompt/mipro_optimizer.py b/dspy/teleprompt/mipro_optimizer.py index 808c9f7103..c6938302ee 100644 --- a/dspy/teleprompt/mipro_optimizer.py +++ b/dspy/teleprompt/mipro_optimizer.py @@ -49,10 +49,10 @@ class BasicGenerateInstruction(Signature): """You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. Your task is to propose an instruction that will lead a good language model to perform the task well. Don't be afraid to be creative.""" basic_instruction = dspy.InputField( - desc="The initial instructions before optimization" + desc="The initial instructions before optimization", ) proposed_instruction = dspy.OutputField( - desc="The improved instructions for the language model" + desc="The improved instructions for the language model", ) proposed_prefix_for_output_field = dspy.OutputField( desc="The string at the end of the prompt, which will help the model start solving the task", @@ -63,11 +63,11 @@ class BasicGenerateInstructionWithDataObservations(Signature): """You are an instruction optimizer for large language models. I will give you a ``signature`` of fields (inputs and outputs) in English. I will also give you some ``observations`` I have made about the dataset and task. Your task is to propose an instruction that will lead a good language model to perform the task well. 
Don't be afraid to be creative.""" basic_instruction = dspy.InputField( - desc="The initial instructions before optimization" + desc="The initial instructions before optimization", ) observations = dspy.InputField(desc="Observations about the dataset and task") proposed_instruction = dspy.OutputField( - desc="The improved instructions for the language model" + desc="The improved instructions for the language model", ) proposed_prefix_for_output_field = dspy.OutputField( desc="The string at the end of the prompt, which will help the model start solving the task", @@ -82,12 +82,12 @@ class BasicGenerateInstructionWithExamples(dspy.Signature): # attempted_instructions = dspy.InputField(format=str, desc="Previously attempted task instructions, along with their resulting validation score, and an example of the instruction in use on a sample from our dataset.") basic_instruction = dspy.InputField( - desc="The initial instructions before optimization" + desc="The initial instructions before optimization", ) # examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task") examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task") proposed_instruction = dspy.OutputField( - desc="The improved instructions for the language model" + desc="The improved instructions for the language model", ) proposed_prefix_for_output_field = dspy.OutputField( desc="The string at the end of the prompt, which will help the model start solving the task", @@ -103,10 +103,10 @@ class BasicGenerateInstructionWithExamplesAndDataObservations(dspy.Signature): observations = dspy.InputField(desc="Observations about the dataset and task") examples = dspy.InputField(format=dsp.passages2text, desc="Example(s) of the task") basic_instruction = dspy.InputField( - desc="The initial instructions before optimization" + desc="The initial instructions before optimization", ) proposed_instruction = dspy.OutputField( - desc="The improved instructions for the language model" + desc="The improved instructions for the language model", ) proposed_prefix_for_output_field = dspy.OutputField( desc="The string at the end of the prompt, which will help the model start solving the task", @@ -131,7 +131,7 @@ class DatasetDescriptor(dspy.Signature): examples = dspy.InputField(desc="Sample data points from the dataset") observations = dspy.OutputField( - desc="Somethings that holds true for most or all of the data you observed" + desc="Somethings that holds true for most or all of the data you observed", ) @@ -145,7 +145,7 @@ class DatasetDescriptorWithPriorObservations(dspy.Signature): examples = dspy.InputField(desc="Sample data points from the dataset") prior_observations = dspy.InputField( - desc="Some prior observations I made about the data" + desc="Some prior observations I made about the data", ) observations = dspy.OutputField( desc="Somethings that holds true for most or all of the data you observed or COMPLETE if you have nothing to add", @@ -197,18 +197,18 @@ def _print_model_history(self, model, n=1): def _observe_data(self, trainset, max_iterations=10): upper_lim = min(len(trainset), self.view_data_batch_size) observation = dspy.Predict(DatasetDescriptor, n=1, temperature=1.0)( - examples=(trainset[0:upper_lim].__repr__()) + examples=(trainset[0:upper_lim].__repr__()), ) observations = observation["observations"] skips = 0 iterations = 0 for b in range( - self.view_data_batch_size, len(trainset), self.view_data_batch_size + self.view_data_batch_size, len(trainset), self.view_data_batch_size, 
): upper_lim = min(len(trainset), b + self.view_data_batch_size) output = dspy.Predict( - DatasetDescriptorWithPriorObservations, n=1, temperature=1.0 + DatasetDescriptorWithPriorObservations, n=1, temperature=1.0, )( prior_observations=observations, examples=(trainset[b:upper_lim].__repr__()), @@ -227,7 +227,7 @@ def _observe_data(self, trainset, max_iterations=10): observations += output["observations"] summary = dspy.Predict(ObservationSummarizer, n=1, temperature=1.0)( - observations=observations + observations=observations, ) return summary.summary @@ -302,13 +302,13 @@ def _generate_first_N_candidates( # noqa: N802 if example_set_i not in example_set: example_set[example_set_i] = [] fields_to_use = signature_to_template( - predictor.signature + predictor.signature, ).fields _input_variable_names = list( - self._get_signature(predictor).input_fields.keys() + self._get_signature(predictor).input_fields.keys(), ) example_string = self._create_example_string( - fields_to_use, example + fields_to_use, example, ) example_set[example_set_i].append(example_string) example_sets[id(predictor)] = example_set @@ -343,7 +343,7 @@ def _generate_first_N_candidates( # noqa: N802 instruct = new_instruct else: instruct.completions.extend_examples( - new_instruct.completions.examples + new_instruct.completions.examples, ) # Just data elif view_data: @@ -359,7 +359,7 @@ def _generate_first_N_candidates( # noqa: N802 elif view_examples: instruct = None for i in range( - 1, self.num_candidates + 1, self.num_candidates, ): # Note: skip over the first example set which is empty new_instruct = dspy.Predict( BasicGenerateInstructionWithExamples, @@ -450,7 +450,7 @@ def compile( and prompt models you intend to use. If the projected costs exceed your budget or expectations, you may consider: {YELLOW}- Reducing the number of trials (`num_trials`), the size of the trainset, or the number of LM calls in your program.{ENDC} - {YELLOW}- Using a cheaper task model to optimize the prompt.{ENDC}""" + {YELLOW}- Using a cheaper task model to optimize the prompt.{ENDC}""", ) user_confirmation_message = textwrap.dedent( @@ -460,7 +460,7 @@ def compile( If you would like to bypass this confirmation step in future executions, set the {YELLOW}`requires_permission_to_run`{ENDC} flag to {YELLOW}`False`.{ENDC} {YELLOW}Awaiting your input...{ENDC} - """ + """, ) print(user_message) @@ -515,12 +515,12 @@ def compile( teacher_settings=self.teacher_settings, ) candidate_program = tp.compile( - student=module.deepcopy(), trainset=shuffled_trainset + student=module.deepcopy(), trainset=shuffled_trainset, ) # Store the candidate demos for module_p, candidate_p in zip( - module.predictors(), candidate_program.predictors() + module.predictors(), candidate_program.predictors(), ): if id(module_p) not in demo_candidates: demo_candidates[id(module_p)] = [] @@ -564,7 +564,7 @@ def objective(trial): trial_logs[trial_num] = {} for p_old, p_new in zip( - baseline_program.predictors(), candidate_program.predictors() + baseline_program.predictors(), candidate_program.predictors(), ): # Get instruction candidates for our given predictor p_instruction_candidates = instruction_candidates[id(p_old)] @@ -596,7 +596,7 @@ def objective(trial): ) selected_prefix = ( selected_candidate.proposed_prefix_for_output_field.strip( - '"' + '"', ).strip() ) @@ -633,14 +633,14 @@ def objective(trial): end_index = min((i + 1) * batch_size, len(trainset)) split_trainset = trainset[start_index:end_index] split_score = evaluate( - candidate_program, 
devset=split_trainset, display_table=0 + candidate_program, devset=split_trainset, display_table=0, ) if self.verbose: print(f"{i}st split score: {split_score}") total_score += split_score * len(split_trainset) curr_weighted_avg_score = total_score / min( - (i + 1) * 100, len(trainset) + (i + 1) * 100, len(trainset), ) if self.verbose: print(f"curr average score: {curr_weighted_avg_score}") @@ -677,7 +677,7 @@ def objective(trial): # Run the trial objective_function = create_objective( - module, instruction_candidates, demo_candidates, evaluate, trainset + module, instruction_candidates, demo_candidates, evaluate, trainset, ) sampler = optuna.samplers.TPESampler(seed=seed) study = optuna.create_study(direction="maximize", sampler=sampler) diff --git a/dspy/teleprompt/signature_opt.py b/dspy/teleprompt/signature_opt.py index dc14bb57bc..696f5c1631 100644 --- a/dspy/teleprompt/signature_opt.py +++ b/dspy/teleprompt/signature_opt.py @@ -1,11 +1,6 @@ + + from .copro_optimizer import COPRO -import dsp -import dspy -from dspy.teleprompt.teleprompt import Teleprompter -from dspy.signatures import Signature -from dspy.evaluate.evaluate import Evaluate -from collections import defaultdict -from dspy.primitives.example import Example """ =============================================================== diff --git a/dspy/utils/dummies.py b/dspy/utils/dummies.py index 0d5e232ef0..ea80df36ad 100644 --- a/dspy/utils/dummies.py +++ b/dspy/utils/dummies.py @@ -1,26 +1,25 @@ import random import re +import typing as t from typing import Union import numpy as np from dsp.modules import LM from dsp.utils.utils import dotdict - -import typing as t from dspy.backends.lm.base import BaseLM, GeneratedContent from dspy.primitives.example import Example from dspy.primitives.prediction import ( Completions, ) -from dspy.signatures.signature import Signature, InputField, OutputField +from dspy.signatures.signature import InputField, OutputField, Signature class DummyLM(LM): """Dummy language model for unit testing purposes.""" def __init__( - self, answers: Union[list[str], dict[str, str]], follow_examples: bool = False + self, answers: Union[list[str], dict[str, str]], follow_examples: bool = False, ): """Initializes the dummy language model. Parameters: @@ -51,7 +50,7 @@ def basic_request(self, prompt, n=1, **kwargs) -> dict[str, list[dict[str, str]] # the "Follow the following format" section. 
answer = possible_answers[-1] print( - f"DummyLM got found previous example for {prefix} with value {answer=}" + f"DummyLM got found previous example for {prefix} with value {answer=}", ) else: print(f"DummyLM couldn't find previous example for {prefix=}") @@ -59,7 +58,7 @@ def basic_request(self, prompt, n=1, **kwargs) -> dict[str, list[dict[str, str]] if answer is None: if isinstance(self.answers, dict): answer = next( - (v for k, v in self.answers.items() if k in prompt), None + (v for k, v in self.answers.items() if k in prompt), None, ) else: if len(self.answers) > 0: diff --git a/dspy/utils/testing.py b/dspy/utils/testing.py index eb642dfb74..ec29d74491 100644 --- a/dspy/utils/testing.py +++ b/dspy/utils/testing.py @@ -1,6 +1,7 @@ -import dspy import decorator +import dspy + def clean_up_lm_test(func): def wrapper(func, *args, **kwargs): From 627391fbc4ce59617b834b6caf0b3fc5b69a39e5 Mon Sep 17 00:00:00 2001 From: KCaverly Date: Tue, 19 Mar 2024 11:14:20 -0400 Subject: [PATCH 233/243] fix: remove typing.Self --- dspy/primitives/prediction.py | 2 +- poetry.lock | 45 ++++++++++++++++++----------------- pyproject.toml | 1 + 3 files changed, 25 insertions(+), 23 deletions(-) diff --git a/dspy/primitives/prediction.py b/dspy/primitives/prediction.py index 811d586c26..f81139bb76 100644 --- a/dspy/primitives/prediction.py +++ b/dspy/primitives/prediction.py @@ -158,7 +158,7 @@ def get_majority( self, field: t.Optional[str] = None, normalize: t.Callable[[str], t.Optional[str]] = default_normalize, - ) -> t.Self: + ): if normalize is None: normalize = lambda x: x diff --git a/poetry.lock b/poetry.lock index 988b324884..037b1dd0a2 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. [[package]] name = "aiohttp" @@ -916,7 +916,7 @@ files = [ name = "decorator" version = "5.1.1" description = "Decorators for Humans" -optional = true +optional = false python-versions = ">=3.5" files = [ {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, @@ -959,7 +959,7 @@ profile = ["gprof2dot (>=2022.7.29)"] name = "distro" version = "1.9.0" description = "Distro - an OS platform information API" -optional = true +optional = false python-versions = ">=3.6" files = [ {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, @@ -2942,6 +2942,7 @@ description = "Nvidia JIT LTO Library" optional = false python-versions = ">=3" files = [ + {file = "nvidia_nvjitlink_cu12-12.4.99-py3-none-manylinux2014_aarch64.whl", hash = "sha256:75d6498c96d9adb9435f2bbdbddb479805ddfb97b5c1b32395c694185c20ca57"}, {file = "nvidia_nvjitlink_cu12-12.4.99-py3-none-manylinux2014_x86_64.whl", hash = "sha256:c6428836d20fe7e327191c175791d38570e10762edc588fb46749217cd444c74"}, {file = "nvidia_nvjitlink_cu12-12.4.99-py3-none-win_amd64.whl", hash = "sha256:991905ffa2144cb603d8ca7962d75c35334ae82bf92820b6ba78157277da1ad2"}, ] @@ -4488,28 +4489,28 @@ pyasn1 = ">=0.1.3" [[package]] name = "ruff" -version = "0.3.1" +version = "0.3.3" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.3.1-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:6b82e3937d0d76554cd5796bc3342a7d40de44494d29ff490022d7a52c501744"}, - {file = "ruff-0.3.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:ae7954c8f692b70e6a206087ae3988acc9295d84c550f8d90b66c62424c16771"}, - {file = "ruff-0.3.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b730f56ccf91225da0f06cfe421e83b8cc27b2a79393db9c3df02ed7e2bbc01"}, - {file = "ruff-0.3.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c78bfa85637668f47bd82aa2ae17de2b34221ac23fea30926f6409f9e37fc927"}, - {file = "ruff-0.3.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6abaad602d6e6daaec444cbf4d9364df0a783e49604c21499f75bb92237d4af"}, - {file = "ruff-0.3.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:5f0c21b6914c3c9a25a59497cbb1e5b6c2d8d9beecc9b8e03ee986e24eee072e"}, - {file = "ruff-0.3.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:434c3fc72e6311c85cd143c4c448b0e60e025a9ac1781e63ba222579a8c29200"}, - {file = "ruff-0.3.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78a7025e6312cbba496341da5062e7cdd47d95f45c1b903e635cdeb1ba5ec2b9"}, - {file = "ruff-0.3.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52b02bb46f1a79b0c1fa93f6495bc7e77e4ef76e6c28995b4974a20ed09c0833"}, - {file = "ruff-0.3.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:11b5699c42f7d0b771c633d620f2cb22e727fb226273aba775a91784a9ed856c"}, - {file = "ruff-0.3.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:54e5dca3e411772b51194b3102b5f23b36961e8ede463776b289b78180df71a0"}, - {file = "ruff-0.3.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:951efb610c5844e668bbec4f71cf704f8645cf3106e13f283413969527ebfded"}, - {file = "ruff-0.3.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:09c7333b25e983aabcf6e38445252cff0b4745420fc3bda45b8fce791cc7e9ce"}, - {file = "ruff-0.3.1-py3-none-win32.whl", hash = "sha256:d937f9b99ebf346e0606c3faf43c1e297a62ad221d87ef682b5bdebe199e01f6"}, - {file = "ruff-0.3.1-py3-none-win_amd64.whl", hash = "sha256:c0318a512edc9f4e010bbaab588b5294e78c5cdc9b02c3d8ab2d77c7ae1903e3"}, - {file = "ruff-0.3.1-py3-none-win_arm64.whl", hash = "sha256:d3b60e44240f7e903e6dbae3139a65032ea4c6f2ad99b6265534ff1b83c20afa"}, - {file = "ruff-0.3.1.tar.gz", hash = "sha256:d30db97141fc2134299e6e983a6727922c9e03c031ae4883a6d69461de722ae7"}, + {file = "ruff-0.3.3-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:973a0e388b7bc2e9148c7f9be8b8c6ae7471b9be37e1cc732f8f44a6f6d7720d"}, + {file = "ruff-0.3.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:cfa60d23269d6e2031129b053fdb4e5a7b0637fc6c9c0586737b962b2f834493"}, + {file = "ruff-0.3.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1eca7ff7a47043cf6ce5c7f45f603b09121a7cc047447744b029d1b719278eb5"}, + {file = "ruff-0.3.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7d3f6762217c1da954de24b4a1a70515630d29f71e268ec5000afe81377642d"}, + {file = "ruff-0.3.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b24c19e8598916d9c6f5a5437671f55ee93c212a2c4c569605dc3842b6820386"}, + {file = "ruff-0.3.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:5a6cbf216b69c7090f0fe4669501a27326c34e119068c1494f35aaf4cc683778"}, + {file = 
"ruff-0.3.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:352e95ead6964974b234e16ba8a66dad102ec7bf8ac064a23f95371d8b198aab"}, + {file = "ruff-0.3.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d6ab88c81c4040a817aa432484e838aaddf8bfd7ca70e4e615482757acb64f8"}, + {file = "ruff-0.3.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79bca3a03a759cc773fca69e0bdeac8abd1c13c31b798d5bb3c9da4a03144a9f"}, + {file = "ruff-0.3.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:2700a804d5336bcffe063fd789ca2c7b02b552d2e323a336700abb8ae9e6a3f8"}, + {file = "ruff-0.3.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:fd66469f1a18fdb9d32e22b79f486223052ddf057dc56dea0caaf1a47bdfaf4e"}, + {file = "ruff-0.3.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:45817af234605525cdf6317005923bf532514e1ea3d9270acf61ca2440691376"}, + {file = "ruff-0.3.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:0da458989ce0159555ef224d5b7c24d3d2e4bf4c300b85467b08c3261c6bc6a8"}, + {file = "ruff-0.3.3-py3-none-win32.whl", hash = "sha256:f2831ec6a580a97f1ea82ea1eda0401c3cdf512cf2045fa3c85e8ef109e87de0"}, + {file = "ruff-0.3.3-py3-none-win_amd64.whl", hash = "sha256:be90bcae57c24d9f9d023b12d627e958eb55f595428bafcb7fec0791ad25ddfc"}, + {file = "ruff-0.3.3-py3-none-win_arm64.whl", hash = "sha256:0171aab5fecdc54383993389710a3d1227f2da124d76a2784a7098e818f92d61"}, + {file = "ruff-0.3.3.tar.gz", hash = "sha256:38671be06f57a2f8aba957d9f701ea889aa5736be806f18c0cd03d6ff0cbca8d"}, ] [[package]] @@ -6360,4 +6361,4 @@ weaviate = ["weaviate-client"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.12" -content-hash = "6e26821e00623255ff2a819bb045f4b8834f4d08f290f8256731319cf2fc89bc" +content-hash = "d639d5ee9ac75528c0f0815ea65b0f2c6c6390221e489d047dd36cc9d6631b00" diff --git a/pyproject.toml b/pyproject.toml index 85a415c364..a7d4b456b0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -119,6 +119,7 @@ torch = "^2.2.1" pytest-mock = "^3.12.0" ruff = "^0.3.0" black = "^24.2.0" +decorator = "^5.1.1" [tool.poetry.extras] chromadb = ["chromadb"] qdrant = ["qdrant-client", "fastembed"] From 873e779e562f1bc5e9a902ff7cccf6ae34a9d2f1 Mon Sep 17 00:00:00 2001 From: KCaverly Date: Tue, 19 Mar 2024 11:47:37 -0400 Subject: [PATCH 234/243] chore: clean up tests/predict --- tests/predict/test_chain_of_thought.py | 62 +++---- .../test_chain_of_thought_with_hint.py | 102 ++++++----- tests/predict/test_multi_chain_comparison.py | 20 +-- tests/predict/test_predict.py | 158 ++++++++-------- tests/predict/test_program_of_thought.py | 129 +++++++++++++- tests/predict/test_react.py | 168 +++++++++++------- tests/predict/test_retry.py | 139 ++++++++++----- 7 files changed, 480 insertions(+), 298 deletions(-) diff --git a/tests/predict/test_chain_of_thought.py b/tests/predict/test_chain_of_thought.py index 10ebe6995f..a115e36fd9 100644 --- a/tests/predict/test_chain_of_thought.py +++ b/tests/predict/test_chain_of_thought.py @@ -9,44 +9,46 @@ def test_initialization_with_string_signature(): dspy.settings.configure(experimental=False) lm = DummyLM(["find the number after 1", "2"]) - dspy.settings.configure(lm=lm) - predict = ChainOfThought("question -> answer") - assert list(predict.extended_signature.output_fields.keys()) == [ - "rationale", - "answer", - ] - assert predict(question="What is 1+1?").answer == "2" + with dspy.settings.context(lm=lm, backend=None): - print(lm.get_convo(-1)) - assert lm.get_convo(-1) == textwrap.dedent( - """\ - Given the fields 
`question`, produce the fields `answer`. + predict = ChainOfThought("question -> answer") + assert list(predict.extended_signature.output_fields.keys()) == [ + "rationale", + "answer", + ] + assert predict(question="What is 1+1?").answer == "2" - --- + print(lm.get_convo(-1)) + assert lm.get_convo(-1) == textwrap.dedent( + """\ + Given the fields `question`, produce the fields `answer`. - Follow the following format. + --- - Question: ${question} - Reasoning: Let's think step by step in order to ${produce the answer}. We ... - Answer: ${answer} + Follow the following format. - --- + Question: ${question} + Reasoning: Let's think step by step in order to ${produce the answer}. We ... + Answer: ${answer} - Question: What is 1+1? - Reasoning: Let's think step by step in order to find the number after 1 - Answer: 2""" - ) + --- + + Question: What is 1+1? + Reasoning: Let's think step by step in order to find the number after 1 + Answer: 2""" + ) def test_initialization_with_string_signature_experimental(): lm = DummyLanguageModel(answers=[["find the number after 1\n\nAnswer: 2"]]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend, lm=None, cache=False) - predict = ChainOfThought("question -> answer") - assert list(predict.extended_signature.output_fields.keys()) == [ - "rationale", - "answer", - ] - output = predict(question="What is 1+1?") - assert output.answer == "2" - assert output.rationale == "find the number after 1" + with dspy.settings.context(backend=backend, lm=None, cache=False): + + predict = ChainOfThought("question -> answer") + assert list(predict.extended_signature.output_fields.keys()) == [ + "rationale", + "answer", + ] + output = predict(question="What is 1+1?") + assert output.answer == "2" + assert output.rationale == "find the number after 1" diff --git a/tests/predict/test_chain_of_thought_with_hint.py b/tests/predict/test_chain_of_thought_with_hint.py index ce77217a42..29961ce5df 100644 --- a/tests/predict/test_chain_of_thought_with_hint.py +++ b/tests/predict/test_chain_of_thought_with_hint.py @@ -1,75 +1,73 @@ import dspy from dspy import ChainOfThoughtWithHint -from dspy.utils import DummyLanguageModel, DummyLM, clean_up_lm_test +from dspy.utils import DummyLanguageModel, DummyLM from dspy.backends import TemplateBackend -@clean_up_lm_test def test_cot_with_no_hint(): lm = DummyLM(["find the number after 1", "2"]) - dspy.settings.configure(lm=lm) - predict = ChainOfThoughtWithHint("question -> answer") - # Check output fields have the right order - assert list(predict.extended_signature2.output_fields.keys()) == [ - "rationale", - "hint", - "answer", - ] - assert predict(question="What is 1+1?").answer == "2" + with dspy.settings.context(lm=lm, backend=None): - final_convo = lm.get_convo(-1) - assert final_convo.endswith( - "Question: What is 1+1?\n" - "Reasoning: Let's think step by step in order to find the number after 1\n" - "Answer: 2" - ) + predict = ChainOfThoughtWithHint("question -> answer") + # Check output fields have the right order + assert list(predict.extended_signature2.output_fields.keys()) == [ + "rationale", + "hint", + "answer", + ] + assert predict(question="What is 1+1?").answer == "2" + + final_convo = lm.get_convo(-1) + assert final_convo.endswith( + "Question: What is 1+1?\n" + "Reasoning: Let's think step by step in order to find the number after 1\n" + "Answer: 2" + ) -@clean_up_lm_test def test_cot_with_hint(): lm = DummyLM(["find the number after 1", "2"]) - dspy.settings.configure(lm=lm) - predict = 
ChainOfThoughtWithHint("question -> answer") - assert list(predict.extended_signature2.output_fields.keys()) == [ - "rationale", - "hint", - "answer", - ] - assert predict(question="What is 1+1?", hint="think small").answer == "2" + with dspy.settings.context(lm=lm, backend=None): + + predict = ChainOfThoughtWithHint("question -> answer") + assert list(predict.extended_signature2.output_fields.keys()) == [ + "rationale", + "hint", + "answer", + ] + assert predict(question="What is 1+1?", hint="think small").answer == "2" - final_convo = lm.get_convo(-1) - assert final_convo.endswith( - "Question: What is 1+1?\n\n" - "Reasoning: Let's think step by step in order to find the number after 1\n\n" - "Hint: think small\n\n" - "Answer: 2" - ) + final_convo = lm.get_convo(-1) + assert final_convo.endswith( + "Question: What is 1+1?\n\n" + "Reasoning: Let's think step by step in order to find the number after 1\n\n" + "Hint: think small\n\n" + "Answer: 2" + ) -@clean_up_lm_test def test_cot_with_no_hint_with_backend(): lm = DummyLanguageModel(answers=[["find the number after 1\n\nAnswer: 2"]]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend) - predict = ChainOfThoughtWithHint("question -> answer") - # Check output fields have the right order - assert list(predict.extended_signature2.output_fields.keys()) == [ - "rationale", - "hint", - "answer", - ] - assert predict(question="What is 1+1?").answer == "2" + with dspy.settings.context(lm=None, backend=backend): + predict = ChainOfThoughtWithHint("question -> answer") + # Check output fields have the right order + assert list(predict.extended_signature2.output_fields.keys()) == [ + "rationale", + "hint", + "answer", + ] + assert predict(question="What is 1+1?").answer == "2" -@clean_up_lm_test def test_cot_with_hint_with_backend(): lm = DummyLanguageModel(answers=[["find the number after 1\n\nAnswer: 2"]]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend) - predict = ChainOfThoughtWithHint("question -> answer") - assert list(predict.extended_signature2.output_fields.keys()) == [ - "rationale", - "hint", - "answer", - ] - assert predict(question="What is 1+1?", hint="think small").answer == "2" + with dspy.settings.context(backend=backend, lm=None): + predict = ChainOfThoughtWithHint("question -> answer") + assert list(predict.extended_signature2.output_fields.keys()) == [ + "rationale", + "hint", + "answer", + ] + assert predict(question="What is 1+1?", hint="think small").answer == "2" diff --git a/tests/predict/test_multi_chain_comparison.py b/tests/predict/test_multi_chain_comparison.py index 9f6fab863f..8ed9ecb120 100644 --- a/tests/predict/test_multi_chain_comparison.py +++ b/tests/predict/test_multi_chain_comparison.py @@ -1,9 +1,8 @@ import dspy -from dspy.utils import DummyLM, DummyLanguageModel, clean_up_lm_test +from dspy.utils import DummyLM, DummyLanguageModel from dspy.backends import TemplateBackend -@clean_up_lm_test def test_basic_example(): class BasicQA(dspy.Signature): """Answer questions with short factoid answers.""" @@ -33,14 +32,13 @@ class BasicQA(dspy.Signature): # Call the MultiChainComparison on the completions question = "What is the color of the sky?" 
lm = DummyLM(["my rationale", "blue"]) - dspy.settings.configure(lm=lm) - final_pred = compare_answers(completions, question=question) + with dspy.settings.context(lm=lm, backend=None): + final_pred = compare_answers(completions, question=question) - assert final_pred.rationale == "my rationale" - assert final_pred.answer == "blue" + assert final_pred.rationale == "my rationale" + assert final_pred.answer == "blue" -@clean_up_lm_test def test_basic_example_with_backend(): class BasicQA(dspy.Signature): """Answer questions with short factoid answers.""" @@ -71,8 +69,8 @@ class BasicQA(dspy.Signature): question = "What is the color of the sky?" lm = DummyLanguageModel(answers=[["my rationale\n\nAnswer: blue"]]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend, cache=False) - final_pred = compare_answers(completions, question=question) + with dspy.settings.context(backend=backend, cache=False, lm=None): + final_pred = compare_answers(completions, question=question) - assert final_pred.rationale == "my rationale" - assert final_pred.answer == "blue" + assert final_pred.rationale == "my rationale" + assert final_pred.answer == "blue" diff --git a/tests/predict/test_predict.py b/tests/predict/test_predict.py index 261573398d..d1f2bac28c 100644 --- a/tests/predict/test_predict.py +++ b/tests/predict/test_predict.py @@ -3,7 +3,7 @@ from dspy import Predict, Signature from dspy.backends.json import JSONBackend from dspy.backends import TemplateBackend -from dspy.utils import DummyLM, DummyLanguageModel, clean_up_lm_test +from dspy.utils import DummyLM, DummyLanguageModel import copy import textwrap @@ -18,7 +18,6 @@ def test_initialization_with_string_signature(): assert predict.signature.instructions == Signature(signature_string).instructions -@clean_up_lm_test def test_reset_method(): predict_instance = Predict("input -> output") predict_instance.lm = "modified" @@ -33,7 +32,6 @@ def test_reset_method(): assert predict_instance.demos == [] -@clean_up_lm_test def test_dump_and_load_state(): predict_instance = Predict("input -> output") predict_instance.lm = "lm_state" @@ -43,35 +41,32 @@ def test_dump_and_load_state(): assert new_instance.lm == "lm_state" -@clean_up_lm_test def test_call_method(): predict_instance = Predict("input -> output") lm = DummyLM(["test output"]) - dspy.settings.configure(lm=lm) - result = predict_instance(input="test input") - assert result.output == "test output" - assert lm.get_convo(-1) == ( - "Given the fields `input`, produce the fields `output`.\n" - "\n---\n\n" - "Follow the following format.\n\n" - "Input: ${input}\n" - "Output: ${output}\n" - "\n---\n\n" - "Input: test input\n" - "Output: test output" - ) + with dspy.settings.context(lm=lm): + result = predict_instance(input="test input") + assert result.output == "test output" + assert lm.get_convo(-1) == ( + "Given the fields `input`, produce the fields `output`.\n" + "\n---\n\n" + "Follow the following format.\n\n" + "Input: ${input}\n" + "Output: ${output}\n" + "\n---\n\n" + "Input: test input\n" + "Output: test output" + ) -@clean_up_lm_test def test_call_method_with_backend(): predict_instance = Predict("input -> output") lm = DummyLanguageModel(answers=[["test output"]]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend, lm=None) - - result = predict_instance(input="test input") - assert result.output == "test output" + with dspy.settings.context(backend=backend, lm=None, cache=False): + result = predict_instance(input="test input") + assert 
result.output == "test output" def test_dump_load_state(): @@ -82,46 +77,41 @@ def test_dump_load_state(): assert new_instance.signature.instructions == "original instructions" -@clean_up_lm_test def test_forward_method(): program = Predict("question -> answer") - dspy.settings.configure(lm=DummyLM([]), backend=None) - result = program(question="What is 1+1?").answer - assert result == "No more responses" + with dspy.settings.context(lm=DummyLM([]), backend=None): + result = program(question="What is 1+1?").answer + assert result == "No more responses" -@clean_up_lm_test def test_forward_method_with_backend(): lm = DummyLanguageModel(answers=[["No more responses"]]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend, lm=None) + with dspy.settings.context(backend=backend, lm=None): + program = Predict("question -> answer") + result = program(question="What is 1+1?").answer + assert result == "No more responses" - program = Predict("question -> answer") - result = program(question="What is 1+1?").answer - assert result == "No more responses" - -@clean_up_lm_test def test_forward_method2(): program = Predict("question -> answer1, answer2") - dspy.settings.configure(lm=DummyLM(["my first answer", "my second answer"])) - result = program(question="What is 1+1?") - assert result.answer1 == "my first answer" - assert result.answer2 == "my second answer" + with dspy.settings.context(lm=DummyLM(["my first answer", "my second answer"]), backend=None): + result = program(question="What is 1+1?") + assert result.answer1 == "my first answer" + assert result.answer2 == "my second answer" -@clean_up_lm_test def test_forward_method2_with_backend(): lm = DummyLanguageModel( answers=[[" my first answer\n\nAnswer 2: my second answer"]] ) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend, lm=None) + with dspy.settings.context(backend=backend, lm=None): - program = Predict("question -> answer1, answer2") - result = program(question="What is 1+1?") - assert result.answer1 == "my first answer" - assert result.answer2 == "my second answer" + program = Predict("question -> answer1, answer2") + result = program(question="What is 1+1?") + assert result.answer1 == "my first answer" + assert result.answer2 == "my second answer" def test_config_management(): @@ -131,31 +121,26 @@ def test_config_management(): assert "new_key" in config and config["new_key"] == "value" -@clean_up_lm_test def test_multi_output(): program = Predict("question -> answer", n=2) - dspy.settings.configure( - lm=DummyLM(["my first answer", "my second answer"]), backend=None - ) - results = program(question="What is 1+1?") - assert results.completions[0].answer == "my first answer" - assert results.completions[1].answer == "my second answer" + lm = DummyLM(["my first answer", "my second answer"]) + with dspy.settings.context(lm=lm, backend=None): + results = program(question="What is 1+1?") + assert results.completions[0].answer == "my first answer" + assert results.completions[1].answer == "my second answer" -@clean_up_lm_test def test_multi_output_with_backend(): program = Predict("question -> answer", n=2) lm = DummyLanguageModel(answers=[["my first answer", "my second answer"]]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend, lm=None) + with dspy.settings.context(backend=backend, lm=None): + results = program(question="What is 1+1?") + assert results.completions[0].answer == "my first answer" + assert results.completions[1].answer == "my second answer" - results = 
program(question="What is 1+1?") - assert results.completions[0].answer == "my first answer" - assert results.completions[1].answer == "my second answer" - -@clean_up_lm_test def test_multi_output_json_with_backend(): program = Predict("question -> answer", n=2) @@ -168,28 +153,26 @@ def test_multi_output_json_with_backend(): ] ) backend = JSONBackend(lm=lm) - dspy.settings.configure(backend=backend, lm=None) - - results = program(question="What is 1+1?") - assert results.completions[1].answer == "my second answer" + with dspy.settings.context(backend=backend, lm=None): + results = program(question="What is 1+1?") + assert results.completions[1].answer == "my second answer" -@clean_up_lm_test def test_multi_output2(): program = Predict("question -> answer1, answer2", n=2) - dspy.settings.configure( - lm=DummyLM( + lm=DummyLM( [ "my 0 answer\nAnswer 2: my 2 answer", "my 1 answer\nAnswer 2: my 3 answer", ], ) - ) - results = program(question="What is 1+1?") - assert results.completions[0].answer1 == "my 0 answer" - assert results.completions[1].answer1 == "my 1 answer" - assert results.completions[0].answer2 == "my 2 answer" - assert results.completions[1].answer2 == "my 3 answer" + + with dspy.settings.context(lm=lm, backend=None): + results = program(question="What is 1+1?") + assert results.completions[0].answer1 == "my 0 answer" + assert results.completions[1].answer1 == "my 1 answer" + assert results.completions[0].answer2 == "my 2 answer" + assert results.completions[1].answer2 == "my 3 answer" def test_named_predictors(): @@ -206,7 +189,6 @@ def __init__(self): assert program2.named_predictors() == [("inner", program2.inner)] -@clean_up_lm_test def test_output_only(): class OutputOnlySignature(dspy.Signature): output = dspy.OutputField() @@ -214,20 +196,20 @@ class OutputOnlySignature(dspy.Signature): predictor = Predict(OutputOnlySignature) lm = DummyLM(["short answer"]) - dspy.settings.configure(lm=lm) - assert predictor().output == "short answer" - - assert lm.get_convo(-1) == textwrap.dedent( - """\ - Given the fields , produce the fields `output`. - - --- - - Follow the following format. - - Output: ${output} - - --- - - Output: short answer""" - ) + with dspy.settings.context(lm=lm, backend=None): + assert predictor().output == "short answer" + + assert lm.get_convo(-1) == textwrap.dedent( + """\ + Given the fields , produce the fields `output`. + + --- + + Follow the following format. + + Output: ${output} + + --- + + Output: short answer""" + ) diff --git a/tests/predict/test_program_of_thought.py b/tests/predict/test_program_of_thought.py index 5feeb26b08..1e1d907101 100644 --- a/tests/predict/test_program_of_thought.py +++ b/tests/predict/test_program_of_thought.py @@ -10,9 +10,121 @@ class BasicQA(Signature): question = dspy.InputField() answer = dspy.OutputField(desc="often between 1 and 5 words") - def test_pot_code_generation(): pot = ProgramOfThought(BasicQA) + lm = DummyLM([ + "Reason_A", + "```python\nresult = 1+1\n```", + "Reason_B", + "2", + ]) + with dspy.settings.context(lm=lm, backend=None): + res = pot(question="What is 1+1?") + assert res.answer == "2" + assert lm.get_convo(index=-1) == textwrap.dedent("""\ + Given the final code `question`, `final_generated_code`, `code_output`, provide the final `answer`. + + --- + + Follow the following format. + + Question: ${question} + + Code: python code that answers the question + + Code Output: output of previously-generated python code + + Reasoning: Let's think step by step in order to ${produce the answer}. We ... 
+ + Answer: often between 1 and 5 words + + --- + + Question: What is 1+1? + + Code: result = 1+1 + + Code Output: 2 + + Reasoning: Let's think step by step in order to Reason_B + + Answer: 2""") + +def test_pot_code_generation_with_error(): + pot = ProgramOfThought(BasicQA) + lm = DummyLM([ + "Reason_A", + "```python\nresult = 1+0/0\n```", + "Reason_B", # Error: division by zero + "```python\nresult = 1+1\n```", + "Reason_C", + "2", + ]) + with dspy.settings.context(lm=lm, backend=None): + res = pot(question="What is 1+1?") + assert res.answer == "2" + + # The first code example failed + assert lm.get_convo(index=2) == textwrap.dedent("""\ + You are given `question`, `previous_code`, `error` due to an error in previous code. + Your task is to correct the error and provide the new `generated_code`. + + --- + + Follow the following format. + + Question: ${question} + + Previous Code: previously-generated python code that errored + + Error: error message from previously-generated python code + + Reasoning: Let's think step by step in order to ${produce the generated_code}. We ... + + Code: python code that answers the question + + --- + + Question: What is 1+1? + + Previous Code: result = 1+0/0 + + Error: division by zero + + Reasoning: Let's think step by step in order to Reason_B""") + + # The second code example succeeded + assert lm.get_convo(-1) == textwrap.dedent("""\ + Given the final code `question`, `final_generated_code`, `code_output`, provide the final `answer`. + + --- + + Follow the following format. + + Question: ${question} + + Code: python code that answers the question + + Code Output: output of previously-generated python code + + Reasoning: Let's think step by step in order to ${produce the answer}. We ... + + Answer: often between 1 and 5 words + + --- + + Question: What is 1+1? + + Code: result = 1+1 + + Code Output: 2 + + Reasoning: Let's think step by step in order to Reason_C + + Answer: 2""") + +def test_pot_code_generation_with_backend(): + pot = ProgramOfThought(BasicQA) lm = DummyLanguageModel( answers=[ @@ -21,12 +133,12 @@ def test_pot_code_generation(): ] ) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend, cache=False) - res = pot(question="What is 1+1?") - assert res.answer == "2" + with dspy.settings.context(backend=backend, lm=None, cache=False): + res = pot(question="What is 1+1?") + assert res.answer == "2" -def test_pot_code_generation_with_error(): +def test_pot_code_generation_with_error_with_backend(): pot = ProgramOfThought(BasicQA) lm = DummyLanguageModel( @@ -37,6 +149,7 @@ def test_pot_code_generation_with_error(): ] ) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend, cache=False) - res = pot(question="What is 1+1?") - assert res.answer == "2" + with dspy.settings.context(backend=backend, lm=None, cache=False): + + res = pot(question="What is 1+1?") + assert res.answer == "2" diff --git a/tests/predict/test_react.py b/tests/predict/test_react.py index 4eb8ec0d69..1a46c21d9d 100644 --- a/tests/predict/test_react.py +++ b/tests/predict/test_react.py @@ -4,41 +4,111 @@ def test_example_no_tools(): + # Createa a simple dataset which the model will use with the Retrieve tool. 
+    lm = dspy.utils.DummyLM(
+        [
+            "Initial thoughts",  # Thought_1
+            "Finish[blue]",  # Action_1
+        ]
+    )
+    with dspy.settings.context(lm=lm, rm=dummy_rm(), backend=None):
+
+        program = dspy.ReAct("question -> answer")
+
+        # Check default tools
+        assert isinstance(program.tools["Finish"], dspy.Example)
+
+        # Call the ReAct module on a particular input
+        question = "What is the color of the sky?"
+        result = program(question=question)
+        assert result.answer == "blue"
+
+        # For debugging
+        print("---")
+        for row in lm.history:
+            print(row["prompt"])
+            print("Response:", row["response"]["choices"][0]["text"])
+        print("---")
+
+        assert lm.get_convo(-1).endswith(
+            "Question: What is the color of the sky?\n"
+            "Thought 1: Initial thoughts\n"
+            "Action 1: Finish[blue]"
+        )
+
+def test_example_search():
+    # Create a simple dataset which the model will use with the Retrieve tool.
+    lm = dspy.utils.DummyLM(
+        [
+            "Initial thoughts",  # Thought_1
+            "Search[the color of the sky]",  # Thought_1
+            "More thoughts",  # Thought_2
+            "Finish[blue]",  # Action_2
+        ]
+    )
+    rm = dummy_rm(
+        [
+            "We all know the color of the sky is blue.",
+            "Something about the sky colors",
+            "This sentence is completely irrelevant to answer the question.",
+            "Let's add some more sentences to act as dummy passages.",
+            "Let's add some more sentences to act as dummy passages.",
+            "Let's add some more sentences to act as dummy passages.",
+        ]
+    )
+    with dspy.settings.context(lm=lm, rm=rm, backend=None):
+
+        program = dspy.ReAct("question -> answer")
+
+        # Check default tools
+        assert len(program.tools) == 2
+        assert isinstance(program.tools["Search"], dspy.Retrieve)
+        assert isinstance(program.tools["Finish"], dspy.Example)
+
+        # Call the ReAct module on a particular input
+        question = "What is the color of the sky?"
+        result = program(question=question)
+        assert result.answer == "blue"
+
+        # For debugging
+        print(lm.get_convo(-1))
+
+        assert lm.get_convo(-1).endswith(
+            "Question: What is the color of the sky?\n\n"
+            "Thought 1: Initial thoughts\n\n"
+            "Action 1: Search[the color of the sky]\n\n"
+            "Observation 1:\n"
+            "[1] «We all know the color of the sky is blue.»\n"
+            "[2] «Something about the sky colors»\n"
+            "[3] «This sentence is completely irrelevant to answer the question.»\n\n"
+            "Thought 2: More thoughts\n\n"
+            "Action 2: Finish[blue]"
+        )
+
+def test_example_no_tools_with_backend():
     # Createa a simple dataset which the model will use with the Retrieve tool.
     lm = DummyLanguageModel(answers=[["Initial thoughts\n\nAction 1: Finish[blue]"]])
     backend = TemplateBackend(lm=lm)
-    dspy.settings.configure(backend=backend, cache=False)
-    # lm = dspy.utils.DummyLM(
-    #     [
-    #         "Initial thoughts",  # Thought_1
-    #         "Finish[blue]",  # Action_1
-    #     ]
-    # )
-    # dspy.settings.configure(lm=lm, rm=dummy_rm())
+    with dspy.settings.context(backend=backend, lm=None, cache=False):

-    program = dspy.ReAct("question -> answer")
+        program = dspy.ReAct("question -> answer")

-    # Check default tools
-    assert isinstance(program.tools["Finish"], dspy.Example)
+        # Check default tools
+        assert isinstance(program.tools["Finish"], dspy.Example)

-    # Call the ReAct module on a particular input
-    question = "What is the color of the sky?"
-    result = program(question=question)
-    assert result.answer == "blue"
+        # Call the ReAct module on a particular input
+        question = "What is the color of the sky?"
+        result = program(question=question)
+        assert result.answer == "blue"

-    # For debugging
-    print("---")
-    for event in backend.history:
-        print(event.prompt)
+        # For debugging
+        print("---")
+        for event in backend.history:
+            print(event.prompt)

-    # assert lm.get_convo(-1).endswith(
-    #     "Question: What is the color of the sky?\n"
-    #     "Thought 1: Initial thoughts\n"
-    #     "Action 1: Finish[blue]"
-    # )
-def test_example_search():
+
+def test_example_search_with_backend():
     # Createa a simple dataset which the model will use with the Retrieve tool.
     lm = DummyLanguageModel(
         answers=[
             ["Initial thoughts\n\nAction 1: Search[the color of the sky]"],
             ["More thoughts\n\nAction 2: Finish[blue]"],
         ]
     )
     backend = TemplateBackend(lm=lm)
-    # lm = dspy.utils.DummyLM(
-    #     [
-    #         "Initial thoughts",  # Thought_1
-    #         "Search[the color of the sky]",  # Thought_1
-    #         "More thoughts",  # Thought_2
-    #         "Finish[blue]",  # Action_2
-    #     ]
-    # )
     rm = dummy_rm(
         [
             "We all know the color of the sky is blue.",
             "Somethng about the sky colors",
             "This sentence is completely irellevant to answer the question.",
             "Let's add some more sentences to act as summy passages.",
             "Let's add some more sentences to act as summy passages.",
             "Let's add some more sentences to act as summy passages.",
         ]
     )
-    dspy.settings.configure(backend=backend, rm=rm, cache=False)
-
-    program = dspy.ReAct("question -> answer")
-
-    # Check default tools
-    assert len(program.tools) == 2
-    assert isinstance(program.tools["Search"], dspy.Retrieve)
-    assert isinstance(program.tools["Finish"], dspy.Example)
-
-    # Call the ReAct module on a particular input
-    question = "What is the color of the sky?"
-    result = program(question=question)
-    assert result.answer == "blue"
-
-    # For debugging
-    # print(lm.get_convo(-1))
-    #
-    # assert lm.get_convo(-1).endswith(
-    #     "Question: What is the color of the sky?\n\n"
-    #     "Thought 1: Initial thoughts\n\n"
-    #     "Action 1: Search[the color of the sky]\n\n"
-    #     "Observation 1:\n"
-    #     "[1] «We all know the color of the sky is blue.»\n"
-    #     "[2] «Somethng about the sky colors»\n"
-    #     "[3] «This sentence is completely irellevant to answer the question.»\n\n"
-    #     "Thought 2: More thoughts\n\n"
-    #     "Action 2: Finish[blue]"
-    # )
+    with dspy.settings.context(backend=backend, lm=None, rm=rm, cache=False):
+        program = dspy.ReAct("question -> answer")
+
+        # Check default tools
+        assert len(program.tools) == 2
+        assert isinstance(program.tools["Search"], dspy.Retrieve)
+        assert isinstance(program.tools["Finish"], dspy.Example)
+
+        # Call the ReAct module on a particular input
+        question = "What is the color of the sky?"
+ result = program(question=question) + assert result.answer == "blue" diff --git a/tests/predict/test_retry.py b/tests/predict/test_retry.py index c9750befc4..dc98167bb8 100644 --- a/tests/predict/test_retry.py +++ b/tests/predict/test_retry.py @@ -1,10 +1,9 @@ import functools import dspy -from dspy.utils import DummyLanguageModel +from dspy.utils import DummyLanguageModel, DummyLM from dspy.backends import TemplateBackend from dspy.primitives.assertions import assert_transform_module, backtrack_handler - def test_retry_simple(): predict = dspy.Predict("question -> answer") retry_module = dspy.Retry(predict) @@ -14,57 +13,101 @@ def test_retry_simple(): assert f"past_{field}" in retry_module.new_signature.input_fields assert "feedback" in retry_module.new_signature.input_fields + lm = DummyLM(["blue"]) + with dspy.settings.context(lm=lm, backend=None): + result = retry_module.forward( + question="What color is the sky?", + past_outputs={"answer": "red"}, + feedback="Try harder", + ) + assert result.answer == "blue" + + print(lm.get_convo(-1)) + assert lm.get_convo(-1).endswith( + "Question: What color is the sky?\n\n" + "Past Answer: red\n\n" + "Instructions: Try harder\n\n" + "Answer: blue" + ) + +def test_retry_forward_with_feedback(): + # First we make a mistake, then we fix it + lm = DummyLM(["red", "blue"]) + with dspy.settings.context(lm=lm, backend=None, trace=[]): + + class SimpleModule(dspy.Module): + def __init__(self): + super().__init__() + self.predictor = dspy.Predict("question -> answer") + + def forward(self, **kwargs): + result = self.predictor(**kwargs) + print(f"SimpleModule got {result.answer=}") + dspy.Suggest(result.answer == "blue", "Please think harder") + return result + + program = SimpleModule() + program = assert_transform_module( + program.map_named_predictors(dspy.Retry), + functools.partial(backtrack_handler, max_backtracks=1), + ) + + result = program(question="What color is the sky?") + + assert result.answer == "blue" + + print(lm.get_convo(-1)) + assert lm.get_convo(-1).endswith( + "Question: What color is the sky?\n\n" + "Past Answer: red\n\n" + "Instructions: Please think harder\n\n" + "Answer: blue" + ) + +def test_retry_simple_with_backend(): + predict = dspy.Predict("question -> answer") + retry_module = dspy.Retry(predict) + + # Test Retry has created the correct new signature + for field in predict.signature.output_fields: + assert f"past_{field}" in retry_module.new_signature.input_fields + assert "feedback" in retry_module.new_signature.input_fields + lm = DummyLanguageModel(answers=[["blue"]]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend, cache=False) - result = retry_module.forward( - question="What color is the sky?", - past_outputs={"answer": "red"}, - feedback="Try harder", - ) - - assert result.answer == "blue" + with dspy.settings.context(backend=backend, lm=None, cache=False): + result = retry_module.forward( + question="What color is the sky?", + past_outputs={"answer": "red"}, + feedback="Try harder", + ) - # print(lm.get_convo(-1)) - # assert lm.get_convo(-1).endswith( - # "Question: What color is the sky?\n\n" - # "Past Answer: red\n\n" - # "Instructions: Try harder\n\n" - # "Answer: blue" - # ) + assert result.answer == "blue" -def test_retry_forward_with_feedback(): +def test_retry_forward_with_feedback_with_backend(): # First we make a mistake, then we fix it lm = DummyLanguageModel(answers=[["red"], ["blue"]]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend, trace=[], 
cache=False) - - class SimpleModule(dspy.Module): - def __init__(self): - super().__init__() - self.predictor = dspy.Predict("question -> answer") - - def forward(self, **kwargs): - result = self.predictor(**kwargs) - print(f"SimpleModule got {result.answer=}") - dspy.Suggest(result.answer == "blue", "Please think harder") - return result - - program = SimpleModule() - program = assert_transform_module( - program.map_named_predictors(dspy.Retry), - functools.partial(backtrack_handler, max_backtracks=1), - ) - - result = program(question="What color is the sky?") - - assert result.answer == "blue" - - # print(lm.get_convo(-1)) - # assert lm.get_convo(-1).endswith( - # "Question: What color is the sky?\n\n" - # "Past Answer: red\n\n" - # "Instructions: Please think harder\n\n" - # "Answer: blue" - # ) + with dspy.settings.context(backend=backend, lm=None, trace=[], cache=False): + + class SimpleModule(dspy.Module): + def __init__(self): + super().__init__() + self.predictor = dspy.Predict("question -> answer") + + def forward(self, **kwargs): + result = self.predictor(**kwargs) + print(f"SimpleModule got {result.answer=}") + dspy.Suggest(result.answer == "blue", "Please think harder") + return result + + program = SimpleModule() + program = assert_transform_module( + program.map_named_predictors(dspy.Retry), + functools.partial(backtrack_handler, max_backtracks=1), + ) + + result = program(question="What color is the sky?") + + assert result.answer == "blue" From b2f76241e89d4d14068c4a14aad0842bf07bd0d0 Mon Sep 17 00:00:00 2001 From: KCaverly Date: Tue, 19 Mar 2024 11:50:00 -0400 Subject: [PATCH 235/243] chore: clean up tests/backends --- tests/backends/test_json_backend.py | 12 ++--- tests/backends/test_template_backend.py | 62 ++++++++++++------------- 2 files changed, 36 insertions(+), 38 deletions(-) diff --git a/tests/backends/test_json_backend.py b/tests/backends/test_json_backend.py index 5ba7cc968c..db48ba0f99 100644 --- a/tests/backends/test_json_backend.py +++ b/tests/backends/test_json_backend.py @@ -25,10 +25,10 @@ def test_backend_complete_generation(): ] ) backend = JSONBackend(lm=dummy_lm) - dspy.configure(backend=backend) + with dspy.settings.context(backend=backend, lm=None, cache=False): - # Generate Sample Signature - n = 5 - x = backend(Emotion, sentence="This is a positive sentence", n=n) - assert len(x) == n - assert x.sentiment == "Joy" + # Generate Sample Signature + n = 5 + x = backend(Emotion, sentence="This is a positive sentence", n=n) + assert len(x) == n + assert x.sentiment == "Joy" diff --git a/tests/backends/test_template_backend.py b/tests/backends/test_template_backend.py index d755ccd3d6..edfee4a087 100644 --- a/tests/backends/test_template_backend.py +++ b/tests/backends/test_template_backend.py @@ -31,14 +31,13 @@ def test_backend_complete_generation(): # Initialize Backend dummy_lm = DummyLanguageModel(answers=[["Joy", "Joy", "Joy", "Joy", "Joy"]]) backend = TemplateBackend(lm=dummy_lm) - dspy.settings.configure(backend=backend) - - # Generate Sample Signature - n = 5 - x = backend(Emotion, sentence="This is a positive sentence", n=n) - assert len(x) == n - assert x.examples[0].sentence == "This is a positive sentence" - assert x.examples[0].sentiment == "Joy" + with dspy.settings.context(backend=backend, lm=None): + # Generate Sample Signature + n = 5 + x = backend(Emotion, sentence="This is a positive sentence", n=n) + assert len(x) == n + assert x.examples[0].sentence == "This is a positive sentence" + assert x.examples[0].sentiment == "Joy" def 
test_backend_with_recover(): @@ -52,17 +51,16 @@ def test_backend_with_recover(): ], ) backend = TemplateBackend(lm=dummy_lm) - dspy.settings.configure(backend=backend) - - # Generate Incomplete on the first try - # Nothing should be returned from the generation as no results were complete - n = 1 - with pytest.raises(Exception): - backend( - COTCheckCitationFaithfulness, - context=["The 21-year-old made seven appearances for the Hammers."], - text="Lee scored 3 goals for Colchester United.", - ) + with dspy.settings.context(backend=backend, lm=None): + # Generate Incomplete on the first try + # Nothing should be returned from the generation as no results were complete + n = 1 + with pytest.raises(Exception): + backend( + COTCheckCitationFaithfulness, + context=["The 21-year-old made seven appearances for the Hammers."], + text="Lee scored 3 goals for Colchester United.", + ) # Initialize Backend dummy_lm = DummyLanguageModel( @@ -74,19 +72,19 @@ def test_backend_with_recover(): ] ) backend = TemplateBackend(lm=dummy_lm) - dspy.settings.configure(backend=backend) + with dspy.settings.context(backend=backend, lm=None, cache=False): - # Generate Complete after recovery - n = 1 - x = backend( - COTCheckCitationFaithfulness, - context=["The 21-year-old made seven appearances for the Hammers."], - text="Lee scored 3 goals for Colchester United.", - attempts=2, - n=n, - ) + # Generate Complete after recovery + n = 1 + x = backend( + COTCheckCitationFaithfulness, + context=["The 21-year-old made seven appearances for the Hammers."], + text="Lee scored 3 goals for Colchester United.", + attempts=2, + n=n, + ) - assert x.examples[0].rationale is not None + assert x.examples[0].rationale is not None - assert x.rationale - assert x.faithfulness + assert x.rationale + assert x.faithfulness From 29179d245a712668e20c424f886c52cfc182b215 Mon Sep 17 00:00:00 2001 From: KCaverly Date: Tue, 19 Mar 2024 11:54:08 -0400 Subject: [PATCH 236/243] chore: clean up tests/evaluate --- tests/evaluate/test_evaluate.py | 67 +++++++++++++++++++++++---------- tests/evaluate/test_metrics.py | 4 +- 2 files changed, 49 insertions(+), 22 deletions(-) diff --git a/tests/evaluate/test_evaluate.py b/tests/evaluate/test_evaluate.py index aebaf61a72..ae532b5071 100644 --- a/tests/evaluate/test_evaluate.py +++ b/tests/evaluate/test_evaluate.py @@ -28,33 +28,60 @@ def test_evaluate_initialization(): def test_evaluate_call(): + lm=DummyLM({"What is 1+1?": "2", "What is 2+2?": "4"}) + with dspy.settings.context(lm=lm, backend=None, cache=False): + devset = [new_example("What is 1+1?", "2"), new_example("What is 2+2?", "4")] + program = Predict("question -> answer") + assert program(question="What is 1+1?").answer == "2" + ev = Evaluate( + devset=devset, + metric=answer_exact_match, + display_progress=False, + ) + score = ev(program) + assert score == 100.0 + +def test_evaluate_call_with_backend(): lm = DummyLanguageModel(answers=[[" 2"], [" 4"]]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend, cache=False) - devset = [new_example("What is 1+1?", "2"), new_example("What is 2+2?", "4")] - program = Predict("question -> answer") - ev = Evaluate( - devset=devset, - metric=answer_exact_match, - display_progress=False, - ) - score = ev(program) - assert score == 100.0 + with dspy.settings.context(backend=backend, lm=None, cache=False): + devset = [new_example("What is 1+1?", "2"), new_example("What is 2+2?", "4")] + program = Predict("question -> answer") + ev = Evaluate( + devset=devset, + 
metric=answer_exact_match, + display_progress=False, + ) + score = ev(program) + assert score == 100.0 def test_evaluate_call_bad(): + lm=DummyLM({"What is 1+1?": "0", "What is 2+2?": "0"}) + with dspy.settings.context(backend=None, lm=lm): + devset = [new_example("What is 1+1?", "2"), new_example("What is 2+2?", "4")] + program = Predict("question -> answer") + ev = Evaluate( + devset=devset, + metric=answer_exact_match, + display_progress=False, + ) + score = ev(program) + assert score == 0.0 + +def test_evaluate_call_bad_with_backend(): lm = DummyLanguageModel(answers=[[" 0"], [" 0"]]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend, cache=False) - devset = [new_example("What is 1+1?", "2"), new_example("What is 2+2?", "4")] - program = Predict("question -> answer") - ev = Evaluate( - devset=devset, - metric=answer_exact_match, - display_progress=False, - ) - score = ev(program) - assert score == 0.0 + with dspy.settings.context(backend=backend, lm=None, cache=False): + devset = [new_example("What is 1+1?", "2"), new_example("What is 2+2?", "4")] + program = Predict("question -> answer") + ev = Evaluate( + devset=devset, + metric=answer_exact_match, + display_progress=False, + ) + score = ev(program) + assert score == 0.0 def test_evaluate_display_table(): diff --git a/tests/evaluate/test_metrics.py b/tests/evaluate/test_metrics.py index f04148251b..e91d26f160 100644 --- a/tests/evaluate/test_metrics.py +++ b/tests/evaluate/test_metrics.py @@ -1,6 +1,6 @@ # FILEPATH: /Users/ahle/repos/dspy/tests/evaluate/test_metrics.py -import dsp, dspy +import dspy from dspy.evaluate.metrics import answer_exact_match from dspy.predict import Predict @@ -29,4 +29,4 @@ def test_answer_exact_match_no_match(): ).with_inputs("question") pred = Predict("question -> answer") pred.answer = "3" - assert not answer_exact_match(example, pred) \ No newline at end of file + assert not answer_exact_match(example, pred) From 33b6b384b19fd5c0022dd01428c150120aa1cb25 Mon Sep 17 00:00:00 2001 From: KCaverly Date: Tue, 19 Mar 2024 12:09:59 -0400 Subject: [PATCH 237/243] chore: clean up tests/functional --- tests/functional/test_functional.py | 618 ++++++++++++++-------------- 1 file changed, 307 insertions(+), 311 deletions(-) diff --git a/tests/functional/test_functional.py b/tests/functional/test_functional.py index 6c2e0d7c6c..b426d2b278 100644 --- a/tests/functional/test_functional.py +++ b/tests/functional/test_functional.py @@ -20,10 +20,9 @@ from dspy.primitives.example import Example from dspy.teleprompt.bootstrap import BootstrapFewShot from dspy.teleprompt.vanilla import LabeledFewShot -from dspy.utils import DummyLanguageModel, DummyLM, clean_up_lm_test +from dspy.utils import DummyLanguageModel, DummyLM -@clean_up_lm_test def test_simple(): @predictor def hard_question(topic: str) -> str: @@ -31,15 +30,14 @@ def hard_question(topic: str) -> str: expected = "What is the speed of light?" 
lm = DummyLM([expected]) - dspy.settings.configure(lm=lm) + with dspy.settings.context(lm=lm, backend=None): - question = hard_question(topic="Physics") - lm.inspect_history(n=2) + question = hard_question(topic="Physics") + lm.inspect_history(n=2) - assert question == expected + assert question == expected -@clean_up_lm_test def test_simple_with_backend(): @predictor def hard_question(topic: str) -> str: @@ -47,16 +45,15 @@ def hard_question(topic: str) -> str: lm = DummyLanguageModel(answers=[["What is the speed of light?"]]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend, cache=False) + with dspy.settings.context(backend=backend, lm=None, cache=False): - expected = "What is the speed of light?" + expected = "What is the speed of light?" - question = hard_question(topic="Physics") + question = hard_question(topic="Physics") - assert question == expected + assert question == expected -@clean_up_lm_test def test_list_output(): @predictor def hard_questions(topics: List[str]) -> List[str]: @@ -66,15 +63,14 @@ def hard_questions(topics: List[str]) -> List[str]: lm = DummyLM( ['{"value": ["What is the speed of light?", "What is the speed of sound?"]}'] ) - dspy.settings.configure(lm=lm) + with dspy.settings.context(lm=lm, backend=None): - question = hard_questions(topics=["Physics", "Music"]) - lm.inspect_history(n=2) + question = hard_questions(topics=["Physics", "Music"]) + lm.inspect_history(n=2) - assert question == expected + assert question == expected -@clean_up_lm_test def test_simple_type(): class Question(pydantic.BaseModel): value: str @@ -85,15 +81,14 @@ def hard_question(topic: str) -> Question: expected = "What is the speed of light?" lm = DummyLM([f'{{"value": "{expected}"}}']) - dspy.settings.configure(lm=lm) + with dspy.settings.context(lm=lm, backend=None): - question = hard_question(topic="Physics") + question = hard_question(topic="Physics") - assert isinstance(question, Question) - assert question.value == expected + assert isinstance(question, Question) + assert question.value == expected -@clean_up_lm_test def test_simple_type_with_backend(): class Question(pydantic.BaseModel): value: str @@ -105,15 +100,14 @@ def hard_question(topic: str) -> Question: expected = "What is the speed of light?" 
lm = DummyLanguageModel(answers=[[f'{{"value": "{expected}"}}']]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend) + with dspy.settings.context(backend=backend, lm=None): - question = hard_question(topic="Physics") + question = hard_question(topic="Physics") - assert isinstance(question, Question) - assert question.value == expected + assert isinstance(question, Question) + assert question.value == expected -@clean_up_lm_test def test_simple_type_input(): class Question(pydantic.BaseModel): value: str @@ -127,14 +121,13 @@ def answer(question: Question) -> Answer: question = Question(value="What is the speed of light?") lm = DummyLM([f'{{"value": "3e8"}}']) - dspy.settings.configure(lm=lm) + with dspy.settings.context(lm=lm, backend=None): - result = answer(question=question) + result = answer(question=question) - assert result == Answer(value="3e8") + assert result == Answer(value="3e8") -@clean_up_lm_test def test_simple_type_input_with_backend(): class Question(pydantic.BaseModel): value: str @@ -150,14 +143,13 @@ def answer(question: Question) -> Answer: lm = DummyLanguageModel(answers=[[f'{{"value": "3e8"}}']]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend) + with dspy.settings.context(lm=None, backend=backend): - result = answer(question=question) + result = answer(question=question) - assert result == Answer(value="3e8") + assert result == Answer(value="3e8") -@clean_up_lm_test def test_simple_class(): class Answer(pydantic.BaseModel): value: float @@ -195,21 +187,21 @@ def forward(self, **kwargs): expected.model_dump_json(), # Good answer ] ) - dspy.settings.configure(lm=lm) - qa = QA() - assert isinstance(qa, FunctionalModule) - assert isinstance(qa.answer, dspy.Module) + with dspy.settings.context(lm=lm, backend=None): + + qa = QA() + assert isinstance(qa, FunctionalModule) + assert isinstance(qa.answer, dspy.Module) - question, answer = qa(topic="Physics") + question, answer = qa(topic="Physics") - print(qa.answer) + print(qa.answer) - assert question == "What is the speed of light?" - assert answer == expected + assert question == "What is the speed of light?" + assert answer == expected -@clean_up_lm_test def test_simple_class_with_backend(): class Answer(pydantic.BaseModel): value: float @@ -248,19 +240,18 @@ def forward(self, **kwargs): ] ) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend, cache=False, lm=None) + with dspy.settings.context(backend=backend, lm=None, cache=False): - qa = QA() - assert isinstance(qa, FunctionalModule) - assert isinstance(qa.answer, dspy.Module) + qa = QA() + assert isinstance(qa, FunctionalModule) + assert isinstance(qa.answer, dspy.Module) - question, answer = qa(topic="Physics") + question, answer = qa(topic="Physics") - assert question == "What is the speed of light?" - assert answer == expected + assert question == "What is the speed of light?" + assert answer == expected -@clean_up_lm_test def test_simple_oop_with_backend(): class Question(pydantic.BaseModel): value: str @@ -274,12 +265,12 @@ class MySignature(dspy.Signature): expected = "What is the speed of light?" 
lm = DummyLanguageModel(answers=[[Question(value=expected).model_dump_json()]]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend, cache=False) + with dspy.settings.context(backend=backend, lm=None, cache=False): - question = program(topic="Physics").output + question = program(topic="Physics").output - assert isinstance(question, Question) - assert question.value == expected + assert isinstance(question, Question) + assert question.value == expected def test_equivalent_signatures(): @@ -319,7 +310,6 @@ def answer(self, question: str) -> str: } -@clean_up_lm_test def test_bootstrap_effectiveness_with_backend(): class SimpleModule(FunctionalModule): @predictor @@ -354,53 +344,50 @@ def simple_metric(example, prediction, trace=None): answers=[["blue"], ["blue"], ["Ring-ding-ding-ding-dingeringeding!"]] ) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend, cache=False) + with dspy.settings.context(backend=backend, cache=False, lm=None): - bootstrap = BootstrapFewShot( - metric=simple_metric, max_bootstrapped_demos=1, max_labeled_demos=1 - ) - compiled_student = bootstrap.compile(student, teacher=teacher, trainset=trainset) - - # lm.inspect_history(n=2) + bootstrap = BootstrapFewShot( + metric=simple_metric, max_bootstrapped_demos=1, max_labeled_demos=1 + ) + compiled_student = bootstrap.compile(student, teacher=teacher, trainset=trainset) - # Check that the compiled student has the correct demos - _, predict = next(compiled_student.named_sub_modules(Predict, skip_compiled=False)) - demos = predict.demos - assert len(demos) == 1 - assert demos[0].input == trainset[0].input - assert demos[0].output == trainset[0].output + # Check that the compiled student has the correct demos + _, predict = next(compiled_student.named_sub_modules(Predict, skip_compiled=False)) + demos = predict.demos + assert len(demos) == 1 + assert demos[0].input == trainset[0].input + assert demos[0].output == trainset[0].output - # Test the compiled student's prediction. - prediction = compiled_student(input=trainset[0].input) - assert prediction == trainset[0].output + # Test the compiled student's prediction. + prediction = compiled_student(input=trainset[0].input) + assert prediction == trainset[0].output - assert backend.history[-1].prompt == textwrap.dedent( - """\ - Given the fields `input`, produce the fields `output`. + assert backend.history[-1].prompt == textwrap.dedent( + """\ + Given the fields `input`, produce the fields `output`. - --- + --- - Follow the following format. + Follow the following format. - Input: ${input} - - Output: ${output} + Input: ${input} + + Output: ${output} - --- + --- - Input: What is the color of the sky? + Input: What is the color of the sky? - Output: blue + Output: blue - --- + --- - Input: What is the color of the sky? + Input: What is the color of the sky? 
- Output:""" - ) + Output:""" + ) -@clean_up_lm_test def test_regex(): class TravelInformation(BaseModel): origin: str = Field(pattern=r"^[A-Z]{3}$") @@ -428,14 +415,13 @@ def flight_information(email: str) -> TravelInformation: '{"origin": "JFK", "destination": "LAX", "date": "2022-12-25"}', ] ) - dspy.settings.configure(lm=lm) - - assert flight_information(email=email) == TravelInformation( - origin="JFK", destination="LAX", date=datetime.date(2022, 12, 25) - ) + + with dspy.settings.context(lm=lm, backend=None): + assert flight_information(email=email) == TravelInformation( + origin="JFK", destination="LAX", date=datetime.date(2022, 12, 25) + ) -@clean_up_lm_test def test_regex_with_backend(): class TravelInformation(BaseModel): origin: str = Field(pattern=r"^[A-Z]{3}$") @@ -462,15 +448,14 @@ def flight_information(email: str) -> TravelInformation: ] ) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend) + with dspy.settings.context(backend=backend, lm=None, cache=False): - predict = flight_information(email=email) - assert predict == TravelInformation( - origin="JFK", destination="LAX", date=datetime.date(2022, 12, 25) - ) + predict = flight_information(email=email) + assert predict == TravelInformation( + origin="JFK", destination="LAX", date=datetime.date(2022, 12, 25) + ) -@clean_up_lm_test def test_raises(): class TravelInformation(BaseModel): origin: str = Field(pattern=r"^[A-Z]{3}$") @@ -488,13 +473,12 @@ def flight_information(email: str) -> TravelInformation: '{"origin": "JFK", "destination": "LAX", "date": "bad date"}', ] ) - dspy.settings.configure(lm=lm) - with pytest.raises(ValueError): - flight_information(email="Some email") + with dspy.settings.context(lm=lm, backend=None): + with pytest.raises(ValueError): + flight_information(email="Some email") -@clean_up_lm_test def test_raises_with_backend(): class TravelInformation(BaseModel): origin: str = Field(pattern=r"^[A-Z]{3}$") @@ -515,13 +499,12 @@ def flight_information(email: str) -> TravelInformation: ] ) backend = TemplateBackend(lm=lm, attempts=1) - dspy.settings.configure(backend=backend) - with pytest.raises(ValueError): - flight_information(email="Some email") + with dspy.settings.context(backend=backend, lm=None): + with pytest.raises(ValueError): + flight_information(email="Some email") -@clean_up_lm_test def test_multi_errors(): class TravelInformation(BaseModel): origin: str = Field(pattern=r"^[A-Z]{3}$") @@ -542,40 +525,40 @@ def flight_information(email: str) -> TravelInformation: '{"origin": "JFK", "destination": "LAX", "date": "2022-12-25"}', ] ) - dspy.settings.configure(lm=lm) - assert flight_information(email="Some email") == TravelInformation( - origin="JFK", destination="LAX", date=datetime.date(2022, 12, 25) - ) - assert lm.get_convo(-1) == textwrap.dedent( - """\ - Given the fields `email`, produce the fields `flight_information`. + with dspy.settings.context(lm=lm, backend=None): + + assert flight_information(email="Some email") == TravelInformation( + origin="JFK", destination="LAX", date=datetime.date(2022, 12, 25) + ) + assert lm.get_convo(-1) == textwrap.dedent( + """\ + Given the fields `email`, produce the fields `flight_information`. - --- + --- - Follow the following format. + Follow the following format. 
- Email: ${email} + Email: ${email} - Past Error in Flight Information: An error to avoid in the future + Past Error in Flight Information: An error to avoid in the future - Past Error (2) in Flight Information: An error to avoid in the future + Past Error (2) in Flight Information: An error to avoid in the future - Flight Information: ${flight_information}. Respond with a single JSON object. JSON Schema: {"properties": {"origin": {"pattern": "^[A-Z]{3}$", "title": "Origin", "type": "string"}, "destination": {"pattern": "^[A-Z]{3}$", "title": "Destination", "type": "string"}, "date": {"format": "date", "title": "Date", "type": "string"}}, "required": ["origin", "destination", "date"], "title": "TravelInformation", "type": "object"} + Flight Information: ${flight_information}. Respond with a single JSON object. JSON Schema: {"properties": {"origin": {"pattern": "^[A-Z]{3}$", "title": "Origin", "type": "string"}, "destination": {"pattern": "^[A-Z]{3}$", "title": "Destination", "type": "string"}, "date": {"format": "date", "title": "Date", "type": "string"}}, "required": ["origin", "destination", "date"], "title": "TravelInformation", "type": "object"} - --- + --- - Email: Some email + Email: Some email - Past Error in Flight Information: String should match pattern '^[A-Z]{3}$': origin (error type: string_pattern_mismatch) + Past Error in Flight Information: String should match pattern '^[A-Z]{3}$': origin (error type: string_pattern_mismatch) - Past Error (2) in Flight Information: String should match pattern '^[A-Z]{3}$': destination (error type: string_pattern_mismatch) + Past Error (2) in Flight Information: String should match pattern '^[A-Z]{3}$': destination (error type: string_pattern_mismatch) - Flight Information: {"origin": "JFK", "destination": "LAX", "date": "2022-12-25"}""" - ) + Flight Information: {"origin": "JFK", "destination": "LAX", "date": "2022-12-25"}""" + ) -@clean_up_lm_test def test_multi_errors_with_backend(): class TravelInformation(BaseModel): origin: str = Field(pattern=r"^[A-Z]{3}$") @@ -597,38 +580,39 @@ def flight_information(email: str) -> TravelInformation: ) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend, cache=False) - assert flight_information(email="Some email") == TravelInformation( - origin="JFK", destination="LAX", date=datetime.date(2022, 12, 25) - ) + with dspy.settings.context(backend=backend, lm=None, cache=False): - assert backend.history[-1].prompt == textwrap.dedent( - """\ - Given the fields `email`, produce the fields `flight_information`. + assert flight_information(email="Some email") == TravelInformation( + origin="JFK", destination="LAX", date=datetime.date(2022, 12, 25) + ) - --- + assert backend.history[-1].prompt == textwrap.dedent( + """\ + Given the fields `email`, produce the fields `flight_information`. - Follow the following format. + --- - Email: ${email} + Follow the following format. - Past Error in Flight Information: An error to avoid in the future + Email: ${email} - Past Error (2) in Flight Information: An error to avoid in the future + Past Error in Flight Information: An error to avoid in the future - Flight Information: ${flight_information}. Respond with a single JSON object. 
JSON Schema: {"properties": {"origin": {"pattern": "^[A-Z]{3}$", "title": "Origin", "type": "string"}, "destination": {"pattern": "^[A-Z]{3}$", "title": "Destination", "type": "string"}, "date": {"format": "date", "title": "Date", "type": "string"}}, "required": ["origin", "destination", "date"], "title": "TravelInformation", "type": "object"} + Past Error (2) in Flight Information: An error to avoid in the future - --- + Flight Information: ${flight_information}. Respond with a single JSON object. JSON Schema: {"properties": {"origin": {"pattern": "^[A-Z]{3}$", "title": "Origin", "type": "string"}, "destination": {"pattern": "^[A-Z]{3}$", "title": "Destination", "type": "string"}, "date": {"format": "date", "title": "Date", "type": "string"}}, "required": ["origin", "destination", "date"], "title": "TravelInformation", "type": "object"} - Email: Some email + --- - Past Error in Flight Information: String should match pattern '^[A-Z]{3}$': origin (error type: string_pattern_mismatch) + Email: Some email - Past Error (2) in Flight Information: String should match pattern '^[A-Z]{3}$': destination (error type: string_pattern_mismatch) + Past Error in Flight Information: String should match pattern '^[A-Z]{3}$': origin (error type: string_pattern_mismatch) - Flight Information:""" - ) + Past Error (2) in Flight Information: String should match pattern '^[A-Z]{3}$': destination (error type: string_pattern_mismatch) + + Flight Information:""" + ) def test_field_validator(): @@ -655,33 +639,33 @@ def get_user_details() -> UserDetails: ] * 10 ) - dspy.settings.configure(lm=lm) - with pytest.raises(ValueError): - get_user_details() + with dspy.settings.context(lm=lm, backend=None): - print(lm.get_convo(-1)) - assert lm.get_convo(-1) == textwrap.dedent( - """\ - Given the fields , produce the fields `get_user_details`. + with pytest.raises(ValueError): + get_user_details() - --- + print(lm.get_convo(-1)) + assert lm.get_convo(-1) == textwrap.dedent( + """\ + Given the fields , produce the fields `get_user_details`. - Follow the following format. + --- - Past Error in Get User Details: An error to avoid in the future - Past Error (2) in Get User Details: An error to avoid in the future - Get User Details: ${get_user_details}. Respond with a single JSON object. JSON Schema: {"properties": {"name": {"title": "Name", "type": "string"}, "age": {"title": "Age", "type": "integer"}}, "required": ["name", "age"], "title": "UserDetails", "type": "object"} + Follow the following format. - --- + Past Error in Get User Details: An error to avoid in the future + Past Error (2) in Get User Details: An error to avoid in the future + Get User Details: ${get_user_details}. Respond with a single JSON object. 
JSON Schema: {"properties": {"name": {"title": "Name", "type": "string"}, "age": {"title": "Age", "type": "integer"}}, "required": ["name", "age"], "title": "UserDetails", "type": "object"} - Past Error in Get User Details: Value error, Name must be in uppercase.: name (error type: value_error) - Past Error (2) in Get User Details: Value error, Name must be in uppercase.: name (error type: value_error) - Get User Details: {"name": "lower case name", "age": 25}""" - ) + --- + + Past Error in Get User Details: Value error, Name must be in uppercase.: name (error type: value_error) + Past Error (2) in Get User Details: Value error, Name must be in uppercase.: name (error type: value_error) + Get User Details: {"name": "lower case name", "age": 25}""" + ) -@clean_up_lm_test def test_field_validator_with_backend(): class UserDetails(BaseModel): name: str @@ -702,36 +686,36 @@ def get_user_details() -> UserDetails: # out of retries. lm = DummyLanguageModel(answers=[['{"name": "lower case name", "age": 25}'] * 10]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend, cache=True) - with pytest.raises(ValueError): - get_user_details() + with dspy.settings.context(backend=backend, cache=True, lm=None): - assert backend.history[-1].prompt == textwrap.dedent( - """\ - Given the fields , produce the fields `get_user_details`. + with pytest.raises(ValueError): + get_user_details() - --- + assert backend.history[-1].prompt == textwrap.dedent( + """\ + Given the fields , produce the fields `get_user_details`. - Follow the following format. + --- - Past Error in Get User Details: An error to avoid in the future + Follow the following format. - Past Error (2) in Get User Details: An error to avoid in the future + Past Error in Get User Details: An error to avoid in the future - Get User Details: ${get_user_details}. Respond with a single JSON object. JSON Schema: {"properties": {"name": {"title": "Name", "type": "string"}, "age": {"title": "Age", "type": "integer"}}, "required": ["name", "age"], "title": "UserDetails", "type": "object"} + Past Error (2) in Get User Details: An error to avoid in the future - --- + Get User Details: ${get_user_details}. Respond with a single JSON object. 
JSON Schema: {"properties": {"name": {"title": "Name", "type": "string"}, "age": {"title": "Age", "type": "integer"}}, "required": ["name", "age"], "title": "UserDetails", "type": "object"} - Past Error in Get User Details: Value error, Name must be in uppercase.: name (error type: value_error) + --- - Past Error (2) in Get User Details: Value error, Name must be in uppercase.: name (error type: value_error) + Past Error in Get User Details: Value error, Name must be in uppercase.: name (error type: value_error) - Get User Details:""" - ) + Past Error (2) in Get User Details: Value error, Name must be in uppercase.: name (error type: value_error) + + Get User Details:""" + ) -@clean_up_lm_test def test_annotated_field(): @predictor def test( @@ -741,39 +725,40 @@ def test( # First try 0, which fails, then try 0.5, which passes lm = DummyLM(["0", "0.5"]) - dspy.settings.configure(lm=lm) - output = test(input="input") + with dspy.settings.context(lm=lm, backend=None): + + output = test(input="input") - assert output == 0.5 + assert output == 0.5 -@clean_up_lm_test def test_multiple_outputs(): lm = DummyLM([str(i) for i in range(100)]) - dspy.settings.configure(lm=lm) - test = TypedPredictor("input -> output") - result = test(input="input", config=dict(n=3)) + with dspy.settings.context(lm=lm, backend=None): - assert [completion.output for completion in result.completions] == ["0", "1", "2"] + test = TypedPredictor("input -> output") + result = test(input="input", config=dict(n=3)) + + assert [completion.output for completion in result.completions] == ["0", "1", "2"] def test_multiple_outputs_int(): lm = DummyLM([str(i) for i in range(100)]) - dspy.settings.configure(lm=lm) - class TestSignature(dspy.Signature): - input: int = dspy.InputField() - output: int = dspy.OutputField() + with dspy.settings.context(lm=lm, backend=None): + + class TestSignature(dspy.Signature): + input: int = dspy.InputField() + output: int = dspy.OutputField() - test = TypedPredictor(TestSignature) + test = TypedPredictor(TestSignature) - result = test(input=8, config=dict(n=3)) - assert [completion.output for completion in result.completions] == [0, 1, 2] + result = test(input=8, config=dict(n=3)) + assert [completion.output for completion in result.completions] == [0, 1, 2] -@clean_up_lm_test def test_multiple_outputs_int_cot(): # Note: Multiple outputs only work when the language model "speculatively" generates all the outputs in one go. 
lm = DummyLM( @@ -783,44 +768,48 @@ def test_multiple_outputs_int_cot(): "thoughts 2\nOutput: 2\n", ] ) - dspy.settings.configure(lm=lm) + + with dspy.settings.context(lm=lm, backend=None): - test = TypedChainOfThought("input:str -> output:int") + test = TypedChainOfThought("input:str -> output:int") - results = test(input="8", config=dict(n=3)) - assert [completion.output for completion in results.completions] + results = test(input="8", config=dict(n=3)) + assert [completion.output for completion in results.completions] def test_parse_type_string(): lm = DummyLM([str(i) for i in range(100)]) - dspy.settings.configure(lm=lm) - test = TypedPredictor("input:int -> output:int") + with dspy.settings.context(lm=lm, backend=None): + + test = TypedPredictor("input:int -> output:int") - results = test(input=8, config=dict(n=3)) - assert [completion.output for completion in results.completions] == [0, 1, 2] + results = test(input=8, config=dict(n=3)) + assert [completion.output for completion in results.completions] == [0, 1, 2] def test_literal(): lm = DummyLM([f'{{"value": "{i}"}}' for i in range(100)]) - dspy.settings.configure(lm=lm) - @predictor - def f() -> Literal["2", "3"]: - pass + with dspy.settings.context(lm=lm, backend=None): - assert f() == "2" + @predictor + def f() -> Literal["2", "3"]: + pass + + assert f() == "2" def test_literal_int(): lm = DummyLM([f'{{"value": {i}}}' for i in range(100)]) - dspy.settings.configure(lm=lm) - @predictor - def f() -> Literal[2, 3]: - pass + with dspy.settings.context(lm=lm, backend=None): - assert f() == 2 + @predictor + def f() -> Literal[2, 3]: + pass + + assert f() == 2 def test_fields_on_base_signature(): @@ -833,11 +822,12 @@ class SimpleOutput(dspy.Signature): "0.5", # Good output ] ) - dspy.settings.configure(lm=lm) - predictor = TypedPredictor(SimpleOutput) + with dspy.settings.context(lm=lm, backend=None): + + predictor = TypedPredictor(SimpleOutput) - assert predictor().output == 0.5 + assert predictor().output == 0.5 def test_synthetic_data_gen(): @@ -860,26 +850,27 @@ class ExampleSignature(dspy.Signature): '{"fact": "The earth is a cube", "varacity": false}', ] ) - dspy.settings.configure(lm=lm) - - generator = TypedPredictor(ExampleSignature) - examples = generator(config=dict(n=3)) - for completion in examples.completions: - assert isinstance(completion.fact, SyntheticFact), type(completion.fact) - assert examples.completions[0].fact == SyntheticFact( - fact="The sky is blue", varacity=True - ) - # If you have examples and want more - existing_examples = [ - dspy.Example(fact="The sky is blue", varacity=True), - dspy.Example(fact="The sky is green", varacity=False), - ] - trained = LabeledFewShot().compile(student=generator, trainset=existing_examples) + with dspy.settings.context(lm=lm, backend=None): - augmented_examples = trained(config=dict(n=3)) - for completion in augmented_examples.completions: - assert isinstance(completion.fact, SyntheticFact) + generator = TypedPredictor(ExampleSignature) + examples = generator(config=dict(n=3)) + for completion in examples.completions: + assert isinstance(completion.fact, SyntheticFact), type(completion.fact) + assert examples.completions[0].fact == SyntheticFact( + fact="The sky is blue", varacity=True + ) + + # If you have examples and want more + existing_examples = [ + dspy.Example(fact="The sky is blue", varacity=True), + dspy.Example(fact="The sky is green", varacity=False), + ] + trained = LabeledFewShot().compile(student=generator, trainset=existing_examples) + + 
augmented_examples = trained(config=dict(n=3)) + for completion in augmented_examples.completions: + assert isinstance(completion.fact, SyntheticFact) def test_list_input2(): @@ -896,38 +887,39 @@ class ScoredSignature(dspy.Signature): program = TypedChainOfThought(ScoredSignature) lm = DummyLM(["Thoughts", "Output"]) - dspy.settings.configure(lm=lm) - output = program( - attempted_signatures=[ - ScoredString(string="string 1", score=0.5), - ScoredString(string="string 2", score=0.4), - ScoredString(string="string 3", score=0.3), - ] - ).proposed_signature + with dspy.settings.context(lm=lm, backend=None): - print(lm.get_convo(-1)) + output = program( + attempted_signatures=[ + ScoredString(string="string 1", score=0.5), + ScoredString(string="string 2", score=0.4), + ScoredString(string="string 3", score=0.3), + ] + ).proposed_signature - assert output == "Output" + print(lm.get_convo(-1)) - assert lm.get_convo(-1) == textwrap.dedent( - """\ - Given the fields `attempted_signatures`, produce the fields `proposed_signature`. + assert output == "Output" - --- + assert lm.get_convo(-1) == textwrap.dedent( + """\ + Given the fields `attempted_signatures`, produce the fields `proposed_signature`. - Follow the following format. + --- - Attempted Signatures: ${attempted_signatures} - Reasoning: Let's think step by step in order to ${produce the proposed_signature}. We ... - Proposed Signature: ${proposed_signature} + Follow the following format. - --- + Attempted Signatures: ${attempted_signatures} + Reasoning: Let's think step by step in order to ${produce the proposed_signature}. We ... + Proposed Signature: ${proposed_signature} - Attempted Signatures: [{"string":"string 1","score":0.5},{"string":"string 2","score":0.4},{"string":"string 3","score":0.3}] - Reasoning: Let's think step by step in order to Thoughts - Proposed Signature: Output""" - ) + --- + + Attempted Signatures: [{"string":"string 1","score":0.5},{"string":"string 2","score":0.4},{"string":"string 3","score":0.3}] + Reasoning: Let's think step by step in order to Thoughts + Proposed Signature: Output""" + ) def test_generic_signature(): @@ -942,9 +934,10 @@ class GenericSignature(dspy.Signature, Generic[T]): assert predictor.signature.instructions == "My signature" lm = DummyLM(["23"]) - dspy.settings.configure(lm=lm) - assert predictor().output == 23 + with dspy.settings.context(lm=lm, backend=None): + + assert predictor().output == 23 def test_field_validator_in_signature(): @@ -964,7 +957,6 @@ def space_in_a(cls, a: str) -> str: _ = ValidatedSignature(a="with space") -@clean_up_lm_test def test_lm_as_validator(): @predictor def is_square(n: int) -> bool: @@ -979,14 +971,14 @@ def next_square(n: int) -> Annotated[int, AfterValidator(check_square)]: """What is the next square number after n?""" lm = DummyLM(["3", "False", "4", "True"]) - dspy.settings.configure(lm=lm) + + with dspy.settings.context(lm=lm, backend=None): - m = next_square(n=2) - lm.inspect_history(n=2) - assert m == 4 + m = next_square(n=2) + lm.inspect_history(n=2) + assert m == 4 -@clean_up_lm_test def test_annotated_validator(): def is_square(n: int) -> int: root = n**0.5 @@ -1001,12 +993,13 @@ class MySignature(dspy.Signature): next_square: Annotated[int, AfterValidator(is_square)] = dspy.OutputField() lm = DummyLM(["3", "4"]) - dspy.settings.configure(lm=lm) + + with dspy.settings.context(lm=lm, backend=None): - m = TypedPredictor(MySignature)(n=2).next_square - lm.inspect_history(n=2) + m = TypedPredictor(MySignature)(n=2).next_square + 
lm.inspect_history(n=2) - assert m == 4 + assert m == 4 def test_annotated_validator_functional(): @@ -1020,12 +1013,13 @@ def next_square(n: int) -> Annotated[int, AfterValidator(is_square)]: """What is the next square number after n?""" lm = DummyLM(["3", "4"]) - dspy.settings.configure(lm=lm) - m = next_square(n=2) - lm.inspect_history(n=2) + with dspy.settings.context(lm=lm, backend=None): + + m = next_square(n=2) + lm.inspect_history(n=2) - assert m == 4 + assert m == 4 def test_demos(): @@ -1038,31 +1032,32 @@ def test_demos(): ) lm = DummyLM(["Paris"]) - dspy.settings.configure(lm=lm) - assert program(input="What is the capital of France?").output == "Paris" + with dspy.settings.context(lm=lm, backend=None): - assert lm.get_convo(-1) == textwrap.dedent( - """\ - Given the fields `input`, produce the fields `output`. + assert program(input="What is the capital of France?").output == "Paris" - --- + assert lm.get_convo(-1) == textwrap.dedent( + """\ + Given the fields `input`, produce the fields `output`. - Follow the following format. + --- - Input: ${input} - Output: ${output} + Follow the following format. - --- + Input: ${input} + Output: ${output} - Input: What is the speed of light? - Output: 3e8 + --- - --- + Input: What is the speed of light? + Output: 3e8 - Input: What is the capital of France? - Output: Paris""" - ) + --- + + Input: What is the capital of France? + Output: Paris""" + ) def _test_demos_missing_input(): @@ -1071,29 +1066,30 @@ def _test_demos_missing_input(): student=dspy.TypedPredictor("input -> output, thoughts"), trainset=[ex.with_inputs("input") for ex in demos], ) - dspy.settings.configure(lm=DummyLM(["My thoughts", "Paris"])) - assert program(input="What is the capital of France?").output == "Paris" - assert dspy.settings.lm.get_convo(-1) == textwrap.dedent( - """\ - Given the fields `input`, produce the fields `output`. + with dspy.settings.context(lm=DummyLM(["My thoughts", "Paris"]), backend=None): + assert program(input="What is the capital of France?").output == "Paris" - --- + assert dspy.settings.lm.get_convo(-1) == textwrap.dedent( + """\ + Given the fields `input`, produce the fields `output`. - Follow the following format. + --- - Input: ${input} - Thoughts: ${thoughts} - Output: ${output} + Follow the following format. - --- + Input: ${input} + Thoughts: ${thoughts} + Output: ${output} - Input: What is the speed of light? - Output: 3e8 + --- - --- + Input: What is the speed of light? + Output: 3e8 - Input: What is the capital of France? - Thoughts: My thoughts - Output: Paris""" - ) + --- + + Input: What is the capital of France? 
+ Thoughts: My thoughts + Output: Paris""" + ) From 7100ae1f681da46ccdb4ae2de322955d89aff529 Mon Sep 17 00:00:00 2001 From: KCaverly Date: Tue, 19 Mar 2024 12:12:10 -0400 Subject: [PATCH 238/243] chore: clean up tests/primitives --- tests/primitives/test_program.py | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/tests/primitives/test_program.py b/tests/primitives/test_program.py index 92c26961ac..d7eb4e2ff3 100644 --- a/tests/primitives/test_program.py +++ b/tests/primitives/test_program.py @@ -3,10 +3,8 @@ Module, set_attribute_by_name, ) # Adjust the import based on your file structure -from dspy.utils import DummyLanguageModel +from dspy.utils import DummyLanguageModel, DummyLM from dspy.backends import TemplateBackend -from dspy.utils import DummyLM -from dspy.utils.testing import clean_up_lm_test class HopModule(dspy.Module): @@ -46,27 +44,22 @@ def test_predictors(): ), "All returned items should be instances of PredictMock" -@clean_up_lm_test def test_forward(): - dspy.settings.configure(experimental=False) program = HopModule() - dspy.settings.configure( - lm=DummyLM({"What is 1+1?": "let me check", "let me check": "2"}) - ) - result = program(question="What is 1+1?").answer - assert result == "2" + lm=DummyLM({"What is 1+1?": "let me check", "let me check": "2"}) + with dspy.settings.context(lm=lm, backend=None): + result = program(question="What is 1+1?").answer + assert result == "2" -def test_forward_experimental(): - dspy.settings.configure(experimental=True) - +def test_forward_with_backend(): program = HopModule() lm = DummyLanguageModel(answers=[["let me check"], ["2"]]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend, cache=False, lm=None) - result = program(question="What is 1+1?").answer - assert result == "2" + with dspy.settings.context(backend=backend, cache=False, lm=None): + result = program(question="What is 1+1?").answer + assert result == "2" def test_nested_named_predictors(): From ee1f0540d9c3ab5985dea30ff1355b14f568baff Mon Sep 17 00:00:00 2001 From: KCaverly Date: Tue, 19 Mar 2024 12:13:09 -0400 Subject: [PATCH 239/243] chore: clean up tests/signatures --- tests/signatures/test_signature.py | 39 +++++++++++++++--------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/tests/signatures/test_signature.py b/tests/signatures/test_signature.py index 4984e6d8d9..af07e0e9dd 100644 --- a/tests/signatures/test_signature.py +++ b/tests/signatures/test_signature.py @@ -5,7 +5,7 @@ from typing import List import dspy -from dspy.utils import DummyLM, clean_up_lm_test +from dspy.utils import DummyLM def test_field_types_and_custom_attributes(): @@ -191,7 +191,6 @@ class SubSignature(Signature): assert isinstance(value, SubSignature) -@clean_up_lm_test def test_multiline_instructions(): class MySignature(Signature): """First line @@ -202,21 +201,21 @@ class MySignature(Signature): predictor = dspy.Predict(MySignature) lm = DummyLM(["short answer"]) - dspy.settings.configure(lm=lm) - assert predictor().output == "short answer" - - assert lm.get_convo(-1) == textwrap.dedent( - """\ - First line - Second line - - --- - - Follow the following format. - - Output: ${output} - - --- - - Output: short answer""" - ) + with dspy.settings.context(lm=lm, backend=None): + assert predictor().output == "short answer" + + assert lm.get_convo(-1) == textwrap.dedent( + """\ + First line + Second line + + --- + + Follow the following format. 
+ + Output: ${output} + + --- + + Output: short answer""" + ) From 6d5a5ee14e06897a0352f80eaced7b9a281a1610 Mon Sep 17 00:00:00 2001 From: KCaverly Date: Tue, 19 Mar 2024 12:24:04 -0400 Subject: [PATCH 240/243] chore: clean up tests/teleprompt --- tests/teleprompt/test_bootstrap.py | 275 +++++++++++++++++------ tests/teleprompt/test_copro_optimizer.py | 247 ++++++++++---------- tests/teleprompt/test_knn_fewshot.py | 36 +-- tests/teleprompt/test_mipro_optimizer.py | 188 ++++++++-------- 4 files changed, 432 insertions(+), 314 deletions(-) diff --git a/tests/teleprompt/test_bootstrap.py b/tests/teleprompt/test_bootstrap.py index 65cc4783b7..6540ef7a63 100644 --- a/tests/teleprompt/test_bootstrap.py +++ b/tests/teleprompt/test_bootstrap.py @@ -1,7 +1,7 @@ import pytest import dspy from dspy.predict import Predict -from dspy.utils.dummies import DummyLanguageModel +from dspy.utils.dummies import DummyLanguageModel, DummyLM from dspy import Example from dspy.teleprompt import BootstrapFewShot from dspy.backends import TemplateBackend @@ -40,93 +40,196 @@ def __init__(self, signature): def forward(self, **kwargs): return self.predictor(**kwargs) - def test_compile_with_predict_instances(): # Create Predict instances for student and teacher # Note that dspy.Predict is not itself a module, so we can't use it directly here student = SimpleModule("input -> output") teacher = SimpleModule("input -> output") + lm = DummyLM(["Initial thoughts", "Finish[blue]"]) + with dspy.settings.context(lm=lm, backend=None): + + # Initialize BootstrapFewShot and compile the student + bootstrap = BootstrapFewShot( + metric=simple_metric, max_bootstrapped_demos=1, max_labeled_demos=1 + ) + compiled_student = bootstrap.compile( + student, teacher=teacher, trainset=trainset, valset=valset + ) + + assert compiled_student is not None, "Failed to compile student" + assert ( + hasattr(compiled_student, "_compiled") and compiled_student._compiled + ), "Student compilation flag not set" + +def test_compile_with_predict_instances_with_backend(): + # Create Predict instances for student and teacher + # Note that dspy.Predict is not itself a module, so we can't use it directly here + student = SimpleModule("input -> output") + teacher = SimpleModule("input -> output") + lm = DummyLanguageModel(answers=[["Initial thoughts", "Finish[blue]"]]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend, cache=False) + with dspy.settings.context(backend=backend, lm=None, cache=False): - # Initialize BootstrapFewShot and compile the student - bootstrap = BootstrapFewShot( - metric=simple_metric, max_bootstrapped_demos=1, max_labeled_demos=1 - ) - compiled_student = bootstrap.compile( - student, teacher=teacher, trainset=trainset, valset=valset - ) - - assert compiled_student is not None, "Failed to compile student" - assert ( - hasattr(compiled_student, "_compiled") and compiled_student._compiled - ), "Student compilation flag not set" + # Initialize BootstrapFewShot and compile the student + bootstrap = BootstrapFewShot( + metric=simple_metric, max_bootstrapped_demos=1, max_labeled_demos=1 + ) + compiled_student = bootstrap.compile( + student, teacher=teacher, trainset=trainset, valset=valset + ) + assert compiled_student is not None, "Failed to compile student" + assert ( + hasattr(compiled_student, "_compiled") and compiled_student._compiled + ), "Student compilation flag not set" def test_bootstrap_effectiveness(): # This test verifies if the bootstrapping process improves the student's predictions student = 
SimpleModule("input -> output") teacher = SimpleModule("input -> output") + lm = DummyLM(["blue", "Ring-ding-ding-ding-dingeringeding!"], follow_examples=True) + with dspy.settings.context(lm=lm, trace=[]): + + bootstrap = BootstrapFewShot( + metric=simple_metric, max_bootstrapped_demos=1, max_labeled_demos=1 + ) + compiled_student = bootstrap.compile( + student, teacher=teacher, trainset=trainset, valset=valset + ) + + # Check that the compiled student has the correct demos + assert len(compiled_student.predictor.demos) == 1 + assert compiled_student.predictor.demos[0].input == trainset[0].input + assert compiled_student.predictor.demos[0].output == trainset[0].output + + # Test the compiled student's prediction. + # We are using a DummyLM with follow_examples=True, which means that + # even though it would normally reply with "Ring-ding-ding-ding-dingeringeding!" + # on the second output, if it seems an example that perfectly matches the + # prompt, it will use that instead. That is why we expect "blue" here. + prediction = compiled_student(input=trainset[0].input) + assert prediction.output == trainset[0].output + + # For debugging + print("Convo") + print(lm.get_convo(-1)) + + assert lm.get_convo(-1) == textwrap.dedent( + """\ + Given the fields `input`, produce the fields `output`. + + --- + + Follow the following format. + + Input: ${input} + Output: ${output} + + --- + + Input: What is the color of the sky? + Output: blue + + --- + + Input: What is the color of the sky? + Output: blue""" + ) + +def test_bootstrap_effectiveness_with_backend(): + # This test verifies if the bootstrapping process improves the student's predictions + student = SimpleModule("input -> output") + teacher = SimpleModule("input -> output") lm = DummyLanguageModel( answers=[["blue"], ["blue"], ["Ring-dint-ding-ding-dingeringeding!"]] ) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend, cache=False, trace=[]) - # - # lm = DummyLM(["blue", "Ring-ding-ding-ding-dingeringeding!"], follow_examples=True) - # dspy.settings.configure(lm=lm, trace=[]) + with dspy.settings.context(backend=backend, cache=False, trace=[], lm=None): - bootstrap = BootstrapFewShot( - metric=simple_metric, max_bootstrapped_demos=1, max_labeled_demos=1 - ) - compiled_student = bootstrap.compile( - student, teacher=teacher, trainset=trainset, valset=valset - ) + bootstrap = BootstrapFewShot( + metric=simple_metric, max_bootstrapped_demos=1, max_labeled_demos=1 + ) + compiled_student = bootstrap.compile( + student, teacher=teacher, trainset=trainset, valset=valset + ) - # Check that the compiled student has the correct demos - assert len(compiled_student.predictor.demos) == 1 - assert compiled_student.predictor.demos[0].input == trainset[0].input - assert compiled_student.predictor.demos[0].output == trainset[0].output + # Check that the compiled student has the correct demos + assert len(compiled_student.predictor.demos) == 1 + assert compiled_student.predictor.demos[0].input == trainset[0].input + assert compiled_student.predictor.demos[0].output == trainset[0].output - # Test the compiled student's prediction. - # We are using a DummyLM with follow_examples=True, which means that - # even though it would normally reply with "Ring-ding-ding-ding-dingeringeding!" - # on the second output, if it seems an example that perfectly matches the - # prompt, it will use that instead. That is why we expect "blue" here. 
- prediction = compiled_student(input=trainset[0].input) - assert prediction.output == trainset[0].output + # Test the compiled student's prediction. + # We are using a DummyLM with follow_examples=True, which means that + # even though it would normally reply with "Ring-ding-ding-ding-dingeringeding!" + # on the second output, if it seems an example that perfectly matches the + # prompt, it will use that instead. That is why we expect "blue" here. + prediction = compiled_student(input=trainset[0].input) + assert prediction.output == trainset[0].output - # For debugging - assert backend.history[-1].prompt == textwrap.dedent( - """\ - Given the fields `input`, produce the fields `output`. + # For debugging + assert backend.history[-1].prompt == textwrap.dedent( + """\ + Given the fields `input`, produce the fields `output`. - --- + --- - Follow the following format. + Follow the following format. - Input: ${input} + Input: ${input} - Output: ${output} + Output: ${output} - --- + --- - Input: What is the color of the sky? + Input: What is the color of the sky? - Output: blue + Output: blue - --- + --- - Input: What is the color of the sky? + Input: What is the color of the sky? - Output:""" + Output:""" + ) + +def test_error_handling_during_bootstrap(): + """ + Test to verify error handling during the bootstrapping process + """ + + class BuggyModule(dspy.Module): + def __init__(self, signature): + super().__init__() + self.predictor = Predict(signature) + + def forward(self, **kwargs): + raise RuntimeError("Simulated error") + + student = SimpleModule("input -> output") + teacher = BuggyModule("input -> output") + + # Setup DummyLM to simulate an error scenario + lm = DummyLM( + [ + "Initial thoughts", # Simulate initial teacher's prediction + ] ) + with dspy.settings.context(lm=lm, backend=None): + bootstrap = BootstrapFewShot( + metric=simple_metric, + max_bootstrapped_demos=1, + max_labeled_demos=1, + max_errors=1, + ) -def test_error_handling_during_bootstrap(): + with pytest.raises(RuntimeError, match="Simulated error"): + bootstrap.compile(student, teacher=teacher, trainset=trainset, valset=valset) + +def test_error_handling_during_bootstrap_with_backend(): """ Test to verify error handling during the bootstrapping process """ @@ -145,20 +248,46 @@ def forward(self, **kwargs): # Setup DummyLM to simulate an error scenario lm = DummyLanguageModel(answers=[["Initial thoughts"]]) backend = TemplateBackend(lm=lm, attempts=1) - dspy.settings.configure(lm=lm) + with dspy.settings.context(lm=None, backend=backend, cache=False): - bootstrap = BootstrapFewShot( - metric=simple_metric, - max_bootstrapped_demos=1, - max_labeled_demos=1, - max_errors=1, + bootstrap = BootstrapFewShot( + metric=simple_metric, + max_bootstrapped_demos=1, + max_labeled_demos=1, + max_errors=1, + ) + + with pytest.raises(RuntimeError, match="Simulated error"): + bootstrap.compile(student, teacher=teacher, trainset=trainset, valset=valset) + +def test_validation_set_usage(): + """ + Test to ensure the validation set is correctly used during bootstrapping + """ + student = SimpleModule("input -> output") + teacher = SimpleModule("input -> output") + + lm = DummyLM( + [ + "Initial thoughts", + "Finish[blue]", # Expected output for both training and validation + ] ) + with dspy.settings.context(lm=lm, backend=None): - with pytest.raises(RuntimeError, match="Simulated error"): - bootstrap.compile(student, teacher=teacher, trainset=trainset, valset=valset) + bootstrap = BootstrapFewShot( + metric=simple_metric, 
max_bootstrapped_demos=1, max_labeled_demos=1 + ) + compiled_student = bootstrap.compile( + student, teacher=teacher, trainset=trainset, valset=valset + ) + # Check that validation examples are part of student's demos after compilation + assert len(compiled_student.predictor.demos) >= len( + valset + ), "Validation set not used in compiled student demos" -def test_validation_set_usage(): +def test_validation_set_usage_with_backend(): """ Test to ensure the validation set is correctly used during bootstrapping """ @@ -167,16 +296,16 @@ def test_validation_set_usage(): lm = DummyLanguageModel(answers=[["Initial thoughts"], ["Finish[blue]"]]) backend = TemplateBackend(lm=lm) - dspy.settings.configure(backend=backend) - - bootstrap = BootstrapFewShot( - metric=simple_metric, max_bootstrapped_demos=1, max_labeled_demos=1 - ) - compiled_student = bootstrap.compile( - student, teacher=teacher, trainset=trainset, valset=valset - ) - - # Check that validation examples are part of student's demos after compilation - assert len(compiled_student.predictor.demos) >= len( - valset - ), "Validation set not used in compiled student demos" + with dspy.settings.context(backend=backend, lm=None): + + bootstrap = BootstrapFewShot( + metric=simple_metric, max_bootstrapped_demos=1, max_labeled_demos=1 + ) + compiled_student = bootstrap.compile( + student, teacher=teacher, trainset=trainset, valset=valset + ) + + # Check that validation examples are part of student's demos after compilation + assert len(compiled_student.predictor.demos) >= len( + valset + ), "Validation set not used in compiled student demos" diff --git a/tests/teleprompt/test_copro_optimizer.py b/tests/teleprompt/test_copro_optimizer.py index cf1bc985d6..7c2ce5c902 100644 --- a/tests/teleprompt/test_copro_optimizer.py +++ b/tests/teleprompt/test_copro_optimizer.py @@ -44,31 +44,29 @@ def forward(self, **kwargs): return self.predictor(**kwargs) -@clean_up_lm_test def test_signature_optimizer_optimization_process(): optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) - dspy.settings.configure( - lm=DummyLM(["Optimized instruction 1", "Optimized instruction 2"]) - ) + lm=DummyLM(["Optimized instruction 1", "Optimized instruction 2"]) - student = SimpleModule("input -> output") + with dspy.settings.context(lm=lm, backend=None): - # Assuming the compile method of COPRO requires a student module, a development set, and evaluation kwargs - optimized_student = optimizer.compile( - student, - trainset=trainset, - eval_kwargs={"num_threads": 1, "display_progress": False}, - ) + student = SimpleModule("input -> output") + + # Assuming the compile method of COPRO requires a student module, a development set, and evaluation kwargs + optimized_student = optimizer.compile( + student, + trainset=trainset, + eval_kwargs={"num_threads": 1, "display_progress": False}, + ) - # Check that the optimized student has been modified from the original - # This check can be more specific based on how the optimization modifies the student - assert optimized_student is not student, "Optimization did not modify the student" + # Check that the optimized student has been modified from the original + # This check can be more specific based on how the optimization modifies the student + assert optimized_student is not student, "Optimization did not modify the student" - # Further tests can be added to verify the specifics of the optimization process, - # such as checking the instructions of the optimized student's predictors. 
+ # Further tests can be added to verify the specifics of the optimization process, + # such as checking the instructions of the optimized student's predictors. -@clean_up_lm_test def test_signature_optimizer_optimization_process_with_backend(): optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) @@ -80,52 +78,49 @@ def test_signature_optimizer_optimization_process_with_backend(): ] ) backend = TemplateBackend(lm=lm, attempts=5) - dspy.settings.configure(backend=backend, cache=False) - # dspy.settings.configure( - # lm=DummyLM(["Optimized instruction 1", "Optimized instruction 2"]) - # ) - student = SimpleModule("input -> output") - - # Assuming the compile method of COPRO requires a student module, a development set, and evaluation kwargs - optimized_student = optimizer.compile( - student, - trainset=trainset, - eval_kwargs={"num_threads": 1, "display_progress": False}, - ) - # Check that the optimized student has been modified from the original - # This check can be more specific based on how the optimization modifies the student - assert optimized_student is not student, "Optimization did not modify the student" + with dspy.settings.context(lm=None, backend=backend, cache=False): + student = SimpleModule("input -> output") + + # Assuming the compile method of COPRO requires a student module, a development set, and evaluation kwargs + optimized_student = optimizer.compile( + student, + trainset=trainset, + eval_kwargs={"num_threads": 1, "display_progress": False}, + ) + # Check that the optimized student has been modified from the original + # This check can be more specific based on how the optimization modifies the student + assert optimized_student is not student, "Optimization did not modify the student" - # Further tests can be added to verify the specifics of the optimization process, - # such as checking the instructions of the optimized student's predictors. + # Further tests can be added to verify the specifics of the optimization process, + # such as checking the instructions of the optimized student's predictors. 
-@clean_up_lm_test def test_signature_optimizer_statistics_tracking(): optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) optimizer.track_stats = True # Enable statistics tracking + lm=DummyLM(["Optimized instruction"]) - dspy.settings.configure(lm=DummyLM(["Optimized instruction"])) - student = SimpleModule("input -> output") - optimized_student = optimizer.compile( - student, - trainset=trainset, - eval_kwargs={"num_threads": 1, "display_progress": False}, - ) + with dspy.settings.context(lm=lm, backend=None): - # Verify that statistics have been tracked and attached to the optimized student - assert hasattr( - optimized_student, "total_calls" - ), "Total calls statistic not tracked" - assert hasattr( - optimized_student, "results_best" - ), "Best results statistics not tracked" + student = SimpleModule("input -> output") + optimized_student = optimizer.compile( + student, + trainset=trainset, + eval_kwargs={"num_threads": 1, "display_progress": False}, + ) + + # Verify that statistics have been tracked and attached to the optimized student + assert hasattr( + optimized_student, "total_calls" + ), "Total calls statistic not tracked" + assert hasattr( + optimized_student, "results_best" + ), "Best results statistics not tracked" # Assuming the setup_signature_optimizer fixture and simple_metric function are defined as before -@clean_up_lm_test def test_signature_optimizer_statistics_tracking_with_backend(): optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) optimizer.track_stats = True # Enable statistics tracking @@ -136,28 +131,26 @@ def test_signature_optimizer_statistics_tracking_with_backend(): ] ) backend = TemplateBackend(lm=lm, attempts=5) - dspy.settings.configure(backend=backend, cache=False) - # dspy.settings.configure(lm=DummyLM(["Optimized instruction"])) - student = SimpleModule("input -> output") - optimized_student = optimizer.compile( - student, - trainset=trainset, - eval_kwargs={"num_threads": 1, "display_progress": False}, - ) - - # Verify that statistics have been tracked and attached to the optimized student - assert hasattr( - optimized_student, "total_calls" - ), "Total calls statistic not tracked" - assert hasattr( - optimized_student, "results_best" - ), "Best results statistics not tracked" + with dspy.settings.context(lm=None, backend=backend, cache=False): + student = SimpleModule("input -> output") + optimized_student = optimizer.compile( + student, + trainset=trainset, + eval_kwargs={"num_threads": 1, "display_progress": False}, + ) + + # Verify that statistics have been tracked and attached to the optimized student + assert hasattr( + optimized_student, "total_calls" + ), "Total calls statistic not tracked" + assert hasattr( + optimized_student, "results_best" + ), "Best results statistics not tracked" # Assuming the setup_signature_optimizer fixture and simple_metric function are defined as before -@clean_up_lm_test def test_optimization_and_output_verification(): lm = DummyLM( [ @@ -165,44 +158,44 @@ def test_optimization_and_output_verification(): "Optimized Prefix", ] ) - dspy.settings.configure(lm=lm) - optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) + with dspy.settings.context(lm=lm, backend=None): + optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) - student = SimpleModule("input -> output") + student = SimpleModule("input -> output") - # Compile the student with the optimizer - optimized_student = optimizer.compile( - 
student, - trainset=trainset, - eval_kwargs={"num_threads": 1, "display_progress": False}, - ) + # Compile the student with the optimizer + optimized_student = optimizer.compile( + student, + trainset=trainset, + eval_kwargs={"num_threads": 1, "display_progress": False}, + ) - # Simulate calling the optimized student with a new input - test_input = "What is the capital of France?" - prediction = optimized_student(input=test_input) + # Simulate calling the optimized student with a new input + test_input = "What is the capital of France?" + prediction = optimized_student(input=test_input) - print(lm.get_convo(-1)) + print(lm.get_convo(-1)) - assert prediction.output == "No more responses" + assert prediction.output == "No more responses" - assert lm.get_convo(-1) == textwrap.dedent( - """\ - Optimized Prompt + assert lm.get_convo(-1) == textwrap.dedent( + """\ + Optimized Prompt - --- + --- - Follow the following format. + Follow the following format. - Input: ${input} - Reasoning: Let's think step by step in order to ${produce the output}. We ... - Optimized Prefix ${output} + Input: ${input} + Reasoning: Let's think step by step in order to ${produce the output}. We ... + Optimized Prefix ${output} - --- + --- - Input: What is the capital of France? - Reasoning: Let's think step by step in order to No more responses - Optimized Prefix No more responses""" - ) + Input: What is the capital of France? + Reasoning: Let's think step by step in order to No more responses + Optimized Prefix No more responses""" + ) def test_statistics_tracking_during_optimization(): @@ -214,36 +207,36 @@ def test_statistics_tracking_during_optimization(): ] ) backend = TemplateBackend(lm=lm, attempts=5) - dspy.settings.configure(backend=backend, cache=False) - - optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) - optimizer.track_stats = True # Enable statistics tracking - - student = SimpleModule("input -> output") - optimized_student = optimizer.compile( - student, - trainset=trainset, - eval_kwargs={"num_threads": 1, "display_progress": False}, - ) - - # Verify that statistics have been tracked - assert hasattr( - optimized_student, "total_calls" - ), "Optimizer did not track total metric calls" - assert optimized_student.total_calls > 0, "Optimizer reported no metric calls" - - # Check if the results_best and results_latest contain valid statistics - assert ( - "results_best" in optimized_student.__dict__ - ), "Optimizer did not track the best results" - assert ( - "results_latest" in optimized_student.__dict__ - ), "Optimizer did not track the latest results" - assert ( - len(optimized_student.results_best) > 0 - ), "Optimizer did not properly populate the best results statistics" - assert ( - len(optimized_student.results_latest) > 0 - ), "Optimizer did not properly populate the latest results statistics" - - # Additional detailed checks can be added here to verify the contents of the tracked statistics + with dspy.settings.context(backend=backend, lm=None, cache=False): + + optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4) + optimizer.track_stats = True # Enable statistics tracking + + student = SimpleModule("input -> output") + optimized_student = optimizer.compile( + student, + trainset=trainset, + eval_kwargs={"num_threads": 1, "display_progress": False}, + ) + + # Verify that statistics have been tracked + assert hasattr( + optimized_student, "total_calls" + ), "Optimizer did not track total metric calls" + assert 
optimized_student.total_calls > 0, "Optimizer reported no metric calls" + + # Check if the results_best and results_latest contain valid statistics + assert ( + "results_best" in optimized_student.__dict__ + ), "Optimizer did not track the best results" + assert ( + "results_latest" in optimized_student.__dict__ + ), "Optimizer did not track the latest results" + assert ( + len(optimized_student.results_best) > 0 + ), "Optimizer did not properly populate the best results statistics" + assert ( + len(optimized_student.results_latest) > 0 + ), "Optimizer did not properly populate the latest results statistics" + + # Additional detailed checks can be added here to verify the contents of the tracked statistics diff --git a/tests/teleprompt/test_knn_fewshot.py b/tests/teleprompt/test_knn_fewshot.py index b267d3dce8..dab67eb3f4 100644 --- a/tests/teleprompt/test_knn_fewshot.py +++ b/tests/teleprompt/test_knn_fewshot.py @@ -51,22 +51,22 @@ def _test_knn_few_shot_compile(setup_knn_few_shot): # Setup DummyLM with a response for a query similar to one of the training examples lm = DummyLM(["Madrid", "10"]) - dspy.settings.configure(lm=lm) # Responses for the capital of Spain and the result of 5+5) + with dspy.settings.context(lm=lm, backend=None): - knn_few_shot = setup_knn_few_shot - trainset = knn_few_shot.KNN.trainset - compiled_student = knn_few_shot.compile(student, teacher=teacher, trainset=trainset, valset=None) - - assert len(compiled_student.predictor.demos) == 1 - assert compiled_student.predictor.demos[0].input == trainset[0].input - assert compiled_student.predictor.demos[0].output == trainset[0].output - - # Simulate a query that is similar to one of the training examples - output = compiled_student.forward(input = "What is the capital of Spain?").output - - print("CONVO") - print(lm.get_convo(-1)) - - # Validate that the output corresponds to one of the expected DummyLM responses - # This assumes the compiled_student's forward method will execute the predictor with the given query - assert output in ["Madrid", "10"], "The compiled student did not return the correct output based on the query" + knn_few_shot = setup_knn_few_shot + trainset = knn_few_shot.KNN.trainset + compiled_student = knn_few_shot.compile(student, teacher=teacher, trainset=trainset, valset=None) + + assert len(compiled_student.predictor.demos) == 1 + assert compiled_student.predictor.demos[0].input == trainset[0].input + assert compiled_student.predictor.demos[0].output == trainset[0].output + + # Simulate a query that is similar to one of the training examples + output = compiled_student.forward(input = "What is the capital of Spain?").output + + print("CONVO") + print(lm.get_convo(-1)) + + # Validate that the output corresponds to one of the expected DummyLM responses + # This assumes the compiled_student's forward method will execute the predictor with the given query + assert output in ["Madrid", "10"], "The compiled student did not return the correct output based on the query" diff --git a/tests/teleprompt/test_mipro_optimizer.py b/tests/teleprompt/test_mipro_optimizer.py index 09d66cd949..e41d0e1242 100644 --- a/tests/teleprompt/test_mipro_optimizer.py +++ b/tests/teleprompt/test_mipro_optimizer.py @@ -4,7 +4,7 @@ import dspy from dsp.modules import LM from dspy.teleprompt.signature_opt_bayesian import MIPRO -from dspy.utils import DummyLM, clean_up_lm_test +from dspy.utils import DummyLM from dspy import Example @@ -152,54 +152,22 @@ def forward(self, **kwargs): return self.predictor(**kwargs) 
-@clean_up_lm_test def test_signature_optimizer_optimization_process(): lm = ConditionalLM() - dspy.settings.configure(lm=lm) + with dspy.settings.context(lm=lm, backend=None): - student = SimpleModule(signature="input -> output") + student = SimpleModule(signature="input -> output") - optimizer = MIPRO( - metric=simple_metric, - num_candidates=10, - init_temperature=1.4, - verbose=False, - track_stats=False, - ) - - # Adjustments: Include required parameters for the compile method - optimized_student = optimizer.compile( - student=student, - trainset=trainset, - num_trials=10, - max_bootstrapped_demos=3, - max_labeled_demos=5, - eval_kwargs={"num_threads": 1, "display_progress": False}, - requires_permission_to_run=False, - ) - - assert len(optimized_student.predictor.demos) == 5 - - -@clean_up_lm_test -def test_signature_optimizer_bad_lm(): - dspy.settings.configure( - lm=DummyLM([f"Optimized instruction {i}" for i in range(30)]) - ) - student = SimpleModule(signature="input -> output") - optimizer = MIPRO( - metric=simple_metric, - num_candidates=10, - init_temperature=1.4, - verbose=False, - track_stats=False, - ) + optimizer = MIPRO( + metric=simple_metric, + num_candidates=10, + init_temperature=1.4, + verbose=False, + track_stats=False, + ) - # Krista: when the code tries to generate bootstrapped examples, the examples are generated using DummyLM, - # which only outputs "Optimized instruction i" this means that none of the bootstrapped examples are successful, - # and therefore the set of examples that we're using to generate new prompts is empty - with pytest.raises(ValueError): - _optimized_student = optimizer.compile( + # Adjustments: Include required parameters for the compile method + optimized_student = optimizer.compile( student=student, trainset=trainset, num_trials=10, @@ -209,76 +177,104 @@ def test_signature_optimizer_bad_lm(): requires_permission_to_run=False, ) + assert len(optimized_student.predictor.demos) == 5 + + +def test_signature_optimizer_bad_lm(): + lm=DummyLM([f"Optimized instruction {i}" for i in range(30)]) + with dspy.settings.context(lm=lm, backend=None): + student = SimpleModule(signature="input -> output") + optimizer = MIPRO( + metric=simple_metric, + num_candidates=10, + init_temperature=1.4, + verbose=False, + track_stats=False, + ) + + # Krista: when the code tries to generate bootstrapped examples, the examples are generated using DummyLM, + # which only outputs "Optimized instruction i" this means that none of the bootstrapped examples are successful, + # and therefore the set of examples that we're using to generate new prompts is empty + with pytest.raises(ValueError): + _optimized_student = optimizer.compile( + student=student, + trainset=trainset, + num_trials=10, + max_bootstrapped_demos=3, + max_labeled_demos=5, + eval_kwargs={"num_threads": 1, "display_progress": False}, + requires_permission_to_run=False, + ) + -@clean_up_lm_test def test_optimization_and_output_verification(): # Make a language model that is always right, except on the last # example in the train set. 
lm = ConditionalLM() - dspy.settings.configure(lm=lm) + with dspy.settings.context(lm=lm, backend=None): + + optimizer = MIPRO( + metric=simple_metric, + num_candidates=10, + init_temperature=1.4, + verbose=False, + track_stats=True, + ) - optimizer = MIPRO( - metric=simple_metric, - num_candidates=10, - init_temperature=1.4, - verbose=False, - track_stats=True, - ) + student = SimpleModule("input -> output") - student = SimpleModule("input -> output") - - # Compile the student with the optimizer - optimized_student = optimizer.compile( - student=student, - trainset=trainset, - num_trials=4, - max_bootstrapped_demos=2, - max_labeled_demos=3, - eval_kwargs={"num_threads": 1, "display_progress": False}, - requires_permission_to_run=False, - ) + # Compile the student with the optimizer + optimized_student = optimizer.compile( + student=student, + trainset=trainset, + num_trials=4, + max_bootstrapped_demos=2, + max_labeled_demos=3, + eval_kwargs={"num_threads": 1, "display_progress": False}, + requires_permission_to_run=False, + ) - # Simulate calling the optimized student with a new input - test_input = "What is the capital of Spain?" - prediction = optimized_student(input=test_input) + # Simulate calling the optimized student with a new input + test_input = "What is the capital of Spain?" + prediction = optimized_student(input=test_input) - print("CORRECT ANSWER") - print(lm.get_convo(-1)) + print("CORRECT ANSWER") + print(lm.get_convo(-1)) - assert prediction.output == "Madrid" + assert prediction.output == "Madrid" - expected_lm_output = textwrap.dedent( - """\ - Input: + expected_lm_output = textwrap.dedent( + """\ + Input: - --- - - Follow the following format. - - Input: ${input} - Reasoning: Let's think step by step in order to ${produce the output}. We ... - Output: ${output} + --- + + Follow the following format. + + Input: ${input} + Reasoning: Let's think step by step in order to ${produce the output}. We ... + Output: ${output} - --- + --- - Input: What is the capital of France? - Output: Paris + Input: What is the capital of France? + Output: Paris - --- + --- - Input: What is the capital of Norway? - Output: Oslo + Input: What is the capital of Norway? + Output: Oslo - --- + --- - Input: What does the fox say? - Output: Ring-ding-ding-ding-dingeringeding! + Input: What does the fox say? + Output: Ring-ding-ding-ding-dingeringeding! - --- + --- - Input: What is the capital of Spain? - Reasoning: Let's think step by step in order to think deeply. - Output: Madrid""" - ) + Input: What is the capital of Spain? + Reasoning: Let's think step by step in order to think deeply. 
+ Output: Madrid""" + ) - assert lm.get_convo(-1) == expected_lm_output, lm.get_convo(-1) + assert lm.get_convo(-1) == expected_lm_output, lm.get_convo(-1) From a51c60f1131b3d1ec44c9d06ad57ae14537dfbec Mon Sep 17 00:00:00 2001 From: KCaverly Date: Tue, 19 Mar 2024 12:25:25 -0400 Subject: [PATCH 241/243] fix: remove clean_up_lm_test --- dspy/utils/__init__.py | 1 - dspy/utils/testing.py | 12 ------------ tests/teleprompt/test_copro_optimizer.py | 2 +- 3 files changed, 1 insertion(+), 14 deletions(-) delete mode 100644 dspy/utils/testing.py diff --git a/dspy/utils/__init__.py b/dspy/utils/__init__.py index 4460f50907..9f8b201f6b 100644 --- a/dspy/utils/__init__.py +++ b/dspy/utils/__init__.py @@ -1,2 +1 @@ from .dummies import * -from .testing import * diff --git a/dspy/utils/testing.py b/dspy/utils/testing.py deleted file mode 100644 index ec29d74491..0000000000 --- a/dspy/utils/testing.py +++ /dev/null @@ -1,12 +0,0 @@ -import decorator - -import dspy - - -def clean_up_lm_test(func): - def wrapper(func, *args, **kwargs): - dspy.settings.configure(lm=None, backend=None, cache=False) - func(*args, **kwargs) - dspy.settings.configure(lm=None, backend=None, cache=False) - - return decorator.decorator(wrapper, func) diff --git a/tests/teleprompt/test_copro_optimizer.py b/tests/teleprompt/test_copro_optimizer.py index 7c2ce5c902..6188c0f484 100644 --- a/tests/teleprompt/test_copro_optimizer.py +++ b/tests/teleprompt/test_copro_optimizer.py @@ -2,7 +2,7 @@ import dspy from dspy.backends.template import TemplateBackend from dspy.teleprompt.signature_opt import COPRO -from dspy.utils import DummyLM, DummyLanguageModel, clean_up_lm_test +from dspy.utils import DummyLM, DummyLanguageModel from dspy import Example From ad265b0d1227cf7d8bece9d39e771eac5896ecbf Mon Sep 17 00:00:00 2001 From: KCaverly Date: Tue, 19 Mar 2024 15:50:28 -0400 Subject: [PATCH 242/243] fix: remove commented out code --- dspy/backends/template.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/dspy/backends/template.py b/dspy/backends/template.py index 65e6917d3c..0ce025d690 100644 --- a/dspy/backends/template.py +++ b/dspy/backends/template.py @@ -29,12 +29,12 @@ def generate( demos = [] # TODO: Move this check to logging - # if not all(k in kwargs for k in signature.input_fields): - # present = [k for k in signature.input_fields if k in kwargs] - # missing = [k for k in signature.input_fields if k not in kwargs] - # print( - # f"WARNING: Not all input fields were provided to module. Present: {present}. Missing: {missing}.", - # ) + if not all(k in kwargs for k in signature.input_fields): + present = [k for k in signature.input_fields if k in kwargs] + missing = [k for k in signature.input_fields if k not in kwargs] + print( + f"WARNING: Not all input fields were provided to module. Present: {present}. 
Missing: {missing}.", + ) # Generate Example example = Example(demos=demos, **kwargs) From a76b97dc40d046524c80ef67d08e1db1425f0b37 Mon Sep 17 00:00:00 2001 From: KCaverly Date: Fri, 22 Mar 2024 11:38:07 -0400 Subject: [PATCH 243/243] fix: moved BaseLM to output strings versus dictionary of content --- dspy/backends/json.py | 2 +- dspy/backends/lm/base.py | 5 ++--- dspy/backends/lm/litellm.py | 6 +++--- dspy/backends/template.py | 2 +- dspy/utils/dummies.py | 8 ++++---- tests/backends/test_template_backend.py | 2 -- 6 files changed, 11 insertions(+), 14 deletions(-) diff --git a/dspy/backends/json.py b/dspy/backends/json.py index 7271cef833..b159d65b6a 100644 --- a/dspy/backends/json.py +++ b/dspy/backends/json.py @@ -46,7 +46,7 @@ def generate( **config, ) extracted = [ - json.loads(prediction["message"]["content"]) + json.loads(prediction) for prediction in pred.generations ] diff --git a/dspy/backends/lm/base.py b/dspy/backends/lm/base.py index bbcf117fc2..b0d41ad9e6 100644 --- a/dspy/backends/lm/base.py +++ b/dspy/backends/lm/base.py @@ -11,12 +11,11 @@ _cachedir = os.environ.get("DSP_CACHEDIR") or str(Path.home() / ".joblib_cache") _cache_memory = Memory(_cachedir, verbose=0) -GeneratedContent = dict[str, t.Any] class LMOutput(BaseModel): prompt: str - generations: list[GeneratedContent] + generations: list[str] kwargs: dict[str, t.Any] @@ -46,7 +45,7 @@ def generate( self, prompt: str, **kwargs, - ) -> list[GeneratedContent]: + ) -> list[str]: """Generates `n` predictions for the signature output.""" ... diff --git a/dspy/backends/lm/litellm.py b/dspy/backends/lm/litellm.py index 332159afc8..f67181f2a0 100644 --- a/dspy/backends/lm/litellm.py +++ b/dspy/backends/lm/litellm.py @@ -3,7 +3,7 @@ from litellm import ModelResponse, completion, token_counter from pydantic import Field -from .base import BaseLM, GeneratedContent +from .base import BaseLM class LiteLM(BaseLM): @@ -22,7 +22,7 @@ def generate( self, prompt: str, **kwargs, - ) -> list[GeneratedContent]: + ) -> list[str]: """Generates `n` predictions for the signature output.""" options = {**self.STANDARD_PARAMS, **self.default_params, **kwargs} # We are not streaming this content in, therefore we can assume it'll always be a litellm ModelResponse @@ -35,7 +35,7 @@ def generate( if type(response) != ModelResponse: raise AssertionError("Response from completion incorrect type/format") - return [dict(c) for c in response.choices if c["finish_reason"] != "length"] + return [c["message"]["content"] for c in response.choices if c["finish_reason"] != "length"] def count_tokens(self, prompt: str) -> int: """Counts the number of tokens for a specific prompt.""" diff --git a/dspy/backends/template.py b/dspy/backends/template.py index 0ce025d690..270591cfb4 100644 --- a/dspy/backends/template.py +++ b/dspy/backends/template.py @@ -50,7 +50,7 @@ def generate( # This returns a list of Examples extracted_examples = [ - template.extract(example, prediction["message"]["content"]) + template.extract(example, prediction) for prediction in pred.generations ] diff --git a/dspy/utils/dummies.py b/dspy/utils/dummies.py index ea80df36ad..0688365cf9 100644 --- a/dspy/utils/dummies.py +++ b/dspy/utils/dummies.py @@ -7,7 +7,7 @@ from dsp.modules import LM from dsp.utils.utils import dotdict -from dspy.backends.lm.base import BaseLM, GeneratedContent +from dspy.backends.lm.base import BaseLM from dspy.primitives.example import Example from dspy.primitives.prediction import ( Completions, @@ -173,12 +173,12 @@ class DummyLanguageModel(BaseLM): answers: 
list[list[str]] step: int = 0 - def generate(self, prompt: str, **kwargs) -> t.List[GeneratedContent]: + def generate(self, prompt: str, **kwargs) -> t.List[str]: if len(self.answers) == 1: - return [{"message": {"content": content}} for content in self.answers[0]] + return [content for content in self.answers[0]] output = [ - {"message": {"content": content}} for content in self.answers[self.step] + content for content in self.answers[self.step] ] self.step += 1 diff --git a/tests/backends/test_template_backend.py b/tests/backends/test_template_backend.py index edfee4a087..f666e467c4 100644 --- a/tests/backends/test_template_backend.py +++ b/tests/backends/test_template_backend.py @@ -1,8 +1,6 @@ import pytest import dspy -import typing as t from dspy.signatures.signature import Signature, InputField, OutputField -from dspy.backends.lm.base import BaseLM, GeneratedContent from dspy.backends.template import TemplateBackend from dspy.utils.dummies import DummyLanguageModel
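To make the contract that PATCH 243/243 settles on concrete: `BaseLM.generate` now returns bare completion strings, so callers such as `json.loads(prediction)` in `json.py` and `template.extract(example, prediction)` in `template.py` consume each generation directly, with no `["message"]["content"]` unwrapping. Below is a minimal, self-contained sketch of that string-based flow; it is illustrative only and not part of the patch series. The `EchoLM` class is a hypothetical stand-in model, and only the `LMOutput` shape mirrors the patched `dspy/backends/lm/base.py`.

import typing as t

from pydantic import BaseModel


class LMOutput(BaseModel):
    # Mirrors the patched dspy/backends/lm/base.py: generations are plain
    # completion strings rather than {"message": {"content": ...}} dictionaries.
    prompt: str
    generations: list[str]
    kwargs: dict[str, t.Any]


class EchoLM:
    """Hypothetical stand-in LM: returns the prompt back `n` times."""

    def generate(self, prompt: str, n: int = 1, **kwargs: t.Any) -> list[str]:
        # Each generation is a bare string, so downstream consumers can use
        # it directly (e.g. parse it as JSON or extract template fields).
        return [prompt] * n


lm = EchoLM()
output = LMOutput(prompt="hello", generations=lm.generate("hello", n=2), kwargs={})
assert all(isinstance(g, str) for g in output.generations)

Under the old contract each entry in `generations` was a dictionary and every backend had to reach into the message payload; moving that unwrapping into the LM adapter (as `LiteLM.generate` does above) keeps the backends agnostic to the provider's response format.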