diff --git a/.gitignore b/.gitignore
index 1f54b61cc..54c7b88c0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -65,3 +65,9 @@ graph-sitter-types/typings/**
 coverage.json
 tests/integration/verified_codemods/codemod_data/repo_commits.json
 .benchmarks/*
+
+# SWE Bench results
+results.*.json
+codegen-examples/examples/swebench_agent_run/results/*
+codegen-examples/examples/swebench_agent_run/predictions/*
+codegen-examples/examples/swebench_agent_run/logs/*
diff --git a/codegen-examples/examples/swebench_agent_run/.env.template b/codegen-examples/examples/swebench_agent_run/.env.template
new file mode 100644
index 000000000..3e799557e
--- /dev/null
+++ b/codegen-examples/examples/swebench_agent_run/.env.template
@@ -0,0 +1,4 @@
+OPENAI_API_KEY= # Your OpenAI API key
+ANTHROPIC_API_KEY= # Your Anthropic API key
+LANGSMITH_API_KEY= # Your LangSmith API key
+LANGCHAIN_TRACING_V2= # `true` for tracing, `false` for no tracing
diff --git a/codegen-examples/examples/swebench_agent_run/README.md b/codegen-examples/examples/swebench_agent_run/README.md
new file mode 100644
index 000000000..daf509e1f
--- /dev/null
+++ b/codegen-examples/examples/swebench_agent_run/README.md
@@ -0,0 +1,33 @@
+# INSTRUCTIONS
+
+1. Create a `.env` file in the root directory and add your API keys.
+
+1. `cd` into the `codegen-examples/examples/swebench_agent_run` directory.
+
+1. Create a `.venv` with `uv venv` and activate it with `source .venv/bin/activate`.
+
+1. Install the codegen dependencies with `uv add codegen`.
+
+- Note: If you'd like to install the dependencies in the global environment, you can use `uv pip install -e ../../../`. This will allow you to test modifications to the codegen codebase. You will need to rerun `uv pip install -e ../../../` each time you make changes to the codebase.
+
+5. Ensure that you have a Modal account and profile set up. If you don't have one, you can create one at https://modal.com/
+
+1. Activate the appropriate Modal profile with `uv modal profile activate `
+
+1. Launch the Modal app with `uv run modal deploy --env= entry_point.py`
+
+1. Run the evaluation with `python run_eval.py` and the desired options:
+
+- ```bash
+  $ python run_eval.py --help
+  Usage: run_eval.py [OPTIONS]
+
+  Options:
+    --use-existing-preds  Use existing predictions instead of
+                          generating new ones.
+    --dataset [princeton-nlp/SWE-bench_Lite|princeton-nlp/SWE-bench|princeton-nlp/SWE-bench_Verified]
+                          The dataset to use.
+    --length INTEGER      The number of examples to process.
+    --instance-id TEXT    The instance ID of the example to process.
+    --help                Show this message and exit.
+  ```
diff --git a/codegen-examples/examples/swebench_agent_run/entry_point.py b/codegen-examples/examples/swebench_agent_run/entry_point.py
new file mode 100644
index 000000000..0d5007419
--- /dev/null
+++ b/codegen-examples/examples/swebench_agent_run/entry_point.py
@@ -0,0 +1,19 @@
+from codegen.extensions.swebench.utils import SweBenchExample
+from codegen.extensions.swebench.harness import run_agent_on_entry
+import modal
+
+image = (
+    modal.Image.debian_slim(python_version="3.13")
+    .apt_install("git")
+    .pip_install("fastapi[standard]")
+    .copy_local_dir("../../../", "/root/codegen", ignore=[".venv", "**/.venv", "tests", "**/tests"])
+    .run_commands("pip install -e /root/codegen")
+)
+
+app = modal.App(name="swebench-agent-run", image=image, secrets=[modal.Secret.from_dotenv()])
+
+
+@app.function(timeout=5 * 60)
+async def run_agent_modal(entry: SweBenchExample):
+    """Modal function to process a single example from the SWE-bench dataset."""
+    return run_agent_on_entry(entry)
diff --git a/codegen-examples/examples/swebench_agent_run/pyproject.toml b/codegen-examples/examples/swebench_agent_run/pyproject.toml
new file mode 100644
index 000000000..dd1b0587c
--- /dev/null
+++ b/codegen-examples/examples/swebench_agent_run/pyproject.toml
@@ -0,0 +1,7 @@
+[project]
+name = "swebench-agent-run"
+version = "0.1.0"
+description = "Run a Codegen agent on SWE-bench examples via Modal"
+readme = "README.md"
+requires-python = ">=3.12, <3.14"
+dependencies = []
diff --git a/codegen-examples/examples/swebench_agent_run/run_eval.py b/codegen-examples/examples/swebench_agent_run/run_eval.py
new file mode 100644
index 000000000..d339b4418
--- /dev/null
+++ b/codegen-examples/examples/swebench_agent_run/run_eval.py
@@ -0,0 +1,168 @@
+import asyncio
+import json
+import traceback
+from pathlib import Path
+import modal
+import click
+from datetime import datetime
+from codegen.extensions.swebench.utils import SWEBenchDataset, get_swe_bench_example, get_swe_bench_examples
+from codegen.extensions.swebench.report import generate_report
+
+PREDS_DNAME = Path(__file__).parent / "predictions"
+LOG_DIR = Path(__file__).parent / "logs"
+
+run_agent_modal = modal.Function.lookup("swebench-agent-run", "run_agent_modal")
+
+
+async def process_batch(examples, batch_size=10):
+    """Process a batch of examples concurrently.
+
+    Args:
+        examples: List of SweBenchExample objects to process
+        batch_size: Number of examples to process concurrently.
+            Default is 10, which provides good parallelization
+            while staying well within Modal's limits.
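+
+    Example (illustrative; assumes `examples` came from `get_swe_bench_examples`
+    and the Modal app from `entry_point.py` has been deployed):
+        results = await process_batch(examples, batch_size=10)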
+    """
+    results = []
+
+    # Process examples in batches
+    for i in range(0, len(examples), batch_size):
+        batch = examples[i : i + batch_size]
+
+        # Create tasks for this batch
+        batch_tasks = [run_agent_modal.remote.aio(example) for example in batch]
+
+        # Wait for all tasks in this batch to complete
+        print(f"Processing batch {i // batch_size + 1}/{(len(examples) + batch_size - 1) // batch_size} (examples {i + 1}-{min(i + batch_size, len(examples))})")
+
+        try:
+            batch_results = await asyncio.gather(*batch_tasks, return_exceptions=True)
+
+            # Store results
+            for example, result in zip(batch, batch_results):
+                error_info = None
+
+                if isinstance(result, Exception):
+                    error_type = type(result).__name__
+                    error_info = {
+                        "error_type": error_type,
+                        "error_message": str(result),
+                        "traceback": traceback.format_exception(type(result), result, result.__traceback__),
+                    }
+
+                    if isinstance(result, modal.exception.Error):
+                        error_info["modal_error_code"] = getattr(result, "code", None)
+                        error_info["modal_error_details"] = getattr(result, "details", None)
+
+                    print(f"Error processing {example.instance_id}:")
+                    print(f"Type: {error_type}")
+                    print(f"Message: {str(result)}")
+                    print("Traceback:")
+                    print("".join(error_info["traceback"]))
+
+                    results.append({"instance_id": example.instance_id, "status": "error", "error_info": error_info})
+                else:
+                    if result is None:
+                        print(f"Warning: Null result for {example.instance_id}")
+                        results.append({"instance_id": example.instance_id, "status": "error", "error_info": {"error_type": "NullResult", "error_message": "Process returned None"}})
+                    else:
+                        results.append(result)
+
+        except Exception as e:
+            print("Batch processing error:")
+            print(f"Type: {type(e).__name__}")
+            print(f"Message: {str(e)}")
+            traceback.print_exc()
+
+            # Mark all examples in the batch as failed
+            for example in batch:
+                results.append(
+                    {
+                        "instance_id": example.instance_id,
+                        "status": "error",
+                        "error_info": {"error_type": type(e).__name__, "error_message": str(e), "traceback": traceback.format_exc(), "batch_failure": True},
+                    }
+                )
+
+    return results
+
+
+async def run_eval(use_existing_preds, dataset, length, instance_id=None):
+    dataset = SWEBenchDataset(dataset)
+    if instance_id:
+        examples = [get_swe_bench_example(instance_id, dataset=dataset)]
+    else:
+        examples = get_swe_bench_examples(dataset=dataset, length=length)
+
+    try:
+        if not use_existing_preds:
+            print(f"Processing {len(examples)} examples...")
+
+            # Create output directory if it doesn't exist
+            PREDS_DNAME.mkdir(exist_ok=True)
+            results_dir = PREDS_DNAME / "results"
+            results_dir.mkdir(exist_ok=True)
+
+            # Create a timestamp for this run
+            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+
+            # Process all examples in parallel batches
+            results = await process_batch(examples)
+
+            # Save individual results
+            for result in results:
+                if result and "instance_id" in result:
+                    instance_id = result["instance_id"]
+                    output_file = results_dir / f"{instance_id}.json"
+                    with open(output_file, "w") as f:
+                        json.dump(result, f, indent=4)
+
+            # Save summary file
+            summary_file = results_dir / f"summary_{timestamp}.json"
+            summary = {
+                "timestamp": timestamp,
+                "total_examples": len(examples),
+                "successful": len([r for r in results if r and "status" not in r]),
+                "failed": len([r for r in results if r and "status" in r and r["status"] == "error"]),
+                "error_types": {},
+                "results": results,
+            }
+
+            # Collect error statistics
+            for result in results:
+                if result and "status" in result and result["status"] == "error":
+                    error_type =
result.get("error_info", {}).get("error_type", "Unknown")
+                    summary["error_types"][error_type] = summary["error_types"].get(error_type, 0) + 1
+
+            with open(summary_file, "w") as f:
+                json.dump(summary, f, indent=4)
+
+            print("\nProcessing complete!")
+            print(f"Results saved to: {results_dir}")
+            print(f"Summary saved to: {summary_file}")
+            print(f"Successful: {summary['successful']}/{summary['total_examples']}")
+            print(f"Failed: {summary['failed']}/{summary['total_examples']}")
+            if summary["error_types"]:
+                print("\nError type distribution:")
+                for error_type, count in summary["error_types"].items():
+                    print(f"  {error_type}: {count}")
+
+        # Generate report on Modal
+        generate_report(PREDS_DNAME, LOG_DIR, dataset)
+    except Exception:
+        print("Fatal error in run_eval:")
+        traceback.print_exc()
+        raise
+
+
+@click.command()
+@click.option("--use-existing-preds", is_flag=True, help="Use existing predictions instead of generating new ones.")
+@click.option("--dataset", help="The dataset to use.", type=click.Choice([dataset.value for dataset in SWEBenchDataset]), default=SWEBenchDataset.LITE.value)
+@click.option("--length", help="The number of examples to process.", type=int, default=10)
+@click.option("--instance-id", help="The instance ID of the example to process.")
def run_eval_command(use_existing_preds, dataset, length, instance_id):
+    asyncio.run(run_eval(use_existing_preds, dataset, length, instance_id))
+
+
+if __name__ == "__main__":
+    run_eval_command()
diff --git a/codegen-examples/pyproject.toml b/codegen-examples/pyproject.toml
index 8f61c5735..9074b1b5e 100644
--- a/codegen-examples/pyproject.toml
+++ b/codegen-examples/pyproject.toml
@@ -31,6 +31,9 @@ dev-dependencies = [
   "deptry>=0.22.0",
 ]
 
+[tool.uv.workspace]
+members = ["examples/swebench_agent_run"]
+
 [tool.pre-commit-uv]
 requirements = ["strict-requirements"]
diff --git a/codegen-examples/uv.lock b/codegen-examples/uv.lock
index 152d3beaa..5f962f29b 100644
--- a/codegen-examples/uv.lock
+++ b/codegen-examples/uv.lock
@@ -1,11 +1,16 @@
 version = 1
-revision = 1
 requires-python = ">=3.12, <3.14"
 resolution-markers = [
     "python_full_version >= '3.12.4'",
     "python_full_version < '3.12.4'",
 ]
 
+[manifest]
+members = [
+    "codegen-examples",
+    "swebench-agent-run",
+]
+
 [[package]]
 name = "aiohappyeyeballs"
 version = "2.4.6"
@@ -3197,6 +3202,11 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/d9/61/f2b52e107b1fc8944b33ef56bf6ac4ebbe16d91b94d2b87ce013bf63fb84/starlette-0.45.3-py3-none-any.whl", hash = "sha256:dfb6d332576f136ec740296c7e8bb8c8a7125044e7c6da30744718880cdd059d", size = 71507 },
 ]
 
+[[package]]
+name = "swebench-agent-run"
+version = "0.1.0"
+source = { virtual = "examples/swebench_agent_run" }
+
 [[package]]
 name = "synchronicity"
 version = "0.9.11"
diff --git a/pyproject.toml b/pyproject.toml
index 422eb12b6..6d4ffec89 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -70,6 +70,7 @@ dependencies = [
   "modal>=0.73.45",
   "slack-sdk",
   "langchain-anthropic>=0.3.7",
+  "lox>=0.12.0",
 ]
 
 license = { text = "Apache-2.0" }
diff --git a/src/codegen/extensions/swebench/README.md b/src/codegen/extensions/swebench/README.md
new file mode 100644
index 000000000..12063180d
--- /dev/null
+++ b/src/codegen/extensions/swebench/README.md
@@ -0,0 +1,29 @@
+## Codegen Harness and Evaluator for the SWE Bench Development Tool
+
+This folder contains a harness and evaluator for the SWE Bench leaderboard, enabling developers to test and evaluate their codegen models against it.
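+
+As a quick orientation, here is a minimal sketch of driving the harness by hand on one benchmark instance (illustrative only: it assumes your API keys are configured and uses the helpers added in this folder):
+
+```python
+from codegen.extensions.swebench.harness import run_agent_on_entry
+from codegen.extensions.swebench.utils import SWEBenchDataset, get_swe_bench_examples
+
+# Fetch one example from the SWE-bench Lite test split.
+examples = get_swe_bench_examples(dataset=SWEBenchDataset.LITE, length=1)
+
+# Check out the repo at the base commit, run the agent, and collect the result.
+result = run_agent_on_entry(examples[0])
+print(result["model_patch"])  # the unified diff produced by the agent
+```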
+
+It integrates directly into the Codegen agentic framework and can be built upon.
+
+### Setup
+
+Remember to install all the dependencies for the environment.
+
+### Usage
+
+#### Edit agent.py, your codegen agent
+
+This file contains the main logic for the agent.
+
+The agent taps into tree-sitter via codegen. You can modify it by adding additional tools, extending its capabilities, refining its prompts, and more.
+
+It is invoked in the harness script.
+
+#### Run harness.py to run the agent
+
+This script will gather the correct dataset, run the agent, and save the results.
+
+#### Run report.py to generate a report
+
+This script will generate a report from the results. It will loop through all the results and generate a report to evaluate each. Note: there is currently an error in the Docker image.
+
+There are currently example predictions in the `predictions/results` folder.
diff --git a/src/codegen/extensions/swebench/harness.py b/src/codegen/extensions/swebench/harness.py
new file mode 100644
index 000000000..7445df7e9
--- /dev/null
+++ b/src/codegen/extensions/swebench/harness.py
@@ -0,0 +1,180 @@
+#!/usr/bin/env python
+"""This is the harness for running an AI agent on the SWE Bench dataset."""
+
+import json
+import pprint
+import random
+import subprocess
+import sys
+from pathlib import Path
+
+import lox
+
+from codegen import Codebase
+from codegen.agents.code_agent import CodeAgent
+from codegen.extensions.swebench.utils import (
+    SweBenchExample,
+    get_swe_bench_examples,
+    load_predictions,
+)
+
+PARENT_DIR = Path(__file__).parent
+
+PREDS_DNAME = PARENT_DIR / "predictions"
+
+
+def diff_versus_commit(git_dname, commit):
+    """Take a diff of `git_dname` current contents versus the `commit`."""
+    diff_cmd = f"git -C {git_dname} diff {commit}"
+    diff_output = subprocess.check_output(diff_cmd.split()).decode()
+    return diff_output
+
+
+def files_in_patch(patch):
+    """Extract the list of modified files from a unified diff patch string."""
+    files = []
+    for line in patch.split("\n"):
+        if line.startswith("--- a/") or line.startswith("+++ b/"):
+            fname = line.split("/", 1)[1]
+            if fname not in files:
+                files.append(fname)
+    return files
+
+
+def show_problems(dataset):
+    """Print each instance_id with the first line of its problem statement."""
+    for inst, entry in dataset.items():
+        problem = entry.problem_statement.splitlines()[0]
+        print(f"{inst}: {problem}")
+
+
+def run_agent_on_entry(entry: SweBenchExample):
+    """Process one `entry` from SWE Bench: check out the repo at the base
+    commit, run the agent on the problem statement, and return the result.
+    """
+    instance_id = entry.instance_id
+    base_commit = entry.base_commit
+
+    print("=" * 60)
+    pprint.pprint(instance_id)
+    print("=" * 60)
+    problem_statement = entry.problem_statement
+    print(problem_statement)
+
+    gold_files = files_in_patch(entry.patch)
+
+    codebase = Codebase.from_repo(repo_full_name=entry.repo, commit=base_commit, language="python")  # check out the repo
+
+    agent = CodeAgent(codebase=codebase)
+
+    pprint.pprint(instance_id)
+    pprint.pprint(gold_files)
+
+    message = """Below is a real GitHub issue from a popular GitHub repository.
+The issue was filed some time ago.
+The repo has been checked out at the commit that existed at the moment the issue was filed.
+If you are already familiar with this repo, be cautious!
+You are working with an old version of the repo!
+Filenames, directory names, file contents, etc. may be different than what you're used to.
+
+Propose changes to update the repo to fix the problem below.
+
+"""
+    message += problem_statement
+
+    try:
+        agent.run(prompt=message, session_id="swebench")
+    except Exception as agent_error:
+        pprint.pprint(f"Instance ID: {instance_id} terminated with error: {agent_error}")
+        raise agent_error
+
+    # Get the diff between the current state and the original commit
+    model_patch = codebase.get_diff(base=base_commit)
+    pprint.pprint(model_patch)
+
+    # Record the results for the logs
+    result = dict(
+        # Required args for running eval tests
+        instance_id=instance_id,
+        model_patch=model_patch,
+        # For computing stats
+        gold_files=gold_files,
+        edited_files=files_in_patch(model_patch),
+    )
+
+    # Did we get a successful patch?
+    if not model_patch:
+        msg = "Failed to generate a patch"
+        raise ValueError(msg)
+
+    return result
+
+
+def process_instances(dataset: dict[str, SweBenchExample], threads: int):
+    """dataset - The subset of the SWE Bench dataset to process.
+    threads - How many problems to attempt concurrently.
+    Instances that already have predictions in the results directory
+    are skipped, so an interrupted run can be resumed without
+    repeating completed work.
+    """
+    # Create the predictions directory if it doesn't exist
+    PREDS_DNAME.mkdir(exist_ok=True)
+    out_dname = PREDS_DNAME / "results"
+    out_dname.mkdir(exist_ok=True)
+
+    pprint.pprint(out_dname)
+
+    # If we are restarting this run, figure out which instances are already done.
+    done_preds = load_predictions([out_dname])
+    done_instances = set(done_preds.keys())
+    pprint.pprint(len(done_instances))
+
+    all_instances = set(dataset.keys())
+
+    remaining_instances = set(all_instances)
+    remaining_instances -= done_instances
+
+    remaining_instances = list(remaining_instances)
+    random.shuffle(remaining_instances)
+
+    pprint.pprint(sorted(remaining_instances))
+    pprint.pprint(len(remaining_instances))
+
+    print()
+    print("Press enter to continue...")
+    input()
+
+    if threads > 1:
+        process_one_instance_lox = lox.process(threads)(run_agent_on_entry)
+        process_one_instance_func = process_one_instance_lox.scatter
+        gather = process_one_instance_lox.gather
+    else:
+        process_one_instance_func = run_agent_on_entry
+
+    for instance_id in remaining_instances:
+        if instance_id in done_instances:
+            print("skipping", instance_id)
+            continue
+
+        result = process_one_instance_func(
+            dataset[instance_id],
+        )
+        with open(out_dname / f"{instance_id}.json", "w") as f:
+            json.dump(result, f)
+
+        print("#" * 60)
+        # input()
+
+    if threads > 1:
+        gather()
+
+
+def main():
+    # Load the SWE Bench dataset
+    dataset = {example.instance_id: example for example in get_swe_bench_examples()}
+    process_instances(dataset, threads=10)
+
+
+if __name__ == "__main__":
+    status = main()
+    sys.exit(status)
diff --git a/src/codegen/extensions/swebench/report.py b/src/codegen/extensions/swebench/report.py
new file mode 100755
index 000000000..f89a97cef
--- /dev/null
+++ b/src/codegen/extensions/swebench/report.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python
+
+import json
+import subprocess
+import uuid
+from collections import defaultdict
+from pathlib import Path
+
+from codegen.extensions.swebench.tests import remove_patches_to_tests
+from codegen.extensions.swebench.utils import SWEBenchDataset
+
+using_dataset = "lite"
+LOG_DIR = Path(__file__).parent / "logs"
+NUM_EVAL_PROCS = 5
+
+
+def run_evals(predictions_jsonl, logs_dir: Path, dataset: SWEBenchDataset):
+    """Run the evaluations on the predictions on Modal."""
+    run_evals_cmd = f"""
+python -m swebench.harness.run_evaluation
+    --predictions_path {predictions_jsonl}
+    --run_id
{uuid.uuid4()!s} + --dataset_name {dataset.value} + --cache_level instance + --report_dir {logs_dir} + --modal true +""" + run_evals_cmd = " ".join([line.strip() for line in run_evals_cmd.split() if line.strip()]) + print("Running evaluation command:", run_evals_cmd) + + subprocess.run(run_evals_cmd.split(), check=True) + + +def get_report(predictions_jsonl): + # Load and parse the evaluation results directly from the predictions file + results = defaultdict(list) + + with open(predictions_jsonl) as f: + for line in f: + pred = json.loads(line) + instance_id = pred["instance_id"] + + # Track basic stats + results["generated"].append(instance_id) + + # Check for evaluation logs + log_file = LOG_DIR / f"{instance_id}.eval.log" + if log_file.exists(): + results["with_logs"].append(instance_id) + log_content = log_file.read_text() + + if "PASS" in log_content: + results["resolved"].append(instance_id) + results["applied"].append(instance_id) + elif "FAIL" in log_content: + results["applied"].append(instance_id) + else: + results["no_apply"].append(instance_id) + else: + results["no_logs"].append(instance_id) + + # Convert lists to sets for compatibility with existing code + return {k: set(v) for k, v in results.items()} + + +def update_pred_json(predictions, report, predictions_dir: Path): + all_instances = set(report.get("generated", [])) + all_instances.update(set(report.get("no_generation", []))) + + for instance_id, pred in predictions.items(): + # Use get() to handle missing 'resolved' key, defaulting to empty set + was_resolved = instance_id in report.get("resolved", set()) + if "resolved" in pred and pred["resolved"] == was_resolved: + continue + + assert instance_id in all_instances, instance_id + + pred["resolved"] = was_resolved + save = dict(pred) + + # Construct json_fname if it doesn't exist + if "json_fname" not in pred: + json_fname = predictions_dir / "results" / f"{instance_id}.json" + else: + json_fname = pred["json_fname"] + del save["json_fname"] # Remove from save data if it exists + + Path(json_fname).write_text(json.dumps(save, indent=4)) + + return predictions + + +def preds_to_jsonl(predictions, predictions_dir: Path): + dname = predictions_dir / "results" + + predictions_jsonl = str(dname / "all_preds.jsonl") + print(f"Creating JSONL file: {predictions_jsonl}") + + # Use a default model name since it's not in the predictions + model_name = "results" + + with open(predictions_jsonl, "w") as fh: + for inst, pred in predictions.items(): + minimal_pred = { + "model_name_or_path": model_name, # Use default model name + "model_patch": remove_patches_to_tests(pred["model_patch"]) if "model_patch" in pred else pred.get("patch", ""), + "instance_id": pred["instance_id"], + } + fh.write(json.dumps(minimal_pred) + "\n") + return predictions_jsonl + + +def generate_report(predictions_dir: Path, logs_dir: Path, dataset: SWEBenchDataset): + # Automatically find all JSON files in predictions/results + results_dir = predictions_dir / "results" + if not results_dir.exists(): + print(f"Directory does not exist: {results_dir}") + return 1 + + prediction_files = list(results_dir.glob("*.json")) + print(f"Found {len(prediction_files)} prediction files") + + predictions = {} + for file_path in prediction_files: + try: + with open(file_path) as f: + prediction = json.load(f) + if isinstance(prediction, dict) and "instance_id" in prediction: + predictions[prediction["instance_id"]] = prediction + except json.JSONDecodeError: + print(f"Error reading JSON from {file_path}") + continue + + 
print(f"Successfully loaded {len(predictions)} predictions")
+
+    if predictions:
+        # Create predictions JSONL file
+        predictions_jsonl = preds_to_jsonl(predictions, predictions_dir)
+        print(f"\nCreated predictions JSONL: {predictions_jsonl}")
+
+        # Setup log directory
+        log_dir = logs_dir / "results"
+        log_dir.mkdir(exist_ok=True, parents=True)
+        print(f"Using log directory: {log_dir}")
+
+        # Run evaluations
+        run_evals(predictions_jsonl, logs_dir, dataset)
+
+        # Get and display report
+        # (the model name in the JSONL is fixed to "results" by preds_to_jsonl)
+        report = get_report(predictions_jsonl)
+
+        print("\nEvaluation Results:")
+        print(f"Total predictions: {len(predictions)}")
+        print(f"Successfully applied: {len(report.get('applied', []))}")
+        print(f"Resolved: {len(report.get('resolved', []))}")
+        print(f"Failed to apply: {len(report.get('no_apply', []))}")
+        print(f"With logs: {len(report.get('with_logs', []))}")
+        print(f"No logs: {len(report.get('no_logs', []))}")
+
+        # Update prediction JSONs with results
+        predictions = update_pred_json(predictions, report, predictions_dir)
+    else:
+        print("No valid predictions found")
+        return 1
+
+    return 0
diff --git a/src/codegen/extensions/swebench/tests.py b/src/codegen/extensions/swebench/tests.py
new file mode 100755
index 000000000..9233f0c07
--- /dev/null
+++ b/src/codegen/extensions/swebench/tests.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+# A no-op patch which creates an empty file is used to stand in for
+# the `model_patch` and/or `test_patch` when running SWE Bench tests
+# without one or both of those patches.
+NOOP_PATCH = "diff --git a/empty.file.{nonce}.ignore b/empty.file.{nonce}.ignore\nnew file mode 100644\nindex 0000000..e69de29\n"
+
+
+def remove_patches_to_tests(model_patch):
+    """Remove any changes to the tests directory from the provided patch.
+    This is to ensure that the model_patch does not disturb the repo's
+    tests when doing acceptance testing with the `test_patch`.
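+    For example, a patch that edits both `src/foo.py` and `tests/test_foo.py`
+    (illustrative paths) is returned with only the `src/foo.py` hunks intact.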
+    """
+    if not model_patch:
+        return model_patch
+
+    lines = model_patch.splitlines(keepends=True)
+    filtered_lines = []
+    is_tests = False
+
+    for line in lines:
+        if line.startswith("diff --git a/"):
+            pieces = line.split()
+            to = pieces[-1]
+            if to.startswith("b/") and ("/test/" in to or "/tests/" in to or "/testing/" in to or "/test_" in to or "/tox.ini" in to):
+                is_tests = True
+            else:
+                is_tests = False
+
+        if not is_tests:
+            filtered_lines.append(line)
+
+    return "".join(filtered_lines)
diff --git a/src/codegen/extensions/langchain/utils.py b/src/codegen/extensions/swebench/utils.py
similarity index 64%
rename from src/codegen/extensions/langchain/utils.py
rename to src/codegen/extensions/swebench/utils.py
index 7e60734a0..91e42c464 100644
--- a/src/codegen/extensions/langchain/utils.py
+++ b/src/codegen/extensions/swebench/utils.py
@@ -1,11 +1,19 @@
-"""Utilities for working with language models and datasets."""
-
+import json
 from dataclasses import dataclass
-from typing import Optional
+from enum import Enum
+from pathlib import Path
+from pprint import pprint
+from typing import Literal, Optional
 
 import requests
 
 
+class SWEBenchDataset(Enum):
+    LITE = "princeton-nlp/SWE-bench_Lite"
+    FULL = "princeton-nlp/SWE-bench"
+    VERIFIED = "princeton-nlp/SWE-bench_Verified"
+
+
 @dataclass
 class SweBenchExample:
     """A single example from the SWE-bench dataset."""
@@ -24,7 +32,39 @@ class SweBenchExample:
     environment_setup_commit: Optional[str]
 
 
-def get_swe_bench_examples() -> list[SweBenchExample]:
+def load_predictions(paths):
+    prediction_paths = []
+    for path in paths:
+        path = Path(path)
+        if path.is_file():
+            prediction_paths.append(path)
+        elif path.is_dir():
+            prediction_paths += list(path.glob("*.json"))
+        else:
+            raise ValueError(path)
+
+    # prediction_paths.sort(key=lambda p: p.stat().st_mtime)
+
+    predictions = dict()
+    for fname in prediction_paths:
+        try:
+            pred = json.loads(fname.read_text())
+        except json.decoder.JSONDecodeError as err:
+            pprint(fname)
+            raise err
+
+        if "instance_id" not in pred:
+            print("Skipping json without instance_id", fname)
+            continue
+
+        inst = pred["instance_id"]
+        pred["json_fname"] = str(fname)
+        predictions[inst] = pred
+
+    return predictions
+
+
+def get_swe_bench_examples(dataset: SWEBenchDataset = SWEBenchDataset.LITE, split: Literal["train", "dev", "test"] = "test", offset: int = 0, length: int = 100) -> list[SweBenchExample]:
     """Fetch examples from the SWE-bench dataset.
 
     Returns:
@@ -35,11 +75,11 @@ def get_swe_bench_examples() -> list[SweBenchExample]:
     """
     url = "https://datasets-server.huggingface.co/rows"
     params = {
-        "dataset": "princeton-nlp/SWE-bench",
+        "dataset": dataset.value,
         "config": "default",
-        "split": "dev",
-        "offset": 0,
-        "length": 100,
+        "split": split,
+        "offset": offset,
+        "length": length,
     }
 
     response = requests.get(url, params=params)
@@ -67,7 +107,10 @@ def get_swe_bench_examples() -> list[SweBenchExample]:
     return examples
 
 
-def get_swe_bench_example(instance_id: str) -> SweBenchExample:
+def get_swe_bench_example(
+    instance_id: str,
+    dataset: SWEBenchDataset = SWEBenchDataset.LITE,
+) -> SweBenchExample:
     """Fetch a single example from the SWE-bench dataset by its instance ID.
Args: @@ -82,12 +125,10 @@ def get_swe_bench_example(instance_id: str) -> SweBenchExample: """ url = "https://datasets-server.huggingface.co/filter" params = { - "dataset": "princeton-nlp/SWE-bench", + "dataset": dataset.value, "config": "default", "split": "dev", "where": f"instance_id='{instance_id}'", - "offset": 0, - "length": 1, } response = requests.get(url, params=params) diff --git a/uv.lock b/uv.lock index a7e7d5d03..bf83931ec 100644 --- a/uv.lock +++ b/uv.lock @@ -1,4 +1,5 @@ version = 1 +revision = 1 requires-python = ">=3.12, <3.14" resolution-markers = [ "python_full_version >= '3.12.4'", @@ -75,6 +76,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ec/6a/bc7e17a3e87a2985d3e8f4da4cd0f481060eb78fb08596c42be62c90a4d9/aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5", size = 7597 }, ] +[[package]] +name = "alabaster" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a6/f8/d9c74d0daf3f742840fd818d69cfae176fa332022fd44e3469487d5a9420/alabaster-1.0.0.tar.gz", hash = "sha256:c00dca57bca26fa62a6d7d0a9fcce65f3e026e9bfe33e9c538fd3fbb2144fd9e", size = 24210 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/b3/6b4067be973ae96ba0d615946e314c5ae35f9f993eca561b356540bb0c2b/alabaster-1.0.0-py3-none-any.whl", hash = "sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b", size = 13929 }, +] + [[package]] name = "annotated-types" version = "0.7.0" @@ -548,6 +558,7 @@ dependencies = [ { name = "langchain-core" }, { name = "langchain-openai" }, { name = "lazy-object-proxy" }, + { name = "lox" }, { name = "mcp", extra = ["cli"] }, { name = "mini-racer" }, { name = "modal" }, @@ -668,6 +679,7 @@ requires-dist = [ { name = "langchain-core" }, { name = "langchain-openai" }, { name = "lazy-object-proxy", specifier = ">=0.0.0" }, + { name = "lox", specifier = ">=0.12.0" }, { name = "lsprotocol", marker = "extra == 'lsp'", specifier = "==2024.0.0b1" }, { name = "mcp", extras = ["cli"] }, { name = "mini-racer", specifier = ">=0.12.4" }, @@ -721,6 +733,7 @@ requires-dist = [ { name = "wrapt", specifier = ">=1.16.0,<2.0.0" }, { name = "xmltodict", specifier = ">=0.13.0,<1.0.0" }, ] +provides-extras = ["lsp", "types"] [package.metadata.requires-dev] dev = [ @@ -1015,6 +1028,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/09/40/9d521973cae7f7ef8b1f0d0e28a3db0f851c1f1dca45d4c2ed5360bb7246/dicttoxml-1.7.16-py3-none-any.whl", hash = "sha256:8677671496d0d38e66c7179f82a7e9059f94887777955dc71b0ac602ee637c26", size = 24155 }, ] +[[package]] +name = "dill" +version = "0.3.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/17/4d/ac7ffa80c69ea1df30a8aa11b3578692a5118e7cd1aa157e3ef73b092d15/dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca", size = 184847 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c9/7a/cef76fd8438a42f96db64ddaa85280485a9c395e7df3db8158cfec1eee34/dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7", size = 116252 }, +] + [[package]] name = "distlib" version = "0.3.9" @@ -1051,6 +1073,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d5/7c/e9fcff7623954d86bdc17782036cbf715ecab1bec4847c008557affe1ca8/docstring_parser-0.16-py3-none-any.whl", hash = 
"sha256:bf0a1387354d3691d102edef7ec124f219ef639982d096e26e3b60aeffa90637", size = 36533 }, ] +[[package]] +name = "docutils" +version = "0.21.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ae/ed/aefcc8cd0ba62a0560c3c18c33925362d46c6075480bfa4df87b28e169a9/docutils-0.21.2.tar.gz", hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f", size = 2204444 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/d7/9322c609343d929e75e7e5e6255e614fcc67572cfd083959cdef3b7aad79/docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2", size = 587408 }, +] + [[package]] name = "dotty-dict" version = "1.3.1" @@ -1466,6 +1497,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, ] +[[package]] +name = "imagesize" +version = "1.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a7/84/62473fb57d61e31fef6e36d64a179c8781605429fd927b5dd608c997be31/imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a", size = 1280026 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ff/62/85c4c919272577931d407be5ba5d71c20f0b616d31a0befe0ae45bb79abd/imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b", size = 8769 }, +] + [[package]] name = "importlib-metadata" version = "8.6.1" @@ -2046,6 +2086,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/0c/29/0348de65b8cc732daa3e33e67806420b2ae89bdce2b04af740289c5c6c8c/loguru-0.7.3-py3-none-any.whl", hash = "sha256:31a33c10c8e1e10422bfd431aeb5d351c7cf7fa671e3c4df004162264b28220c", size = 61595 }, ] +[[package]] +name = "lox" +version = "0.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pathos" }, + { name = "sphinx-rtd-theme" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0f/b5/2bfa8da2a1dd6647c3ea0b8d7ae366bbb36b49f9f3858a253199daacb860/lox-0.12.0.tar.gz", hash = "sha256:cc7d5f867afb4dc7c2bce7bd6e90f4665c6df492863f35ff63229300b7219977", size = 37579 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/9a/cc790ca4b853821b76acb5944d32036590a789e5f3b9e4f10a8962bcfda5/lox-0.12.0-py2.py3-none-any.whl", hash = "sha256:ac0a392662f3a75cc9097655d26169d5e3564e2670431fd9884a7a09a09f6921", size = 25372 }, +] + [[package]] name = "lsprotocol" version = "2024.0.0b1" @@ -2255,6 +2308,22 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/99/b7/b9e70fde2c0f0c9af4cc5277782a89b66d35948ea3369ec9f598358c3ac5/multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506", size = 10051 }, ] +[[package]] +name = "multiprocess" +version = "0.70.16" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "dill" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b5/ae/04f39c5d0d0def03247c2893d6f2b83c136bf3320a2154d7b8858f2ba72d/multiprocess-0.70.16.tar.gz", hash = "sha256:161af703d4652a0e1410be6abccecde4a7ddffd19341be0a7011b94aeb171ac1", size = 1772603 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/bc/f7/7ec7fddc92e50714ea3745631f79bd9c96424cb2702632521028e57d3a36/multiprocess-0.70.16-py310-none-any.whl", hash = "sha256:c4a9944c67bd49f823687463660a2d6daae94c289adff97e0f9d696ba6371d02", size = 134824 }, + { url = "https://files.pythonhosted.org/packages/50/15/b56e50e8debaf439f44befec5b2af11db85f6e0f344c3113ae0be0593a91/multiprocess-0.70.16-py311-none-any.whl", hash = "sha256:af4cabb0dac72abfb1e794fa7855c325fd2b55a10a44628a3c1ad3311c04127a", size = 143519 }, + { url = "https://files.pythonhosted.org/packages/0a/7d/a988f258104dcd2ccf1ed40fdc97e26c4ac351eeaf81d76e266c52d84e2f/multiprocess-0.70.16-py312-none-any.whl", hash = "sha256:fc0544c531920dde3b00c29863377f87e1632601092ea2daca74e4beb40faa2e", size = 146741 }, + { url = "https://files.pythonhosted.org/packages/ea/89/38df130f2c799090c978b366cfdf5b96d08de5b29a4a293df7f7429fa50b/multiprocess-0.70.16-py38-none-any.whl", hash = "sha256:a71d82033454891091a226dfc319d0cfa8019a4e888ef9ca910372a446de4435", size = 132628 }, + { url = "https://files.pythonhosted.org/packages/da/d9/f7f9379981e39b8c2511c9e0326d212accacb82f12fbfdc1aa2ce2a7b2b6/multiprocess-0.70.16-py39-none-any.whl", hash = "sha256:a0bafd3ae1b732eac64be2e72038231c1ba97724b60b09400d68f229fcc2fbf3", size = 133351 }, +] + [[package]] name = "mypy" version = "1.15.0" @@ -2539,6 +2608,21 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c6/ac/dac4a63f978e4dcb3c6d3a78c4d8e0192a113d288502a1216950c41b1027/parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", size = 103650 }, ] +[[package]] +name = "pathos" +version = "0.3.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "dill" }, + { name = "multiprocess" }, + { name = "pox" }, + { name = "ppft" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/be/99/7fcb91495e40735958a576b9bde930cc402d594e9ad5277bdc9b6326e1c8/pathos-0.3.2.tar.gz", hash = "sha256:4f2a42bc1e10ccf0fe71961e7145fc1437018b6b21bd93b2446abc3983e49a7a", size = 166506 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/7f/cea34872c000d17972dad998575d14656d7c6bcf1a08a8d66d73c1ef2cca/pathos-0.3.2-py3-none-any.whl", hash = "sha256:d669275e6eb4b3fbcd2846d7a6d1bba315fe23add0c614445ba1408d8b38bafe", size = 82075 }, +] + [[package]] name = "pathspec" version = "0.12.1" @@ -2600,6 +2684,24 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556 }, ] +[[package]] +name = "pox" +version = "0.3.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2e/0d/f2eb94b4d1358a60f3539a6abcbbd757fbcb78538fe8d4cfa49850356ccf/pox-0.3.5.tar.gz", hash = "sha256:8120ee4c94e950e6e0483e050a4f0e56076e590ba0a9add19524c254bd23c2d1", size = 119452 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1d/4c/490d8f7825f38fa77bff188c568163f222d01f6c6d76f574429135edfc49/pox-0.3.5-py3-none-any.whl", hash = "sha256:9e82bcc9e578b43e80a99cad80f0d8f44f4d424f0ee4ee8d4db27260a6aa365a", size = 29492 }, +] + +[[package]] +name = "ppft" +version = "1.7.6.9" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2b/06/305532df3e1b0c601f60854b6e080991835809d077934cf41976d0f224ce/ppft-1.7.6.9.tar.gz", hash = 
"sha256:73161c67474ea9d81d04bcdad166d399cff3f084d5d2dc21ebdd46c075bbc265", size = 136395 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/b3/45a04dabc39d93ad4836d99625e7c5350257b48e9ae2c5b701f3d5da6960/ppft-1.7.6.9-py3-none-any.whl", hash = "sha256:dab36548db5ca3055067fbe6b1a17db5fee29f3c366c579a9a27cebb52ed96f0", size = 56792 }, +] + [[package]] name = "pre-commit" version = "4.1.0" @@ -3549,6 +3651,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/7e/1b/1c2f43af46456050b27810a7a013af8a7e12bc545a0cdc00eb0df55eb769/rich_toolkit-0.13.2-py3-none-any.whl", hash = "sha256:f3f6c583e5283298a2f7dbd3c65aca18b7f818ad96174113ab5bec0b0e35ed61", size = 13566 }, ] +[[package]] +name = "roman-numerals-py" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9e/78/9491ab144c9cb2d97aa74d6f632bd6f4be67957de03f945a23a67415d859/roman_numerals_py-3.0.0.tar.gz", hash = "sha256:91199c4373658c03d87d9fe004f4a5120a20f6cb192be745c2377cce274ef41c", size = 8970 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/d0/a3a2fed015e95b9e81619182adc472540f9786183febfaef8b7c5e909418/roman_numerals_py-3.0.0-py3-none-any.whl", hash = "sha256:a1421ce66b3eab7e8735065458de3fa5c4a46263d50f9f4ac8f0e5e7701dd125", size = 4416 }, +] + [[package]] name = "rpds-py" version = "0.22.3" @@ -3759,6 +3870,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 }, ] +[[package]] +name = "snowballstemmer" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/44/7b/af302bebf22c749c56c9c3e8ae13190b5b5db37a33d9068652e8f73b7089/snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1", size = 86699 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/dc/c02e01294f7265e63a7315fe086dd1df7dacb9f840a804da846b96d01b96/snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a", size = 93002 }, +] + [[package]] name = "soupsieve" version = "2.6" @@ -3768,6 +3888,114 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d1/c2/fe97d779f3ef3b15f05c94a2f1e3d21732574ed441687474db9d342a7315/soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9", size = 36186 }, ] +[[package]] +name = "sphinx" +version = "8.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "alabaster" }, + { name = "babel" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "docutils" }, + { name = "imagesize" }, + { name = "jinja2" }, + { name = "packaging" }, + { name = "pygments" }, + { name = "requests" }, + { name = "roman-numerals-py" }, + { name = "snowballstemmer" }, + { name = "sphinxcontrib-applehelp" }, + { name = "sphinxcontrib-devhelp" }, + { name = "sphinxcontrib-htmlhelp" }, + { name = "sphinxcontrib-jsmath" }, + { name = "sphinxcontrib-qthelp" }, + { name = "sphinxcontrib-serializinghtml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/81/46/08fe30fc7a6b0e8ff1f502e44133d3a1bd9453d7ab884c2ac7f0ef280920/sphinx-8.2.0.tar.gz", hash = "sha256:5b0067853d6e97f3fa87563e3404ebd008fce03525b55b25da90706764da6215", size 
= 8321764 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/4d/bbe0250199b9dfa8b25a1949ff13d81e7a6f3bfde37fe373a881bd78a37a/sphinx-8.2.0-py3-none-any.whl", hash = "sha256:3c0a40ff71ace28b316bde7387d93b9249a3688c202181519689b66d5d0aed53", size = 3589193 }, +] + +[[package]] +name = "sphinx-rtd-theme" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "docutils" }, + { name = "sphinx" }, + { name = "sphinxcontrib-jquery" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/91/44/c97faec644d29a5ceddd3020ae2edffa69e7d00054a8c7a6021e82f20335/sphinx_rtd_theme-3.0.2.tar.gz", hash = "sha256:b7457bc25dda723b20b086a670b9953c859eab60a2a03ee8eb2bb23e176e5f85", size = 7620463 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/77/46e3bac77b82b4df5bb5b61f2de98637724f246b4966cfc34bc5895d852a/sphinx_rtd_theme-3.0.2-py2.py3-none-any.whl", hash = "sha256:422ccc750c3a3a311de4ae327e82affdaf59eb695ba4936538552f3b00f4ee13", size = 7655561 }, +] + +[[package]] +name = "sphinxcontrib-applehelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/6e/b837e84a1a704953c62ef8776d45c3e8d759876b4a84fe14eba2859106fe/sphinxcontrib_applehelp-2.0.0.tar.gz", hash = "sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1", size = 20053 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5d/85/9ebeae2f76e9e77b952f4b274c27238156eae7979c5421fba91a28f4970d/sphinxcontrib_applehelp-2.0.0-py3-none-any.whl", hash = "sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5", size = 119300 }, +] + +[[package]] +name = "sphinxcontrib-devhelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f6/d2/5beee64d3e4e747f316bae86b55943f51e82bb86ecd325883ef65741e7da/sphinxcontrib_devhelp-2.0.0.tar.gz", hash = "sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad", size = 12967 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/35/7a/987e583882f985fe4d7323774889ec58049171828b58c2217e7f79cdf44e/sphinxcontrib_devhelp-2.0.0-py3-none-any.whl", hash = "sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2", size = 82530 }, +] + +[[package]] +name = "sphinxcontrib-htmlhelp" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/93/983afd9aa001e5201eab16b5a444ed5b9b0a7a010541e0ddfbbfd0b2470c/sphinxcontrib_htmlhelp-2.1.0.tar.gz", hash = "sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9", size = 22617 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/7b/18a8c0bcec9182c05a0b3ec2a776bba4ead82750a55ff798e8d406dae604/sphinxcontrib_htmlhelp-2.1.0-py3-none-any.whl", hash = "sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8", size = 98705 }, +] + +[[package]] +name = "sphinxcontrib-jquery" +version = "4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "sphinx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/de/f3/aa67467e051df70a6330fe7770894b3e4f09436dea6881ae0b4f3d87cad8/sphinxcontrib-jquery-4.1.tar.gz", hash = "sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a", size = 122331 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/76/85/749bd22d1a68db7291c89e2ebca53f4306c3f205853cf31e9de279034c3c/sphinxcontrib_jquery-4.1-py2.py3-none-any.whl", hash = "sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae", size = 121104 }, +] + +[[package]] +name = "sphinxcontrib-jsmath" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/e8/9ed3830aeed71f17c026a07a5097edcf44b692850ef215b161b8ad875729/sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8", size = 5787 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c2/42/4c8646762ee83602e3fb3fbe774c2fac12f317deb0b5dbeeedd2d3ba4b77/sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178", size = 5071 }, +] + +[[package]] +name = "sphinxcontrib-qthelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/68/bc/9104308fc285eb3e0b31b67688235db556cd5b0ef31d96f30e45f2e51cae/sphinxcontrib_qthelp-2.0.0.tar.gz", hash = "sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab", size = 17165 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/83/859ecdd180cacc13b1f7e857abf8582a64552ea7a061057a6c716e790fce/sphinxcontrib_qthelp-2.0.0-py3-none-any.whl", hash = "sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb", size = 88743 }, +] + +[[package]] +name = "sphinxcontrib-serializinghtml" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3b/44/6716b257b0aa6bfd51a1b31665d1c205fb12cb5ad56de752dfa15657de2f/sphinxcontrib_serializinghtml-2.0.0.tar.gz", hash = "sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d", size = 16080 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/a7/d2782e4e3f77c8450f727ba74a8f12756d5ba823d81b941f1b04da9d033a/sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331", size = 92072 }, +] + [[package]] name = "sqlalchemy" version = "2.0.38"