diff --git a/pyproject.toml b/pyproject.toml
index f76600a8d..422eb12b6 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,7 +6,6 @@ readme = "README.md"
 # renovate: datasource=python-version depName=python
 requires-python = ">=3.12, <3.14"
 dependencies = [
-    "backoff==2.2.1",
     "openai==1.63.2",
     "tiktoken<1.0.0,>=0.5.1",
     "tabulate>=0.9.0,<1.0.0",
@@ -31,14 +30,12 @@ dependencies = [
     "plotly>=5.24.0,<7.0.0",
     "humanize<5.0.0,>=4.10.0",
     "pytest-snapshot>=0.9.0",
-    "anthropic==0.23.1",
     "pyjson5==1.6.8",
     "mini-racer>=0.12.4",
     "rustworkx>=0.15.1",
     "typing-extensions>=4.12.2",
     "termcolor>=2.4.0",
     "sentry-sdk==2.22.0",
-    "tenacity>=9.0.0",
     "click>=8.1.7",
     "requests>=2.32.3",
     "lazy-object-proxy>=0.0.0",
@@ -72,6 +69,7 @@ dependencies = [
     "neo4j",
     "modal>=0.73.45",
     "slack-sdk",
+    "langchain-anthropic>=0.3.7",
 ]
 license = { text = "Apache-2.0" }
diff --git a/src/codegen/extensions/events/linear.py b/src/codegen/extensions/events/linear.py
index 278c2c459..161400a5b 100644
--- a/src/codegen/extensions/events/linear.py
+++ b/src/codegen/extensions/events/linear.py
@@ -4,7 +4,7 @@
 from typing import Callable
 
 import modal  # deptry: ignore
-from anthropic import BaseModel
+from pydantic import BaseModel
 
 from codegen.extensions.clients.linear import LinearClient
 from codegen.extensions.events.interface import EventHandlerManagerProtocol
diff --git a/src/codegen/extensions/langchain/agent.py b/src/codegen/extensions/langchain/agent.py
index acddefde8..20396006f 100644
--- a/src/codegen/extensions/langchain/agent.py
+++ b/src/codegen/extensions/langchain/agent.py
@@ -1,9 +1,10 @@
 """Demo implementation of an agent with Codegen tools."""
 
-from langchain.agents import AgentExecutor
+from langchain.agents import AgentExecutor, create_tool_calling_agent
 from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
 from langchain.hub import pull
 from langchain.tools import BaseTool
+from langchain_anthropic import ChatAnthropic
 from langchain_core.chat_history import InMemoryChatMessageHistory
 from langchain_core.messages import BaseMessage
 from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
@@ -29,7 +30,7 @@
 def create_codebase_agent(
     codebase: Codebase,
-    model_name: str = "gpt-4o",
+    model_name: str = "claude-3-5-sonnet-latest",
     temperature: float = 0,
     verbose: bool = True,
     chat_history: list[BaseMessage] = [],
@@ -46,8 +47,13 @@ def create_codebase_agent(
         Initialized agent with message history
     """
     # Initialize language model
-    llm = ChatOpenAI(
-        model_name=model_name,
+    # llm = ChatOpenAI(
+    #     model_name=model_name,
+    #     temperature=temperature,
+    # )
+
+    llm = ChatAnthropic(
+        model="claude-3-5-sonnet-latest",
         temperature=temperature,
     )
@@ -64,7 +70,8 @@
         RevealSymbolTool(codebase),
         SemanticEditTool(codebase),
         SemanticSearchTool(codebase),
-        # CommitTool(codebase),
+        # =====[ Github Integration ]=====
+        # Enable Github integration
         # GithubCreatePRTool(codebase),
         # GithubViewPRTool(codebase),
         # GithubCreatePRCommentTool(codebase),
@@ -128,7 +135,12 @@
     )
 
     # Create the agent
-    agent = OpenAIFunctionsAgent(
+    # agent = OpenAIFunctionsAgent(
+    #     llm=llm,
+    #     tools=tools,
+    #     prompt=prompt,
+    # )
+    agent = create_tool_calling_agent(
         llm=llm,
         tools=tools,
         prompt=prompt,
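The change above swaps the OpenAI-functions agent for LangChain's generic tool-calling agent while keeping the surrounding wiring intact. A minimal sketch of the same pattern in isolation, assuming ANTHROPIC_API_KEY is set in the environment and using a placeholder prompt and tool list:

from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_anthropic import ChatAnthropic
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

llm = ChatAnthropic(model="claude-3-5-sonnet-latest", temperature=0)
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful codebase assistant."),  # placeholder system prompt
    ("human", "{input}"),
    MessagesPlaceholder("agent_scratchpad"),  # required so the agent can record tool calls
])
tools = []  # the Codegen tools (ViewFileTool, SemanticEditTool, ...) would be listed here
agent = create_tool_calling_agent(llm=llm, tools=tools, prompt=prompt)
executor = AgentExecutor(agent=agent, tools=tools, verbose=True)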
diff --git a/src/codegen/extensions/tools/reveal_symbol.py b/src/codegen/extensions/tools/reveal_symbol.py
index 341ddad1d..06897d88e 100644
--- a/src/codegen/extensions/tools/reveal_symbol.py
+++ b/src/codegen/extensions/tools/reveal_symbol.py
@@ -3,17 +3,12 @@
 import tiktoken
 
 from codegen import Codebase
+from codegen.sdk.ai.utils import count_tokens
 from codegen.sdk.core.external_module import ExternalModule
 from codegen.sdk.core.import_resolution import Import
 from codegen.sdk.core.symbol import Symbol
 
-def count_tokens(text: str) -> int:
-    """Count the number of tokens in a string using GPT tokenizer."""
-    enc = tiktoken.get_encoding("cl100k_base")  # GPT-4 encoding
-    return len(enc.encode(text))
-
-
 def truncate_source(source: str, max_tokens: int) -> str:
     """Truncate source code to fit within max_tokens while preserving meaning.
diff --git a/src/codegen/extensions/tools/semantic_edit.py b/src/codegen/extensions/tools/semantic_edit.py
index 764881749..69bc4186b 100644
--- a/src/codegen/extensions/tools/semantic_edit.py
+++ b/src/codegen/extensions/tools/semantic_edit.py
@@ -3,12 +3,13 @@
 import difflib
 import re
 
+from langchain_anthropic import ChatAnthropic
 from langchain_core.prompts import ChatPromptTemplate
-from langchain_openai import ChatOpenAI
 
 from codegen import Codebase
 
 from .tool_prompts import _HUMAN_PROMPT_DRAFT_EDITOR, _SYSTEM_PROMPT_DRAFT_EDITOR
+from .view_file import add_line_numbers
 
 
 def generate_diff(original: str, modified: str) -> str:
@@ -117,15 +118,25 @@ def semantic_edit(codebase: Codebase, filepath: str, edit_content: str, start: i
     original_content = file.content
     original_lines = original_content.split("\n")
 
+    # Check if file is too large for full edit
+    MAX_LINES = 300
+    if len(original_lines) > MAX_LINES and start == 1 and end == -1:
+        return {
+            "error": (
+                f"File is {len(original_lines)} lines long. For files longer than {MAX_LINES} lines, "
+                "please specify a line range using start and end parameters. "
+                "You may need to make multiple targeted edits."
+            ),
+            "status": "error",
+            "line_count": len(original_lines),
+        }
+
     # Handle append mode
     if start == -1 and end == -1:
         try:
             file.add_symbol_from_source(edit_content)
             codebase.commit()
-            # Analyze changes for append
-            new_lines = file.content.split("\n")
-
             return {"filepath": filepath, "content": file.content, "diff": generate_diff(original_content, file.content), "status": "success"}
         except Exception as e:
             msg = f"Failed to append content: {e!s}"
@@ -144,10 +155,10 @@ def semantic_edit(codebase: Codebase, filepath: str, edit_content: str, start: i
     system_message = _SYSTEM_PROMPT_DRAFT_EDITOR
     human_message = _HUMAN_PROMPT_DRAFT_EDITOR
     prompt = ChatPromptTemplate.from_messages([system_message, human_message])
-    llm = ChatOpenAI(
-        model="gpt-4o",
+    llm = ChatAnthropic(
+        model="claude-3-5-sonnet-latest",
         temperature=0,
-        max_tokens=10000,
+        max_tokens=5000,
     )
     chain = prompt | llm
     response = chain.invoke({"original_file_section": original_file_section, "edit_content": edit_content})
@@ -173,4 +184,4 @@ def semantic_edit(codebase: Codebase, filepath: str, edit_content: str, start: i
     file.edit(new_content)
     codebase.commit()
 
-    return {"filepath": filepath, "diff": diff, "status": "success"}
+    return {"filepath": filepath, "diff": diff, "status": "success", "new_content": add_line_numbers(new_content)}
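With the new size guard, a whole-file edit request on a long file now returns an error payload instead of invoking the LLM. A hedged sketch of caller-side handling — the file path and line range are placeholders:

# Hypothetical caller reacting to the new MAX_LINES guard in semantic_edit.
result = semantic_edit(codebase, "src/big_module.py", edit_content, start=1, end=-1)
if result.get("status") == "error" and "line_count" in result:
    # File exceeded MAX_LINES; retry with a targeted range instead of the whole file.
    result = semantic_edit(codebase, "src/big_module.py", edit_content, start=120, end=180)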
diff --git a/src/codegen/extensions/tools/semantic_search.py b/src/codegen/extensions/tools/semantic_search.py
index 9394285ad..7acc071e9 100644
--- a/src/codegen/extensions/tools/semantic_search.py
+++ b/src/codegen/extensions/tools/semantic_search.py
@@ -69,18 +69,12 @@ def semantic_search(
     # Format results with previews
     formatted_results = []
-    for filepath, score in results:
-        try:
-            file = codebase.get_file(filepath)
-            preview = file.content[:preview_length].replace("\n", " ").strip()
-            if len(file.content) > preview_length:
-                preview += "..."
+    for file, score in results:
+        preview = file.content[:preview_length].replace("\n", " ").strip()
+        if len(file.content) > preview_length:
+            preview += "..."
 
-            formatted_results.append({"filepath": filepath, "score": float(score), "preview": preview})
-        except Exception as e:
-            # Skip files that can't be read
-            print(f"Warning: Could not read file {filepath}: {e}")
-            continue
+        formatted_results.append({"filepath": file.filepath, "score": float(score), "preview": preview})
 
     return {"status": "success", "query": query, "results": formatted_results}
diff --git a/src/codegen/gscli/generate/commands.py b/src/codegen/gscli/generate/commands.py
index 04a109553..7ad0e17a9 100644
--- a/src/codegen/gscli/generate/commands.py
+++ b/src/codegen/gscli/generate/commands.py
@@ -11,7 +11,7 @@
 from codegen.gscli.generate.runner_imports import _generate_runner_imports
 from codegen.gscli.generate.system_prompt import get_system_prompt
 from codegen.gscli.generate.utils import LanguageType, generate_builtins_file
-from codegen.sdk.ai.helpers import AnthropicHelper
+from codegen.sdk.ai.client import get_openai_client
 from codegen.sdk.code_generation.changelog_generation import generate_changelog
 from codegen.sdk.code_generation.codegen_sdk_codebase import get_codegen_sdk_codebase
 from codegen.sdk.code_generation.doc_utils.generate_docs_json import generate_docs_json
@@ -201,9 +201,9 @@ def generate_codegen_sdk_docs(docs_dir: str) -> None:
 
 @generate.command()
 @click.option("--docs-dir", default="docs", required=False)
-@click.option("--anthropic-key", required=True)
+@click.option("--openai-key", required=True)
 @click.option("--complete", is_flag=True, help="Generate a complete changelog for the codegen_sdk API")
-def changelog(docs_dir: str, anthropic_key: str, complete: bool = False) -> None:
+def changelog(docs_dir: str, openai_key: str, complete: bool = False) -> None:
     """Generate the changelog for the codegen_sdk API and update the changelog.mdx file"""
     print(colored("Generating changelog", "green"))
     header = """---
@@ -212,8 +212,8 @@ def changelog(docs_dir: str, anthropic_key: str, complete: bool = False) -> None:
 iconType: "solid"
 ---
 """
-    # Generate the changelog for the codegen_sdk API and update the changelog.mdx file
-    client = AnthropicHelper(anthropic_key=anthropic_key, cache=True, openai_anthropic_translation=False)
+
+    client = get_openai_client(openai_key)
 
     if complete:
         entire_release_history = generate_changelog(client)
diff --git a/src/codegen/runner/sandbox/runner.py b/src/codegen/runner/sandbox/runner.py
index 7901a199d..257d61a66 100644
--- a/src/codegen/runner/sandbox/runner.py
+++ b/src/codegen/runner/sandbox/runner.py
@@ -11,8 +11,8 @@
 from codegen.sdk.codebase.config import CodebaseConfig, ProjectConfig, SessionOptions
 from codegen.sdk.codebase.factory.codebase_factory import CodebaseType
 from codegen.sdk.core.codebase import Codebase
-from codegen.sdk.secrets import Secrets
 from codegen.shared.compilation.string_to_code import create_execute_function_from_codeblock
+from codegen.shared.configs.models.secrets import SecretsConfig
 from codegen.shared.configs.session_configs import config
 from codegen.shared.performance.stopwatch_utils import stopwatch
 
@@ -47,7 +47,7 @@ async def warmup(self) -> None:
 
     async def _build_graph(self) -> Codebase:
         logger.info("> Building graph...")
         projects = [ProjectConfig(programming_language=self.repo.language, repo_operator=self.op, base_path=self.repo.base_path, subdirectories=self.repo.subdirectories)]
-        secrets = Secrets(openai_key=config.secrets.openai_api_key)
+        secrets = SecretsConfig(openai_api_key=config.secrets.openai_api_key)
         codebase_config = CodebaseConfig(secrets=secrets, feature_flags=config.feature_flags.codebase)
         return Codebase(projects=projects, config=codebase_config)
diff --git a/src/codegen/sdk/ai/client.py b/src/codegen/sdk/ai/client.py
new file mode 100644
index 000000000..8902a2fa1
--- /dev/null
+++ b/src/codegen/sdk/ai/client.py
@@ -0,0 +1,5 @@
+from openai import OpenAI
+
+
+def get_openai_client(key: str) -> OpenAI:
+    return OpenAI(api_key=key)
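The new client module is deliberately thin — it returns a plain openai.OpenAI instance. A usage sketch; the environment-variable lookup is an assumption, any key source works:

import os

from codegen.sdk.ai.client import get_openai_client

client = get_openai_client(os.environ["OPENAI_API_KEY"])  # just wraps OpenAI(api_key=...)
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response.choices[0].message.content)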
- return {"role": "user", "content": [{"type": "tool_result", "tool_use_id": message["tool_call_id"], "content": message["content"]}]} - else: - logger.warn(f"Unsupported role: {role}") - return None - - -def create_anthropic_message_with_tool_calls(role, content, tool_calls): - """Converts a OpenAI message with tool calls to an Anthropic message.""" - message_content = [] - if content: - message_content.append({"type": "text", "text": content}) - for tool_call in tool_calls: - message_content.append( - { - "type": "tool_use", - "id": tool_call.id if hasattr(tool_call, "id") else tool_call["id"], - "name": tool_call.function.name if hasattr(tool_call, "function") else tool_call["function"]["name"], - "input": json.loads(tool_call.function.arguments if hasattr(tool_call, "function") else tool_call["function"]["arguments"]), - } - ) - return {"role": role, "content": message_content} - - -def merge_user_messages(existing_message, new_message): - """Merges two user messages into a single message.""" - if isinstance(existing_message["content"], list): - existing_message["content"].extend(new_message["content"] if isinstance(new_message["content"], list) else [{"type": "text", "text": new_message["content"]}]) - else: - existing_message["content"] = [ - {"type": "text", "text": existing_message["content"]}, - {"type": "text", "text": new_message["content"]} if isinstance(new_message["content"], str) else new_message["content"], - ] diff --git a/src/codegen/sdk/ai/helpers.py b/src/codegen/sdk/ai/helpers.py deleted file mode 100644 index 70ccb622a..000000000 --- a/src/codegen/sdk/ai/helpers.py +++ /dev/null @@ -1,487 +0,0 @@ -import json -import logging -from abc import ABC, abstractmethod - -import anthropic -import anthropic.types as anthropic_types -import anthropic.types.beta.tools as anthropic_tool_types -import backoff -import openai -import openai.types.chat as openai_types -import tiktoken -from anthropic import Anthropic -from openai import OpenAI -from tenacity import retry, stop_after_attempt, wait_random_exponential - -from codegen.sdk.ai.converters import convert_openai_messages_to_claude -from codegen.sdk.utils import XMLUtils - -CLAUDE_OPENAI_MODEL_MAP = { - "gpt-4o": "claude-3-5-sonnet-20240620", - "gpt-4o-mini": "claude-3-haiku-20240307", - "gpt-4-turbo": "claude-3-5-sonnet-20240620", - "gpt-4-32k": "claude-3-opus-20240229", - "gpt-4-1106-preview": "claude-3-opus-20240229", - "gpt-4": "claude-3-opus-20240229", - "gpt-3.5-turbo": "claude-3-sonnet-20240229", -} - -ENCODERS = { - "gpt-4-1106-preview": tiktoken.encoding_for_model("gpt-4-32k"), - "gpt-4-32k": tiktoken.encoding_for_model("gpt-4-32k"), -} - - -def count_tokens(s: str, model_name: str = "gpt-4-32k") -> int: - """Uses tiktoken""" - if s is None: - return 0 - enc = ENCODERS.get(model_name, None) - if not enc: - ENCODERS[model_name] = tiktoken.encoding_for_model(model_name) - enc = ENCODERS[model_name] - tokens = enc.encode(s) - return len(tokens) - - -def get_headers(headers, cache_enabled: bool | None = True): - tmp_headers = headers if headers else {"Helicone-Auth": "Bearer sk-pucao3a-blpeocy-qcdpbzi-i5n4pja"} - - if cache_enabled: - tmp_headers["Helicone-Cache-Enabled"] = "true" - tmp_headers["Cache-Control"] = "max-age=2592000" # 30 days - - return tmp_headers - - -class AbstractAIHelper(ABC): - api_base: str - headers: dict[str, str] - - @abstractmethod - def __init__(self) -> None: - msg = "This is an abstract class" - raise NotImplementedError(msg) - - @abstractmethod - def embeddings_with_backoff(self, **kwargs): - 
msg = "This is an abstract class" - raise NotImplementedError(msg) - - @abstractmethod - def get_embeddings(self, content_strs: list[str]) -> list[list[float]]: - msg = "This is an abstract class" - raise NotImplementedError(msg) - - @abstractmethod - def get_embedding(self, content_str: str) -> list[float]: - msg = "This is an abstract class" - raise NotImplementedError(msg) - - @abstractmethod - def llm_query_with_retry(self, **kwargs): - msg = "This is an abstract class" - raise NotImplementedError(msg) - - @abstractmethod - def llm_query_no_retry(self, messages: list = [], model: str = "gpt-4-32k", max_tokens: int | None = None): - msg = "This is an abstract class" - raise NotImplementedError(msg) - - @abstractmethod - def llm_query_functions_with_retry(self, model: str, messages: list, functions: list[dict], max_tokens: int | None = None): - msg = "This is an abstract class" - raise NotImplementedError(msg) - - @abstractmethod - def llm_query_functions(self, model: str, messages: list, functions: list[dict], max_tokens: int | None = None): - msg = "This is an abstract class" - raise NotImplementedError(msg) - - @abstractmethod - def llm_response_to_json(response) -> str: - msg = "This is an abstract class" - raise NotImplementedError(msg) - - -# TODO: move into utils/ai folder -class OpenAIHelper(AbstractAIHelper): - client: OpenAI = None - - def __init__( - self, - openai_key: str, - api_base: str = "https://api.openai.com/v1", - headers=None, - cache: bool | None = True, - ) -> None: - if openai_key is None: - msg = "The openai_key must be provided" - raise ValueError(msg) - - self.openai_key = openai_key - self.api_base = api_base - self.headers = get_headers(headers, cache_enabled=cache) - self.logger = logging.getLogger(__name__) - self.embedding_model_name = "text-embedding-ada-002" - self.completions_model_name = "text-embedding-ada-002" - - self.set_up_open_ai_key() - - def set_up_open_ai_key(self) -> None: - self.client = OpenAI(api_key=self.openai_key, base_url=self.api_base, default_headers=self.headers) - - @backoff.on_exception(backoff.expo, openai.RateLimitError) - def embeddings_with_backoff(self, **kwargs): - return self.client.embeddings.create(**kwargs) - - def get_embeddings(self, content_strs: list[str]) -> list[list[float]]: - content_strs = [c[:1000] if type(c) in (str, bytes) else " " for c in content_strs] - response = self.embeddings_with_backoff(input=content_strs, model=self.embedding_model_name) - return [x.embedding for x in response.data] - - def get_embedding(self, content_str: str) -> list[float]: - return self.get_embeddings([content_str])[0] - - @backoff.on_exception(backoff.expo, openai.RateLimitError) - def completions_with_backoff(self, **kwargs): - return self.client.completions.create(**kwargs) - - @retry(wait=wait_random_exponential(min=70, max=600), stop=stop_after_attempt(10)) - def llm_query_with_retry(self, **kwargs): - return self.llm_query_no_retry(**kwargs) - - def llm_query_no_retry(self, messages: list = [], model: str = "gpt-4-32k", max_tokens: int | None = None, **kwargs): - return self.client.chat.completions.create( - messages=messages, - model=model, - max_tokens=max_tokens or openai.NOT_GIVEN, - **kwargs, - ) - - @retry(wait=wait_random_exponential(min=70, max=600), stop=stop_after_attempt(10)) - def llm_query_functions_with_retry(self, model: str, messages: list, functions: list[dict], max_tokens: int | None = None, **kwargs): - return self.llm_query_functions(model, messages, functions, max_tokens, **kwargs) - - def 
llm_query_functions(self, model: str, messages: list, functions: list[dict], max_tokens: int | None = None, **kwargs): - if functions is not None: - response = self.client.chat.completions.create( - model=model, - messages=messages, - tools=functions, # type: ignore - max_tokens=max_tokens or openai.NOT_GIVEN, - **kwargs, - # tool_choice="auto", # has it do multiple - ) - else: - response = self.client.chat.completions.create( - model=model, - messages=messages, - max_tokens=max_tokens or openai.NOT_GIVEN, - **kwargs, - ) - - return response - - @staticmethod - def llm_response_to_json(response: openai_types.chat_completion.ChatCompletion) -> str: - # the response needs an object of type ChatCompletionMessage - js = json.loads(response.model_dump_json()) - if len(js["choices"]) == 0: - return "" - return js["choices"][0]["message"]["content"] - - -class AnthropicHelper(AbstractAIHelper): - def __init__( - self, - anthropic_key: str, - # Dont add /v1 to the path. Anthropic already adds it, so it will be a double /v1/v1 - api_base: str = "https://api.anthropic.com", - headers=None, - openai_anthropic_translation: bool = True, - cache: bool | None = True, - ) -> None: - if anthropic_key is None: - msg = "The anthropic_key must be provided" - raise ValueError(msg) - - self.anthropic_key = anthropic_key - self.api_base = api_base - self.headers = get_headers(headers, cache_enabled=cache) - self.logger = logging.getLogger(__name__) - self.openai_anthropic_translation = openai_anthropic_translation - self.set_up_claude_key() - - def set_up_claude_key(self) -> None: - self.client = Anthropic(api_key=self.anthropic_key, base_url=self.api_base, default_headers=self.headers) - - def _convert_openai_functions_to_claude(self, functions: list[dict]) -> list[anthropic_tool_types.ToolParam]: - new_functions = [] - for function in functions: - if function["type"] == "function": - new_function = {"name": function["function"]["name"], "description": function["function"]["description"], "input_schema": {"type": "object", "properties": {}}} - if "parameters" in function["function"]: - new_function["input_schema"] = function["function"]["parameters"] - new_functions.append(new_function) - return new_functions - - def _convert_claude_response_to_openai( - self, response: anthropic_types.Message | anthropic_tool_types.ToolsBetaMessage, parse_function_calls: bool = False, parse_result_block: bool = False - ) -> openai_types.chat_completion.ChatCompletion: - choices = [] - if len(response.content) != 0: - for resp in response.content: - if isinstance(resp, anthropic_types.ContentBlock): - if "result" in resp.text and parse_result_block: - xml_result = XMLUtils.extract_elements(resp.text, "result", keep_tag=False) - resp.text = resp.text if len(xml_result) <= 1 else xml_result[0] - elif isinstance(resp, anthropic_tool_types.ToolUseBlock) and parse_result_block: - xml_answer = XMLUtils.extract_elements(resp.text, "answer", keep_tag=False)[0] - resp.text = resp.text if len(xml_answer) <= 1 else xml_answer[0] - choices.append( - openai_types.chat_completion.Choice( - index=0, - finish_reason="stop" if response.stop_reason in ("end_turn", "stop_sequence") else "length", - message=openai_types.chat_completion_message.ChatCompletionMessage(content=resp.text, role="assistant"), - ) - ) - elif isinstance(resp, anthropic_tool_types.ToolUseBlock): - # If the previous choice is a chat message, then we can add the tool call to it - if len(choices) > 0 and isinstance(choices[-1].message, 
openai_types.chat_completion_message.ChatCompletionMessage) and choices[-1].message.tool_calls is None: - text_response = choices[-1].message.content - choices = choices[:-1] - else: - text_response = None - choices.append( - openai_types.chat_completion.Choice( - index=0, - finish_reason="tool_calls", - message=openai_types.chat_completion_message.ChatCompletionMessage( - content=text_response, - role="assistant", - function_call=None, # Function calls are deprecated - tool_calls=[ - openai_types.chat_completion_message_tool_call.ChatCompletionMessageToolCall( - id=resp.id, - function=openai_types.chat_completion_message_tool_call.Function( - name=resp.name, - arguments=json.dumps(resp.input), - ), - type="function", - ) - ], - ), - ) - ) - return openai_types.chat_completion.ChatCompletion( - id=response.id, - choices=choices, - created=0, # TODO: Use current time - model=response.model, - object="chat.completion", - system_fingerprint=None, # TODO: What is this? - ) - - @backoff.on_exception(backoff.expo, anthropic.RateLimitError) - def embeddings_with_backoff(self, **kwargs): - msg = "Embeddings are not supported for AnthropicHelper" - raise NotImplementedError(msg) - # response = self.client.embeddings.create(**kwargs) - # return response - - def get_embeddings(self, content_strs: list[str]) -> list[list[float]]: - msg = "Embeddings are not supported for AnthropicHelper" - raise NotImplementedError(msg) - # content_strs = [c[:1000] if type(c) in (str, bytes) else " " for c in content_strs] - # response = self.embeddings_with_backoff(input=content_strs, model=self.embedding_model_name) - # return [x.embedding for x in response.data] - - def get_embedding(self, content_str: str) -> list[float]: - msg = "Embeddings are not supported for AnthropicHelper" - raise NotImplementedError(msg) - # embeddings = self.get_embeddings([content_str]) - # return embeddings[0] - - @backoff.on_exception(backoff.expo, anthropic.RateLimitError) - def completions_with_backoff(self, **kwargs): - msg = "Claude's completion api is deprecated. Please use messages_with_backoff instead." 
- raise Exception(msg) - - @backoff.on_exception(backoff.expo, anthropic.RateLimitError) - def messages_with_backoff(self, **kwargs): - return self.client.messages.create(**kwargs) - - @retry(wait=wait_random_exponential(min=70, max=600), stop=stop_after_attempt(10)) - def llm_query_with_retry(self, **kwargs): - return self.llm_query_no_retry(**kwargs) - - def llm_query_no_retry(self, messages: list = [], model: str = "claude-2.0", max_tokens: int | None = None, system_prompt: str | anthropic.NotGiven | None = None, **kwargs): - system_prompt = anthropic.NotGiven() if not system_prompt else system_prompt - if self.openai_anthropic_translation and model in CLAUDE_OPENAI_MODEL_MAP: - model = CLAUDE_OPENAI_MODEL_MAP[model] - if self.openai_anthropic_translation: - claude_system_prompt, messages = convert_openai_messages_to_claude(messages) - if isinstance(system_prompt, str) and isinstance(claude_system_prompt, str): - claude_system_prompt = system_prompt + claude_system_prompt - else: - claude_system_prompt = system_prompt - response = self.client.beta.tools.messages.create(max_tokens=max_tokens, system=claude_system_prompt, messages=messages, model=model, **kwargs) - if self.openai_anthropic_translation: - return self._convert_claude_response_to_openai(response) - else: - return response - - @retry(wait=wait_random_exponential(min=70, max=600), stop=stop_after_attempt(10)) - def llm_query_functions_with_retry(self, **kwargs): - return self.llm_query_functions(**kwargs) - - def llm_query_functions(self, model: str, messages: list, functions: list, max_tokens: int | None = None, system_prompt: str | anthropic.NotGiven | None = None, **kwargs): - system_prompt = anthropic.NotGiven() if not system_prompt else system_prompt - if self.openai_anthropic_translation and model in CLAUDE_OPENAI_MODEL_MAP: - model = CLAUDE_OPENAI_MODEL_MAP[model] - if functions is not None: - if self.openai_anthropic_translation: - claude_system_prompt, messages = convert_openai_messages_to_claude(messages) - if isinstance(system_prompt, str) and isinstance(claude_system_prompt, str): - claude_system_prompt = system_prompt + claude_system_prompt - claude_functions = self._convert_openai_functions_to_claude(functions) - else: - claude_functions = functions - claude_system_prompt = system_prompt - response = self.client.beta.tools.messages.create( - max_tokens=max_tokens or anthropic.NotGiven(), - system=claude_system_prompt, - messages=messages, - model=model, - tools=claude_functions, - **kwargs, - ) - if self.openai_anthropic_translation: - return self._convert_claude_response_to_openai(response, parse_function_calls=True, parse_result_block=True) - else: - return response - else: - response = self.llm_query_no_retry( - model=model, - messages=messages, - max_tokens=max_tokens or anthropic.NotGiven(), - system_prompt=system_prompt, - **kwargs, - ) - return response - - @staticmethod - def llm_response_to_json(response: openai_types.chat_completion.ChatCompletion | anthropic_types.Message) -> str: - if isinstance(response, openai_types.chat_completion.ChatCompletion): - return OpenAIHelper.llm_response_to_json(response) - else: - js = json.loads(response.model_dump_json()) - if len(js["content"]) == 0: - return "" - return js["content"][0]["text"] - - -class MultiProviderAIHelper(AbstractAIHelper): - def __init__( - self, - openai_key: str, - anthropic_key: str | None = None, - openai_base: str = "https://api.openai.com/v1", - anthropic_base: str = "https://api.anthropic.com", - headers=None, - use_openai: bool = True, 
- use_claude: bool = True, - cache: bool | None = True, - ) -> None: - self.use_openai = use_openai - self.use_claude = use_claude - self.cache = cache - - self.openai_helper = OpenAIHelper(openai_key, api_base=openai_base, headers=headers, cache=self.cache) - if self.use_claude: - if anthropic_key is None: - msg = "Anthropic Key must be provided if use_claude is True" - raise ValueError(msg) - - self.anthropic_helper = AnthropicHelper(anthropic_key, api_base=anthropic_base, headers=headers, openai_anthropic_translation=True, cache=self.cache) - - @backoff.on_exception(backoff.expo, openai.RateLimitError) - def embeddings_with_backoff(self, **kwargs): - # Prioritize OpenAI First - if self.use_openai: - return self.openai_helper.embeddings_with_backoff(**kwargs) - elif self.use_claude: - return self.anthropic_helper.embeddings_with_backoff(**kwargs) - else: - msg = "MultiProviderAIHelper: No AI helper is enabled" - raise Exception(msg) - - def get_embeddings(self, content_strs: list[str]) -> list[list[float]]: - # Prioritize OpenAI First - if self.use_openai: - return self.openai_helper.get_embeddings(content_strs) - elif self.use_claude: - return self.anthropic_helper.get_embeddings(content_strs) - else: - msg = "MultiProviderAIHelper: No AI helper is enabled" - raise Exception(msg) - - def get_embedding(self, content_str: str) -> list[float]: - # Prioritize OpenAI First - if self.use_openai: - return self.openai_helper.get_embedding(content_str) - elif self.use_claude: - return self.anthropic_helper.get_embedding(content_str) - else: - msg = "MultiProviderAIHelper: No AI helper is enabled" - raise Exception(msg) - - @backoff.on_exception(backoff.expo, anthropic.RateLimitError) - def completions_with_backoff(self, **kwargs): - # This is OpenAI specific - if self.use_openai: - return self.openai_helper.completions_with_backoff(**kwargs) - else: - msg = "MultiProviderAIHelper: OpenAI Helper is not enabled" - raise Exception(msg) - - @backoff.on_exception(backoff.expo, anthropic.RateLimitError) - def messages_with_backoff(self, **kwargs): - # This is Anthropic specific - if self.use_claude: - return self.anthropic_helper.messages_with_backoff(**kwargs) - else: - msg = "MultiProviderAIHelper: Anthropic Helper is not enabled" - raise Exception(msg) - - @retry(wait=wait_random_exponential(min=70, max=600), stop=stop_after_attempt(10)) - def llm_query_with_retry(self, **kwargs): - return self.llm_query_no_retry(**kwargs) - - def llm_query_no_retry(self, messages: list = [], model: str = "gpt-4-32k", max_tokens: int | None = None, **kwargs): - if self.use_openai and model.startswith("gpt"): - return self.openai_helper.llm_query_no_retry(messages=messages, model=model, max_tokens=max_tokens, **kwargs) - elif self.use_claude and model.startswith("claude"): - return self.anthropic_helper.llm_query_no_retry(messages=messages, model=model, max_tokens=max_tokens, **kwargs) - else: - msg = f"MultiProviderAIHelper: Unknown Model {model}" - raise Exception(msg) - - @retry(wait=wait_random_exponential(min=70, max=600), stop=stop_after_attempt(10)) - def llm_query_functions_with_retry(self, **kwargs): - return self.llm_query_functions(**kwargs) - - def llm_query_functions(self, model: str, messages: list, functions: list[dict], max_tokens: int | None = None, **kwargs): - if self.use_openai and model.startswith("gpt"): - return self.openai_helper.llm_query_functions(model, messages, functions, max_tokens, **kwargs) - elif self.use_claude and model.startswith("claude"): - return 
self.anthropic_helper.llm_query_functions(model, messages, functions, max_tokens, **kwargs) - else: - msg = f"MultiProviderAIHelper: Unknown Model {model}" - raise Exception(msg) - - @staticmethod - def llm_response_to_json(response) -> str: - # Prioritize Anthropic First (Has support for both, while OpenAI only supports OpenAI) - return AnthropicHelper.llm_response_to_json(response) diff --git a/src/codegen/sdk/ai/utils.py b/src/codegen/sdk/ai/utils.py new file mode 100644 index 000000000..b903a9a1a --- /dev/null +++ b/src/codegen/sdk/ai/utils.py @@ -0,0 +1,17 @@ +import tiktoken + +ENCODERS = { + "gpt-4o": tiktoken.encoding_for_model("gpt-4o"), +} + + +def count_tokens(s: str, model_name: str = "gpt-4o") -> int: + """Uses tiktoken""" + if s is None: + return 0 + enc = ENCODERS.get(model_name, None) + if not enc: + ENCODERS[model_name] = tiktoken.encoding_for_model(model_name) + enc = ENCODERS[model_name] + tokens = enc.encode(s) + return len(tokens) diff --git a/src/codegen/sdk/code_generation/changelog_generation.py b/src/codegen/sdk/code_generation/changelog_generation.py index 472273986..7229375f3 100644 --- a/src/codegen/sdk/code_generation/changelog_generation.py +++ b/src/codegen/sdk/code_generation/changelog_generation.py @@ -2,19 +2,15 @@ import logging from dataclasses import dataclass from pathlib import Path -from typing import TYPE_CHECKING from git import Repo +from openai import OpenAI from semantic_release import ParsedCommit, ParseError from semantic_release.changelog.release_history import Release, ReleaseHistory from semantic_release.cli.cli_context import CliContextObj from semantic_release.cli.config import GlobalCommandLineOptions import codegen -from codegen.sdk.ai.helpers import AnthropicHelper - -if TYPE_CHECKING: - import anthropic logger = logging.getLogger(__name__) @@ -88,11 +84,10 @@ def generate_release_summary_context(release: Release): return release_summary_context -def generate_release_summary(client: AnthropicHelper, release: Release): +def generate_release_summary(client: OpenAI, release: Release) -> str: release_summary_context = generate_release_summary_context(release) - response: anthropic.types.message.Message = client.llm_query_no_retry( - system_prompt=SYSTEM_PROMPT, - model="claude-3-5-sonnet-20241022", + response = client.chat.completions.create( + model="gpt-4o", max_tokens=1000, messages=[ { @@ -107,14 +102,10 @@ def generate_release_summary(client: AnthropicHelper, release: Release): } ], ) - if not response.content: - msg = "No response from Anthropic" - raise Exception(msg) - - return json.loads(response.content[0].text) + return response.choices[0].message.content -def generate_changelog(client: AnthropicHelper, latest_existing_version: str | None = None): +def generate_changelog(client: OpenAI, latest_existing_version: str | None = None): ctx = CliContextObj(ContextMock(), logger=logger, global_opts=GlobalCommandLineOptions()) runtime = ctx.runtime_ctx translator = runtime.version_translator diff --git a/src/codegen/sdk/codebase/config.py b/src/codegen/sdk/codebase/config.py index 8648fa1e2..c2587b143 100644 --- a/src/codegen/sdk/codebase/config.py +++ b/src/codegen/sdk/codebase/config.py @@ -9,8 +9,8 @@ from codegen.git.schemas.repo_config import RepoConfig from codegen.git.utils.file_utils import split_git_path from codegen.git.utils.language import determine_project_language -from codegen.sdk.secrets import Secrets from codegen.shared.configs.models.feature_flags import CodebaseFeatureFlags +from codegen.shared.configs.models.secrets 
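count_tokens now lives in one shared module instead of being duplicated per tool; a quick usage sketch:

from codegen.sdk.ai.utils import count_tokens

# Encoders are cached in ENCODERS; unknown model names are resolved lazily via tiktoken.
n = count_tokens("def hello():\n    return 'world'", model_name="gpt-4o")
print(n)  # token count under the gpt-4o tokenizer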
diff --git a/src/codegen/sdk/code_generation/changelog_generation.py b/src/codegen/sdk/code_generation/changelog_generation.py
index 472273986..7229375f3 100644
--- a/src/codegen/sdk/code_generation/changelog_generation.py
+++ b/src/codegen/sdk/code_generation/changelog_generation.py
@@ -2,19 +2,15 @@
 import logging
 from dataclasses import dataclass
 from pathlib import Path
-from typing import TYPE_CHECKING
 
 from git import Repo
+from openai import OpenAI
 from semantic_release import ParsedCommit, ParseError
 from semantic_release.changelog.release_history import Release, ReleaseHistory
 from semantic_release.cli.cli_context import CliContextObj
 from semantic_release.cli.config import GlobalCommandLineOptions
 
 import codegen
-from codegen.sdk.ai.helpers import AnthropicHelper
-
-if TYPE_CHECKING:
-    import anthropic
 
 logger = logging.getLogger(__name__)
 
@@ -88,11 +84,10 @@ def generate_release_summary_context(release: Release):
     return release_summary_context
 
 
-def generate_release_summary(client: AnthropicHelper, release: Release):
+def generate_release_summary(client: OpenAI, release: Release) -> str:
     release_summary_context = generate_release_summary_context(release)
-    response: anthropic.types.message.Message = client.llm_query_no_retry(
-        system_prompt=SYSTEM_PROMPT,
-        model="claude-3-5-sonnet-20241022",
+    response = client.chat.completions.create(
+        model="gpt-4o",
         max_tokens=1000,
         messages=[
             {
@@ -107,14 +102,10 @@
             }
         ],
     )
-    if not response.content:
-        msg = "No response from Anthropic"
-        raise Exception(msg)
-
-    return json.loads(response.content[0].text)
+    return response.choices[0].message.content
 
 
-def generate_changelog(client: AnthropicHelper, latest_existing_version: str | None = None):
+def generate_changelog(client: OpenAI, latest_existing_version: str | None = None):
     ctx = CliContextObj(ContextMock(), logger=logger, global_opts=GlobalCommandLineOptions())
     runtime = ctx.runtime_ctx
     translator = runtime.version_translator
diff --git a/src/codegen/sdk/codebase/config.py b/src/codegen/sdk/codebase/config.py
index 8648fa1e2..c2587b143 100644
--- a/src/codegen/sdk/codebase/config.py
+++ b/src/codegen/sdk/codebase/config.py
@@ -9,8 +9,8 @@
 from codegen.git.schemas.repo_config import RepoConfig
 from codegen.git.utils.file_utils import split_git_path
 from codegen.git.utils.language import determine_project_language
-from codegen.sdk.secrets import Secrets
 from codegen.shared.configs.models.feature_flags import CodebaseFeatureFlags
+from codegen.shared.configs.models.secrets import SecretsConfig
 from codegen.shared.enums.programming_language import ProgrammingLanguage
 
 HARD_MAX_AI_LIMIT = 500  # Global limit for AI requests
@@ -76,7 +76,7 @@ class CodebaseConfig(BaseModel):
     """
 
     model_config = ConfigDict(frozen=True)
-    secrets: Secrets = Secrets()
+    secrets: SecretsConfig = SecretsConfig()
     feature_flags: CodebaseFeatureFlags = DefaultFlags
diff --git a/src/codegen/sdk/codebase/factory/get_session.py b/src/codegen/sdk/codebase/factory/get_session.py
index 9560e5df8..10c00d79d 100644
--- a/src/codegen/sdk/codebase/factory/get_session.py
+++ b/src/codegen/sdk/codebase/factory/get_session.py
@@ -9,9 +9,9 @@
 from codegen.sdk.codebase.config import CodebaseConfig, ProjectConfig, SessionOptions, TestFlags
 from codegen.sdk.codebase.factory.codebase_factory import CodebaseFactory
 from codegen.sdk.core.codebase import Codebase, PyCodebaseType, TSCodebaseType
-from codegen.sdk.secrets import Secrets
 from codegen.sdk.tree_sitter_parser import print_errors
 from codegen.shared.configs.models.feature_flags import CodebaseFeatureFlags
+from codegen.shared.configs.models.secrets import SecretsConfig
 from codegen.shared.enums.programming_language import ProgrammingLanguage
 
 
@@ -26,7 +26,7 @@ def get_codebase_session(
     verify_output: bool = True,
     feature_flags: CodebaseFeatureFlags = TestFlags,
     session_options: SessionOptions = SessionOptions(),
-    secrets: Secrets = Secrets(),
+    secrets: SecretsConfig = SecretsConfig(),
 ) -> AbstractContextManager[PyCodebaseType]: ...
 
 
@@ -41,7 +41,7 @@ def get_codebase_session(
     verify_output: bool = True,
     feature_flags: CodebaseFeatureFlags = TestFlags,
     session_options: SessionOptions = SessionOptions(),
-    secrets: Secrets = Secrets(),
+    secrets: SecretsConfig = SecretsConfig(),
 ) -> AbstractContextManager[PyCodebaseType]: ...
 
 
@@ -56,7 +56,7 @@ def get_codebase_session(
     verify_output: bool = True,
     feature_flags: CodebaseFeatureFlags = TestFlags,
     session_options: SessionOptions = SessionOptions(),
-    secrets: Secrets = Secrets(),
+    secrets: SecretsConfig = SecretsConfig(),
 ) -> AbstractContextManager[TSCodebaseType]: ...
 
 
@@ -71,7 +71,7 @@ def get_codebase_session(
     verify_output: bool = True,
     feature_flags: CodebaseFeatureFlags = TestFlags,
     session_options: SessionOptions = SessionOptions(),
-    secrets: Secrets = Secrets(),
+    secrets: SecretsConfig = SecretsConfig(),
 ) -> Generator[Codebase, None, None]:
     """Gives you a Codebase operating on the files you provided as a dict"""
     config = CodebaseConfig(feature_flags=feature_flags, secrets=secrets)
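With Secrets removed, every call site constructs SecretsConfig instead; a hedged sketch of the new wiring, with placeholder key values:

from codegen.sdk.codebase.config import CodebaseConfig
from codegen.shared.configs.models.secrets import SecretsConfig

# openai_api_key replaces the old openai_key field; github_token replaces github_api_key.
secrets = SecretsConfig(openai_api_key="sk-placeholder", github_token=None)
config = CodebaseConfig(secrets=secrets)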
diff --git a/src/codegen/sdk/core/codebase.py b/src/codegen/sdk/core/codebase.py
index bfe0e05dd..352e920e2 100644
--- a/src/codegen/sdk/core/codebase.py
+++ b/src/codegen/sdk/core/codebase.py
@@ -18,6 +18,7 @@
 from git.remote import PushInfoList
 from github.PullRequest import PullRequest
 from networkx import Graph
+from openai import OpenAI
 from rich.console import Console
 from typing_extensions import TypeVar, deprecated
 
@@ -25,7 +26,7 @@
 from codegen.git.schemas.enums import CheckoutResult
 from codegen.git.utils.pr_review import CodegenPR
 from codegen.sdk._proxy import proxy_property
-from codegen.sdk.ai.helpers import AbstractAIHelper, MultiProviderAIHelper
+from codegen.sdk.ai.client import get_openai_client
 from codegen.sdk.codebase.codebase_ai import generate_system_prompt, generate_tools
 from codegen.sdk.codebase.codebase_context import GLOBAL_FILE_IGNORE_LIST, CodebaseContext
 from codegen.sdk.codebase.config import CodebaseConfig, DefaultConfig, ProjectConfig, SessionOptions
@@ -1097,20 +1098,20 @@ def _enable_experimental_language_engine(self, async_start: bool = False, instal
     # AI
     ####################################################################################################################
 
-    _ai_helper: AbstractAIHelper = None
+    _ai_helper: OpenAI = None
     _num_ai_requests: int = 0
 
     @property
     @noapidoc
-    def ai_client(self) -> AbstractAIHelper:
+    def ai_client(self) -> OpenAI:
         """Enables calling AI/LLM APIs - re-export of the initialized `openai` module"""
         # Create a singleton AIHelper instance
         if self._ai_helper is None:
-            if self.ctx.config.secrets.openai_key is None:
+            if self.ctx.config.secrets.openai_api_key is None:
                 msg = "OpenAI key is not set"
                 raise ValueError(msg)
 
-            self._ai_helper = MultiProviderAIHelper(openai_key=self.ctx.config.secrets.openai_key, use_openai=True, use_claude=False)
+            self._ai_helper = get_openai_client(key=self.ctx.config.secrets.openai_api_key)
         return self._ai_helper
 
     def ai(self, prompt: str, target: Editable | None = None, context: Editable | list[Editable] | dict[str, Editable | list[Editable]] | None = None, model: str = "gpt-4o") -> str:
@@ -1149,7 +1150,13 @@ def ai(self, prompt: str, target: Editable | None = None, context: Editable | li
             params["tool_choice"] = "required"
 
         # Make the AI request
-        response = self.ai_client.llm_query_functions(**params)
+        response = self.ai_client.chat.completions.create(
+            model=model,
+            messages=params["messages"],
+            tools=params["functions"],  # type: ignore
+            temperature=params["temperature"],
+            tool_choice=params["tool_choice"],
+        )
 
         # Handle finish reasons
         # First check if there is a response
@@ -1193,7 +1200,7 @@ def set_ai_key(self, key: str) -> None:
         self._ai_helper = None
 
         # Set the AI key
-        self.ctx.config.secrets.openai_key = key
+        self.ctx.config.secrets.openai_api_key = key
 
     def find_by_span(self, span: Span) -> list[Editable]:
         """Finds editable objects that overlap with the given source code span.
@@ -1276,10 +1283,10 @@ def from_repo(
         # Use RepoOperator to fetch the repository
         logger.info("Cloning repository...")
         if commit is None:
-            repo_operator = RepoOperator.create_from_repo(repo_path=repo_path, url=repo_url, access_token=config.secrets.github_api_key if config.secrets else None)
+            repo_operator = RepoOperator.create_from_repo(repo_path=repo_path, url=repo_url, access_token=config.secrets.github_token if config.secrets else None)
         else:
             # Ensure the operator can handle remote operations
-            repo_operator = RepoOperator.create_from_commit(repo_path=repo_path, commit=commit, url=repo_url, access_token=config.secrets.github_api_key if config.secrets else None)
+            repo_operator = RepoOperator.create_from_commit(repo_path=repo_path, commit=commit, url=repo_url, access_token=config.secrets.github_token if config.secrets else None)
         logger.info("Clone completed successfully")
 
         # Initialize and return codebase with proper context
diff --git a/src/codegen/sdk/secrets.py b/src/codegen/sdk/secrets.py
deleted file mode 100644
index dd4eaf15b..000000000
--- a/src/codegen/sdk/secrets.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from dataclasses import dataclass
-
-
-@dataclass
-class Secrets:
-    openai_key: str | None = None
-    github_api_key: str | None = None
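Downstream of this change, ai_client is a bare openai.OpenAI instance, so callers use the standard SDK surface directly; a hedged sketch, where the target symbol is hypothetical:

codebase.set_ai_key("sk-placeholder")  # stored in ctx.config.secrets.openai_api_key; resets the cached client
client = codebase.ai_client            # lazily rebuilt via get_openai_client(...)
result = codebase.ai("Add a docstring", target=my_function)  # my_function is a hypothetical Editable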
+ """ + model_config = get_setting_config(prefix) github_token: str | None = None openai_api_key: str | None = None + + def __init__(self, **kwargs): + """Initialize secrets, loading from .env if needed.""" + super().__init__(**kwargs) + + # Load .env file if it exists + env_path = Path(".env") + if env_path.exists(): + load_dotenv(env_path) + + # Try to load from environment if not set + if not self.github_token: + self.github_token = os.getenv("GITHUB_TOKEN") + + if not self.openai_api_key: + self.openai_api_key = os.getenv("OPENAI_API_KEY") diff --git a/tests/integration/codegen/sdk/code_generation/test_api_doc_generation.py b/tests/integration/codegen/sdk/code_generation/test_api_doc_generation.py index 5dd16e709..341233296 100644 --- a/tests/integration/codegen/sdk/code_generation/test_api_doc_generation.py +++ b/tests/integration/codegen/sdk/code_generation/test_api_doc_generation.py @@ -1,6 +1,6 @@ import pytest -from codegen.sdk.ai.helpers import count_tokens +from codegen.sdk.ai.utils import count_tokens from codegen.sdk.code_generation.doc_utils.generate_docs_json import generate_docs_json from codegen.sdk.code_generation.prompts.api_docs import get_codegen_sdk_codebase, get_codegen_sdk_docs from codegen.sdk.core.symbol import Symbol diff --git a/uv.lock b/uv.lock index dc5051edb..a7e7d5d03 100644 --- a/uv.lock +++ b/uv.lock @@ -1,5 +1,4 @@ version = 1 -revision = 1 requires-python = ">=3.12, <3.14" resolution-markers = [ "python_full_version >= '3.12.4'", @@ -87,20 +86,20 @@ wheels = [ [[package]] name = "anthropic" -version = "0.23.1" +version = "0.46.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, { name = "distro" }, { name = "httpx" }, + { name = "jiter" }, { name = "pydantic" }, { name = "sniffio" }, - { name = "tokenizers" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1b/2d/7be8f53faba0ca14ea20d31ebc53a2a27a8ab76672d993c12198b69dda39/anthropic-0.23.1.tar.gz", hash = "sha256:9325103702cbc96bb09d1b58c36bde75c726f6a01029fb4d85f41ebba07e9066", size = 823288 } +sdist = { url = "https://files.pythonhosted.org/packages/d4/68/3b4c045edf6dc6933895e8f279cc77c7684874c8aba46a4e6241c8b147cf/anthropic-0.46.0.tar.gz", hash = "sha256:eac3d43271d02321a57c3ca68aca84c3d58873e8e72d1433288adee2d46b745b", size = 202191 } wheels = [ - { url = "https://files.pythonhosted.org/packages/f8/99/6716253f156fac232d2979020f1fb7c93f7ba4daafca4e8872e83dbe378a/anthropic-0.23.1-py3-none-any.whl", hash = "sha256:6dc5779dae83a5834864f4a4af0166c972b70f4cb8fd2765e1558282cc6d6242", size = 869140 }, + { url = "https://files.pythonhosted.org/packages/50/6f/346beae0375df5f6907230bc63d557ef5d7659be49250ac5931a758322ae/anthropic-0.46.0-py3-none-any.whl", hash = "sha256:1445ec9be78d2de7ea51b4d5acd3574e414aea97ef903d0ecbb57bec806aaa49", size = 223228 }, ] [[package]] @@ -271,15 +270,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b7/b8/3fe70c75fe32afc4bb507f75563d39bc5642255d1d94f1f23604725780bf/babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2", size = 10182537 }, ] -[[package]] -name = "backoff" -version = "2.2.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/47/d7/5bbeb12c44d7c4f2fb5b56abce497eb5ed9f34d85701de869acedd602619/backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba", size = 17001 } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/df/73/b6e24bd22e6720ca8ee9a85a0c4a2971af8497d8f3193fa05390cbd46e09/backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8", size = 15148 }, -] - [[package]] name = "bashlex" version = "0.18" @@ -540,9 +530,7 @@ wheels = [ name = "codegen" source = { editable = "." } dependencies = [ - { name = "anthropic" }, { name = "astor" }, - { name = "backoff" }, { name = "click" }, { name = "codeowners" }, { name = "dataclasses-json" }, @@ -556,6 +544,7 @@ dependencies = [ { name = "hatchling" }, { name = "humanize" }, { name = "langchain", extra = ["openai"] }, + { name = "langchain-anthropic" }, { name = "langchain-core" }, { name = "langchain-openai" }, { name = "lazy-object-proxy" }, @@ -590,7 +579,6 @@ dependencies = [ { name = "slack-sdk" }, { name = "starlette" }, { name = "tabulate" }, - { name = "tenacity" }, { name = "termcolor" }, { name = "tiktoken" }, { name = "toml" }, @@ -661,10 +649,8 @@ dev = [ [package.metadata] requires-dist = [ - { name = "anthropic", specifier = "==0.23.1" }, { name = "astor", specifier = ">=0.8.1,<1.0.0" }, { name = "attrs", marker = "extra == 'lsp'", specifier = ">=25.1.0" }, - { name = "backoff", specifier = "==2.2.1" }, { name = "click", specifier = ">=8.1.7" }, { name = "codeowners", specifier = ">=0.6.0,<1.0.0" }, { name = "dataclasses-json", specifier = ">=0.6.4,<1.0.0" }, @@ -678,6 +664,7 @@ requires-dist = [ { name = "hatchling", specifier = ">=1.25.0" }, { name = "humanize", specifier = ">=4.10.0,<5.0.0" }, { name = "langchain", extras = ["openai"] }, + { name = "langchain-anthropic", specifier = ">=0.3.7" }, { name = "langchain-core" }, { name = "langchain-openai" }, { name = "lazy-object-proxy", specifier = ">=0.0.0" }, @@ -714,7 +701,6 @@ requires-dist = [ { name = "slack-sdk" }, { name = "starlette", specifier = ">=0.16.0,<1.0.0" }, { name = "tabulate", specifier = ">=0.9.0,<1.0.0" }, - { name = "tenacity", specifier = ">=9.0.0" }, { name = "termcolor", specifier = ">=2.4.0" }, { name = "tiktoken", specifier = ">=0.5.1,<1.0.0" }, { name = "toml", specifier = ">=0.10.2" }, @@ -735,7 +721,6 @@ requires-dist = [ { name = "wrapt", specifier = ">=1.16.0,<2.0.0" }, { name = "xmltodict", specifier = ">=0.13.0,<1.0.0" }, ] -provides-extras = ["lsp", "types"] [package.metadata.requires-dev] dev = [ @@ -1242,15 +1227,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c6/c8/a5be5b7550c10858fcf9b0ea054baccab474da77d37f1e828ce043a3a5d4/frozenlist-1.5.0-py3-none-any.whl", hash = "sha256:d994863bba198a4a518b467bb971c56e1db3f180a25c6cf7bb1949c267f748c3", size = 11901 }, ] -[[package]] -name = "fsspec" -version = "2025.2.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b5/79/68612ed99700e6413de42895aa725463e821a6b3be75c87fcce1b4af4c70/fsspec-2025.2.0.tar.gz", hash = "sha256:1c24b16eaa0a1798afa0337aa0db9b256718ab2a89c425371f5628d22c3b6afd", size = 292283 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e2/94/758680531a00d06e471ef649e4ec2ed6bf185356a7f9fbfbb7368a40bd49/fsspec-2025.2.0-py3-none-any.whl", hash = "sha256:9de2ad9ce1f85e1931858535bc882543171d197001a0a5eb2ddc04f1781ab95b", size = 184484 }, -] - [[package]] name = "genson" version = "1.3.0" @@ -1454,31 +1430,13 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e1/9b/a181f281f65d776426002f330c31849b86b31fc9d848db62e16f03ff739f/httpx_sse-0.4.0-py3-none-any.whl", hash = 
"sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f", size = 7819 }, ] -[[package]] -name = "huggingface-hub" -version = "0.28.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "filelock" }, - { name = "fsspec" }, - { name = "packaging" }, - { name = "pyyaml" }, - { name = "requests" }, - { name = "tqdm" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/e7/ce/a734204aaae6c35a22f9956ebcd8d8708ae5b842e15d6f42bd6f49e634a4/huggingface_hub-0.28.1.tar.gz", hash = "sha256:893471090c98e3b6efbdfdacafe4052b20b84d59866fb6f54c33d9af18c303ae", size = 387074 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ea/da/6c2bea5327b640920267d3bf2c9fc114cfbd0a5de234d81cda80cc9e33c8/huggingface_hub-0.28.1-py3-none-any.whl", hash = "sha256:aa6b9a3ffdae939b72c464dbb0d7f99f56e649b55c3d52406f49e0a5a620c0a7", size = 464068 }, -] - [[package]] name = "humanize" -version = "4.12.0" +version = "4.12.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/38/ff/9f38de04e15bd53f5b64d38e6b9f21357d7b3edee7e398d05aaf407dbdfe/humanize-4.12.0.tar.gz", hash = "sha256:87ff7b43591370b12a1d103c9405849d911d4b039ed22d80b718b62c76eec8a3", size = 80785 } +sdist = { url = "https://files.pythonhosted.org/packages/5b/8c/4f2f0784d08a383b5de3d3b1d65a6f204cc5dc487621c91c550388d756af/humanize-4.12.1.tar.gz", hash = "sha256:1338ba97415c96556758a6e2f65977ed406dddf4620d4c6db9bbdfd07f0f1232", size = 80827 } wheels = [ - { url = "https://files.pythonhosted.org/packages/d5/6b/09e54be6cc58913fd991728b9b8f959b58ade87a2a7684318c3e90e5f1dc/humanize-4.12.0-py3-none-any.whl", hash = "sha256:106a7436a2d545d742c147c469716b3a08424aa143a82103630147c489a89f48", size = 127401 }, + { url = "https://files.pythonhosted.org/packages/32/30/5ef5994b090398f9284d2662f56853e5183ae2cb5d8e3db67e4f4cfea407/humanize-4.12.1-py3-none-any.whl", hash = "sha256:86014ca5c52675dffa1d404491952f1f5bf03b07c175a51891a343daebf01fea", size = 127409 }, ] [[package]] @@ -1919,7 +1877,7 @@ wheels = [ [[package]] name = "langchain" -version = "0.3.18" +version = "0.3.19" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -1933,9 +1891,9 @@ dependencies = [ { name = "sqlalchemy" }, { name = "tenacity" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/87/23/612d99c74889f672fe349f43a458a42e449650ebd57073b9e96e0b6b2253/langchain-0.3.18.tar.gz", hash = "sha256:311ac227a995545ff7c3f74c7767930c5349edef0b39f19d3105b86d39316b69", size = 10223807 } +sdist = { url = "https://files.pythonhosted.org/packages/a2/cf/a064ef27d5f3154491c85783590a25d7ae22340cddedf9bf47496044e4eb/langchain-0.3.19.tar.gz", hash = "sha256:b96f8a445f01d15d522129ffe77cc89c8468dbd65830d153a676de8f6b899e7b", size = 10224228 } wheels = [ - { url = "https://files.pythonhosted.org/packages/93/83/a4b41a1cf8b22fd708104d50edf98b720aa28647d3083d83b8348927a786/langchain-0.3.18-py3-none-any.whl", hash = "sha256:1a6e629f02a25962aa5b16932e8f073248104a66804ed5af1f78618ad7c1d38d", size = 1010321 }, + { url = "https://files.pythonhosted.org/packages/18/7d/0f4cc3317634195381f87c5d90268f29b9a31fda62aa7a7f36a1c27b06f3/langchain-0.3.19-py3-none-any.whl", hash = "sha256:1e16d97db9106640b7de4c69f8f5ed22eeda56b45b9241279e83f111640eff16", size = 1010630 }, ] [package.optional-dependencies] @@ -1943,9 +1901,23 @@ openai = [ { name = "langchain-openai" }, ] +[[package]] +name = "langchain-anthropic" +version = "0.3.7" +source = { 
registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anthropic" }, + { name = "langchain-core" }, + { name = "pydantic" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/70/b0/84cfe0b4b829bcdc99fbb1a06973a6f3109b4e326292cdf5fa46f88dbf2f/langchain_anthropic-0.3.7.tar.gz", hash = "sha256:534cd1867bc41711cd8c3d0a0bc055e6c5a4215953c87260209a90dc5816f30d", size = 39838 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f3/b3/111e1f41b0044687ec0c34c921ad52d33d2802282b1bc45343d5dd923fb6/langchain_anthropic-0.3.7-py3-none-any.whl", hash = "sha256:adec0a1daabd3c25249753c6cd625654917fb9e3feee68e72c7dc3f4449c0f3c", size = 22998 }, +] + [[package]] name = "langchain-core" -version = "0.3.35" +version = "0.3.36" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jsonpatch" }, @@ -1956,9 +1928,9 @@ dependencies = [ { name = "tenacity" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/56/5c/5ed0aa88d0ffc52634b97dcd971d675cfc55072b28f7723bba2ef9432bad/langchain_core-0.3.35.tar.gz", hash = "sha256:328688228ece259da734417d477994a69cf8202dea9ed4271f2d792e3575c6fc", size = 525736 } +sdist = { url = "https://files.pythonhosted.org/packages/e2/41/e638f46eb7037fd8aab3484d9c109d0f30a04ac4bbd3e283dcfc80a31309/langchain_core-0.3.36.tar.gz", hash = "sha256:dffdce8a554905f53f33c1d6a40633a45a8d47c17c5792753891dd73941cd57a", size = 526843 } wheels = [ - { url = "https://files.pythonhosted.org/packages/94/63/be67b8ab4f4cf07b8da65a67ff72ea870ec8cf9f2e95e3a5b837a5604d1b/langchain_core-0.3.35-py3-none-any.whl", hash = "sha256:81a4097226e180fa6c64e2d2ab38dcacbbc23b64fc109fb15622910fe8951670", size = 413151 }, + { url = "https://files.pythonhosted.org/packages/d3/06/b764bcf5523c271a35005ba7047f6d216337e598b41a1f2783a99a11f5d6/langchain_core-0.3.36-py3-none-any.whl", hash = "sha256:8410311862c7c674e4f3f120cfd8d1f3d003d6e7d8cb8f934746e222f7e865d9", size = 413640 }, ] [[package]] @@ -2212,16 +2184,16 @@ wheels = [ [[package]] name = "mistune" -version = "3.1.1" +version = "3.1.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/c6/1d/6b2b634e43bacc3239006e61800676aa6c41ac1836b2c57497ed27a7310b/mistune-3.1.1.tar.gz", hash = "sha256:e0740d635f515119f7d1feb6f9b192ee60f0cc649f80a8f944f905706a21654c", size = 94645 } +sdist = { url = "https://files.pythonhosted.org/packages/80/f7/f6d06304c61c2a73213c0a4815280f70d985429cda26272f490e42119c1a/mistune-3.1.2.tar.gz", hash = "sha256:733bf018ba007e8b5f2d3a9eb624034f6ee26c4ea769a98ec533ee111d504dff", size = 94613 } wheels = [ - { url = "https://files.pythonhosted.org/packages/c6/02/c66bdfdadbb021adb642ca4e8a5ed32ada0b4a3e4b39c5d076d19543452f/mistune-3.1.1-py3-none-any.whl", hash = "sha256:02106ac2aa4f66e769debbfa028509a275069dcffce0dfa578edd7b991ee700a", size = 53696 }, + { url = "https://files.pythonhosted.org/packages/12/92/30b4e54c4d7c48c06db61595cffbbf4f19588ea177896f9b78f0fbe021fd/mistune-3.1.2-py3-none-any.whl", hash = "sha256:4b47731332315cdca99e0ded46fc0004001c1299ff773dfb48fbe1fd226de319", size = 53696 }, ] [[package]] name = "modal" -version = "0.73.51" +version = "0.73.55" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -2239,9 +2211,9 @@ dependencies = [ { name = "typing-extensions" }, { name = "watchfiles" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/3c/d0/ef9322bc8fc653e1b24422287b108ca9a0cd489b59691b77082c4ee6a840/modal-0.73.51.tar.gz", hash = 
"sha256:497d115ae92b46b65f0b8d2391465e327cd67f05ef11aa3cbc5f74f184cbefae", size = 468049 } +sdist = { url = "https://files.pythonhosted.org/packages/a5/ee/2619ec80552859e1ef45f5a6125e7fe84b6bc4b4876fcf852fa0d84670b3/modal-0.73.55.tar.gz", hash = "sha256:90276bc234c0ef371879ca8e21085d33a98520bd5552e19f04c26563be5bb447", size = 467414 } wheels = [ - { url = "https://files.pythonhosted.org/packages/c0/a3/57bccda40048ad4feae34f60ee7a88b57f5d7e0162c7bba51f7c16d90b85/modal-0.73.51-py3-none-any.whl", hash = "sha256:fb173b405ed139666657580a2ffee313004b84643585052bdfa7447acf2df599", size = 534085 }, + { url = "https://files.pythonhosted.org/packages/b6/e5/6f2ac840a186015492f61bd080d5eec271291bbf6dad37e800b83b48aaeb/modal-0.73.55-py3-none-any.whl", hash = "sha256:fcc2a8cbfd590f7a1f28d377cf22939c523a0f56c4f0372b10024395ae18e8ed", size = 533649 }, ] [[package]] @@ -2327,11 +2299,11 @@ wheels = [ [[package]] name = "narwhals" -version = "1.26.0" +version = "1.27.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/18/6f/75929abaac73088fe34c788ecb40db20252174bcd00b8612381aebb954ee/narwhals-1.26.0.tar.gz", hash = "sha256:b9d7605bf1d97a9d87783a69748c39150964e2a1ab0e5a6fef3e59e56772639e", size = 248933 } +sdist = { url = "https://files.pythonhosted.org/packages/5a/d6/1dadff863b95e4ec74eaba7979278e446699532136c74183a398778b1949/narwhals-1.27.1.tar.gz", hash = "sha256:68505d0cee1e6c00382ac8b65e922f8b694a11cbe482a057fa63139de8d0ea03", size = 251670 } wheels = [ - { url = "https://files.pythonhosted.org/packages/15/fc/420680ad8b0cf81372eee7a213a7b7173ec5a628f0d5b2426047fe55c3b3/narwhals-1.26.0-py3-none-any.whl", hash = "sha256:4af8bbdea9e45638bb9a981568a8dfa880e40eb7dcf740d19fd32aea79223c6f", size = 306574 }, + { url = "https://files.pythonhosted.org/packages/ed/ea/dc14822a0a75e027562f081eb638417b1b7845e1e01dd85c5b6573ebf1b2/narwhals-1.27.1-py3-none-any.whl", hash = "sha256:71e4a126007886e3dd9d71d0d5921ebd2e8c1f9be9c405fe11850ece2b066c59", size = 308837 }, ] [[package]] @@ -3248,11 +3220,12 @@ wheels = [ [[package]] name = "python-semantic-release" -version = "9.19.1" +version = "9.20.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, { name = "click-option-group" }, + { name = "deprecated" }, { name = "dotty-dict" }, { name = "gitpython" }, { name = "importlib-resources" }, @@ -3264,9 +3237,9 @@ dependencies = [ { name = "shellingham" }, { name = "tomlkit" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b8/3a/7d1e5a7fadaf2e2d9d43478d45556ad8b01f680fbc2807bac368639ef2a8/python_semantic_release-9.19.1.tar.gz", hash = "sha256:cc8af7a897ac796c6ed81b26bf6c6ef00e4cd182d7379399b0152e02b51b6b6c", size = 299811 } +sdist = { url = "https://files.pythonhosted.org/packages/30/51/cdd1d8d3eae29cdca5d59087106050d0c9eaf767ca2b3c0c0dde5f16c405/python_semantic_release-9.20.0.tar.gz", hash = "sha256:56bd78d39b59be1741e4783bd857110590166f08e7dcae1c951d14d7ac6076f3", size = 306483 } wheels = [ - { url = "https://files.pythonhosted.org/packages/06/1c/a12704d6e7d3e553fb0010f2a2ff9e76437328764067278e3101958abb91/python_semantic_release-9.19.1-py3-none-any.whl", hash = "sha256:6628a12593cac5fc3ba21664905b25e0a718f3c63b4e028f93c09bad508c4dad", size = 127268 }, + { url = "https://files.pythonhosted.org/packages/e7/80/706dfddba4b99d6821bf769cfc28aef1365db63f2a94967004ff4cd8bca9/python_semantic_release-9.20.0-py3-none-any.whl", hash = "sha256:75de18044e6ca11d0298414a02f66c50cbde3c76af63ca7375de04198f462e95", size = 132553 }, ] 
[[package]] @@ -3874,11 +3847,11 @@ wheels = [ [[package]] name = "sybil" -version = "9.0.0" +version = "9.1.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f8/de/c6268b0e266654274f7b8eaeb26d1753ace5f424ac40bb1b72be1be57545/sybil-9.0.0.tar.gz", hash = "sha256:0856c253e40d448d6e15d89044ef34c8b5d1f63f685f9371f4bbc9f75f072fa5", size = 42529 } +sdist = { url = "https://files.pythonhosted.org/packages/4f/6a/70301b41715b24b5372b22e83bea6637d92f017e7718fffff78ae0a80381/sybil-9.1.0.tar.gz", hash = "sha256:c63c14c9843c62fa994c777eb48c0632f62be5c499ecbe05c1d130157ed6a6ca", size = 84284 } wheels = [ - { url = "https://files.pythonhosted.org/packages/a4/e1/bb1ce3d1c9d9dd764faf255f7050c0a9068d844593de4e4fa23bf10efa78/sybil-9.0.0-py3-none-any.whl", hash = "sha256:dbcbd7f11a0d438aad22bc24f0bc0c651ad2dde7f3b1023e72e92ea4aed9e02b", size = 37671 }, + { url = "https://files.pythonhosted.org/packages/3f/c5/995e0ff97f2f972c2c87fa35e1ad8926fcfc280853094118edc4c44b1e44/sybil-9.1.0-py3-none-any.whl", hash = "sha256:7fe20074289bbb5f06dc7ad564dff4ffe42a678add7860345089a7b78125132a", size = 37761 }, ] [package.optional-dependencies] @@ -3985,31 +3958,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e6/34/ebdc18bae6aa14fbee1a08b63c015c72b64868ff7dae68808ab500c492e2/tinycss2-1.4.0-py3-none-any.whl", hash = "sha256:3a49cf47b7675da0b15d0c6e1df8df4ebd96e9394bb905a5775adb0d884c5289", size = 26610 }, ] -[[package]] -name = "tokenizers" -version = "0.21.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "huggingface-hub" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/20/41/c2be10975ca37f6ec40d7abd7e98a5213bb04f284b869c1a24e6504fd94d/tokenizers-0.21.0.tar.gz", hash = "sha256:ee0894bf311b75b0c03079f33859ae4b2334d675d4e93f5a4132e1eae2834fe4", size = 343021 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b0/5c/8b09607b37e996dc47e70d6a7b6f4bdd4e4d5ab22fe49d7374565c7fefaf/tokenizers-0.21.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:3c4c93eae637e7d2aaae3d376f06085164e1660f89304c0ab2b1d08a406636b2", size = 2647461 }, - { url = "https://files.pythonhosted.org/packages/22/7a/88e58bb297c22633ed1c9d16029316e5b5ac5ee44012164c2edede599a5e/tokenizers-0.21.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:f53ea537c925422a2e0e92a24cce96f6bc5046bbef24a1652a5edc8ba975f62e", size = 2563639 }, - { url = "https://files.pythonhosted.org/packages/f7/14/83429177c19364df27d22bc096d4c2e431e0ba43e56c525434f1f9b0fd00/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b177fb54c4702ef611de0c069d9169f0004233890e0c4c5bd5508ae05abf193", size = 2903304 }, - { url = "https://files.pythonhosted.org/packages/7e/db/3433eab42347e0dc5452d8fcc8da03f638c9accffefe5a7c78146666964a/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6b43779a269f4629bebb114e19c3fca0223296ae9fea8bb9a7a6c6fb0657ff8e", size = 2804378 }, - { url = "https://files.pythonhosted.org/packages/57/8b/7da5e6f89736c2ade02816b4733983fca1c226b0c42980b1ae9dc8fcf5cc/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9aeb255802be90acfd363626753fda0064a8df06031012fe7d52fd9a905eb00e", size = 3095488 }, - { url = "https://files.pythonhosted.org/packages/4d/f6/5ed6711093dc2c04a4e03f6461798b12669bc5a17c8be7cce1240e0b5ce8/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:d8b09dbeb7a8d73ee204a70f94fc06ea0f17dcf0844f16102b9f414f0b7463ba", size = 3121410 }, - { url = "https://files.pythonhosted.org/packages/81/42/07600892d48950c5e80505b81411044a2d969368cdc0d929b1c847bf6697/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:400832c0904f77ce87c40f1a8a27493071282f785724ae62144324f171377273", size = 3388821 }, - { url = "https://files.pythonhosted.org/packages/22/06/69d7ce374747edaf1695a4f61b83570d91cc8bbfc51ccfecf76f56ab4aac/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e84ca973b3a96894d1707e189c14a774b701596d579ffc7e69debfc036a61a04", size = 3008868 }, - { url = "https://files.pythonhosted.org/packages/c8/69/54a0aee4d576045b49a0eb8bffdc495634309c823bf886042e6f46b80058/tokenizers-0.21.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:eb7202d231b273c34ec67767378cd04c767e967fda12d4a9e36208a34e2f137e", size = 8975831 }, - { url = "https://files.pythonhosted.org/packages/f7/f3/b776061e4f3ebf2905ba1a25d90380aafd10c02d406437a8ba22d1724d76/tokenizers-0.21.0-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:089d56db6782a73a27fd8abf3ba21779f5b85d4a9f35e3b493c7bbcbbf0d539b", size = 8920746 }, - { url = "https://files.pythonhosted.org/packages/d8/ee/ce83d5ec8b6844ad4c3ecfe3333d58ecc1adc61f0878b323a15355bcab24/tokenizers-0.21.0-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:c87ca3dc48b9b1222d984b6b7490355a6fdb411a2d810f6f05977258400ddb74", size = 9161814 }, - { url = "https://files.pythonhosted.org/packages/18/07/3e88e65c0ed28fa93aa0c4d264988428eef3df2764c3126dc83e243cb36f/tokenizers-0.21.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:4145505a973116f91bc3ac45988a92e618a6f83eb458f49ea0790df94ee243ff", size = 9357138 }, - { url = "https://files.pythonhosted.org/packages/15/b0/dc4572ca61555fc482ebc933f26cb407c6aceb3dc19c301c68184f8cad03/tokenizers-0.21.0-cp39-abi3-win32.whl", hash = "sha256:eb1702c2f27d25d9dd5b389cc1f2f51813e99f8ca30d9e25348db6585a97e24a", size = 2202266 }, - { url = "https://files.pythonhosted.org/packages/44/69/d21eb253fa91622da25585d362a874fa4710be600f0ea9446d8d0217cec1/tokenizers-0.21.0-cp39-abi3-win_amd64.whl", hash = "sha256:87841da5a25a3a5f70c102de371db120f41873b854ba65e52bccd57df5a3780c", size = 2389192 }, -] - [[package]] name = "toml" version = "0.10.2" @@ -4136,11 +4084,11 @@ wheels = [ [[package]] name = "trove-classifiers" -version = "2025.1.15.22" +version = "2025.2.18.16" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f3/cb/8f6a91c74049180e395590901834d68bef5d6a2ce4c9ca9792cfadc1b9b4/trove_classifiers-2025.1.15.22.tar.gz", hash = "sha256:90af74358d3a01b3532bc7b3c88d8c6a094c2fd50a563d13d9576179326d7ed9", size = 16236 } +sdist = { url = "https://files.pythonhosted.org/packages/13/8e/15ba2980e2704edecc53d15506a5bfa6efb3b1cadc5e4df7dc277bc199f8/trove_classifiers-2025.2.18.16.tar.gz", hash = "sha256:b1ee2e1668589217d4edf506743e28b1834da128f8a122bad522c02d837006e1", size = 16271 } wheels = [ - { url = "https://files.pythonhosted.org/packages/2b/c5/6422dbc59954389b20b2aba85b737ab4a552e357e7ea14b52f40312e7c84/trove_classifiers-2025.1.15.22-py3-none-any.whl", hash = "sha256:5f19c789d4f17f501d36c94dbbf969fb3e8c2784d008e6f5164dd2c3d6a2b07c", size = 13610 }, + { url = "https://files.pythonhosted.org/packages/e1/67/038a8c7f60ffd6037374649826dbaa221e4b17755016b71a581162a15ce1/trove_classifiers-2025.2.18.16-py3-none-any.whl", hash = 
"sha256:7f6dfae899f23f04b73bc09e0754d9219a6fc4d6cca6acd62f1850a87ea92262", size = 13616 }, ] [[package]] @@ -4278,27 +4226,27 @@ wheels = [ [[package]] name = "uv" -version = "0.6.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/7d/63/321ab82ff5f5c665ca9aeef20150d12775f32386a240eda045cf8f51c8bc/uv-0.6.0.tar.gz", hash = "sha256:7dc1f1050b89537ee8ac66ef532a05fe822293d788b6754db729e1b4b3be062a", size = 2879792 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/4e/44/b241637dae9d8d37dda7698ab6c2ef657d20162e99ee78ecd73f7366665d/uv-0.6.0-py3-none-linux_armv6l.whl", hash = "sha256:b56235a98d81b156052f4e8d436ebeba60687a91113aeeebab310a8d5998cd68", size = 15461279 }, - { url = "https://files.pythonhosted.org/packages/f2/9e/2695331ac5187a74940870ba9400d207a0d593dd5a0417de6fffa5246b3e/uv-0.6.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:5dca405337e5b62b3172335fc8a5e627c9f07cedc87c8ff595199b847b0b877f", size = 15621660 }, - { url = "https://files.pythonhosted.org/packages/6d/c3/c1e81bfdd3414492650ca2647dbb466bb9e0063ee71020dc49f5f011f9ac/uv-0.6.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:26d655adf59ec088f07a2459de3f5e0565e8f84f389bfe936a354e5e169dfc8f", size = 14511660 }, - { url = "https://files.pythonhosted.org/packages/2c/7c/59939f2c704eee7a1b5f58814970e46527668e51ad715f1466a6793edee4/uv-0.6.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.musllinux_1_1_aarch64.whl", hash = "sha256:7dbdaf1c99df5b78fd39ced7ede967aa596ecf4f9d0bee8deb182d4e160dd488", size = 14938907 }, - { url = "https://files.pythonhosted.org/packages/6e/9d/fb0278d0655da86b1f5301e3f4a85d0cac6875eedf8c06960493791e22c0/uv-0.6.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f620509b75285b216f855ddd16126f4d8868ff8cd44a518e6f9ec917d8ac3ceb", size = 15193019 }, - { url = "https://files.pythonhosted.org/packages/dd/0f/c271840728449bc00a155c141fdb22cc4bcd1c6b23fbc353ba5ad316286a/uv-0.6.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a51a09e1d06ac677f4cc478fdd73d6d243a7e71d542174e75748c9843406f483", size = 15947047 }, - { url = "https://files.pythonhosted.org/packages/20/0d/4fdfe36e6409af7d61b502c325dbb9fb0d2487c29fccf9f55a3652759607/uv-0.6.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:b3829d82e0dd37de375b5d9ee8485731465e255e61557f4df798e3324cf306cd", size = 16886229 }, - { url = "https://files.pythonhosted.org/packages/fd/41/a7d3ae81e5f200f7dd339dabe9c81bf6ff0182a67d03d7fc712bc6ad9542/uv-0.6.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ec09034b7d5ba255fc80e5bec2a3f2ff3a40bdaea3c10a056e2fb3aeceb94d94", size = 16593590 }, - { url = "https://files.pythonhosted.org/packages/45/cb/acda84c40d4679bfe3a539b9f17ef648969688b407c1391c6e168f3a25a9/uv-0.6.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c986ce9c02beccc08ab8ebdaf05a5540af4fd361f15049cdabdd2fee3d26afb9", size = 20926831 }, - { url = "https://files.pythonhosted.org/packages/17/6e/8af7aa224d20ea9d40c751df318887e533de4feb970d76134a151d1f4137/uv-0.6.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd09c6b7ff1742aca07ad4805ef491eb91b03e4b0815a230076381900061f4a7", size = 16256572 }, - { url = "https://files.pythonhosted.org/packages/f4/43/7b9def0e832be06e6da2c37f0e845eb554e2591c019a82644f6add9087e7/uv-0.6.0-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:18c481de419bf7d77243fb9f79dba2f06510008b4b451e2db23c6a8e3c269970", size = 15210365 }, 
- { url = "https://files.pythonhosted.org/packages/e4/ca/e439a34379b09c8486cea97ef6e9c9ac84c75d415141bbae785fa31fd9c5/uv-0.6.0-py3-none-musllinux_1_1_armv7l.whl", hash = "sha256:2e6ae77ab255f5732e8dd7bfe51128955cc863c8b478c842fbebce31111f403e", size = 15201279 }, - { url = "https://files.pythonhosted.org/packages/7c/1b/8487406c3a4ff1d326764e7f7e7b529dadfe789e47ba9327f77fb2df32b3/uv-0.6.0-py3-none-musllinux_1_1_i686.whl", hash = "sha256:d8d9b7747b8125fafd12ce72a06a5df22c12f19eb34dc809705304cbf9f4ba45", size = 15571125 }, - { url = "https://files.pythonhosted.org/packages/92/3c/ac38d15db45a01e831214d50521eb2b2d78d465a6575a0bb4d9a89edae80/uv-0.6.0-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:967e5e76910f22f0fc03dc60913e16b37f5c90a5f173830c6566f13a34bca22e", size = 16370602 }, - { url = "https://files.pythonhosted.org/packages/43/68/229b623e96e6d83a7a8078ecd4c90b1ccff071a8a3b0153bfa40213298b8/uv-0.6.0-py3-none-win32.whl", hash = "sha256:10eb10c88a9837d68dd3d7deef66b31c22b3d6984175ade0702c60f04b804d68", size = 15566123 }, - { url = "https://files.pythonhosted.org/packages/5c/93/f7699bd6774a969625717f7e687b37f21ca63b6015c361d2eba073160f19/uv-0.6.0-py3-none-win_amd64.whl", hash = "sha256:e9f7041131e6f1abd4e91f94143e31f5022d71e6f388976c62e5016eaa676f5d", size = 16918420 }, - { url = "https://files.pythonhosted.org/packages/13/ea/f40a874d32e4e74ec62799ed83df840187c7062855ed16e51cb2dabe807a/uv-0.6.0-py3-none-win_arm64.whl", hash = "sha256:32af3ab683aa3e630fbb4f069637445488770a6722f8d97aaae7f0032d32b68f", size = 15801705 }, +version = "0.6.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6d/05/118e10d91981b85f47b27d089782a6598a9584ff607bffb8e2f6be1f1245/uv-0.6.2.tar.gz", hash = "sha256:d696a4f3d4a3ac1b305255e8814ae3a147ea3428a977bb3b4335a339941799bc", size = 3066291 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a1/cf/9c3c9a427c7ecc37be238c4433188614b3d342191c0299c632f512d493ff/uv-0.6.2-py3-none-linux_armv6l.whl", hash = "sha256:d501ae16fb33969b12a64ac7b9c49d672b8c3964026c5dcaee3b1dcd50a6a22c", size = 15513992 }, + { url = "https://files.pythonhosted.org/packages/86/01/1e1f88826d92d11f2232f96eef190574a4edb470546a141bba652cd37240/uv-0.6.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:2c13ca920d87dc00721a86ac3d19667cff5435b369d21e3d6df76b373d8fa8df", size = 15659547 }, + { url = "https://files.pythonhosted.org/packages/ee/40/59e9c03431d4c82420e081f92719e5784db8f1c92a25b2abdfe6ac645b7e/uv-0.6.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:f24e119d338bae32b5a604585b7b518036fba556e2c2d9dbd2d7cf1411213b57", size = 14589044 }, + { url = "https://files.pythonhosted.org/packages/11/8b/5d9f9f4e3969d6a2c9ce9a0b4a85ecb8ca89bf5c00e9ec097cf472abb2a2/uv-0.6.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.musllinux_1_1_aarch64.whl", hash = "sha256:1db90b728a173926e2018b89df776a373b1e50520466f61e0dbf05f9a64a6db5", size = 15034328 }, + { url = "https://files.pythonhosted.org/packages/f3/ba/f31fd6af8f70b21d9e0b7cca0241a8f10e03d24862f49f93fbc5ff1e4fce/uv-0.6.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d23fb9cd41aecb31845e884d0bfde243e04e763abeab3532138321b4ebe7437c", size = 15275180 }, + { url = "https://files.pythonhosted.org/packages/aa/3b/358cfea4265a0966fafa7934ed0f9f1fb031d7ebbe8a15e02a308afff6ad/uv-0.6.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:df0a1d95fd1539c05de434259fafcee0b6852900d4178e94b3b6b6b06438b60c", size = 15969503 }, + { url = 
"https://files.pythonhosted.org/packages/57/f5/840d8fb46c1cf723e1b7168832de52e58d86764aa625c2100b35a27261af/uv-0.6.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:f2f0dc9a0564b31d4efdee317c176a23bbe7e61aec6d281a331ba6ae32f828ff", size = 16950563 }, + { url = "https://files.pythonhosted.org/packages/f6/37/75c5ff09db56c34f0f5d3d55dd4188e52d09219ef76bfe176dae58ed5f4a/uv-0.6.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:326aff8c4fb8153e2384e79904c27b1c9d4c3a5879b53a6fbc2da3283fda321d", size = 16631562 }, + { url = "https://files.pythonhosted.org/packages/9d/5f/91bfae5ecf9f6c5f4754aa794159acc77245a53233a966865ae4974e5cdf/uv-0.6.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8763f310a473f46c0226f5e08a876bd34de121ac370cc7294a5397a13a18d8a", size = 20994598 }, + { url = "https://files.pythonhosted.org/packages/8d/39/17f77b4b5f1a1e579d9ce94859aada9418c9ebcaa227b54b10648218bafa/uv-0.6.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e2e421947ef889e6c8913992c560d611826464eabc78f8f702a5eff824aabc7", size = 16367280 }, + { url = "https://files.pythonhosted.org/packages/a7/6b/fbd9794e1344b299e02993322f44b500f4d66ecdb83860e2fcf35d8cac2c/uv-0.6.2-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:7dd26dabd918e5648ecf94fb7c0787db954237e34ea3bdd944b98d007b44c3a5", size = 15317824 }, + { url = "https://files.pythonhosted.org/packages/51/a0/9249a55365c2f9781243a7f35c3a01864b19aa9a62b1fc50b7231793346e/uv-0.6.2-py3-none-musllinux_1_1_armv7l.whl", hash = "sha256:f3719da2e59403783eab634a6238b90051fc65379e02c10b9ca1b32b26d35f77", size = 15228644 }, + { url = "https://files.pythonhosted.org/packages/27/76/790b3d9c0b9ecd9ab6c1b7e904c36d470685c70d0b21a134b026452e0fcc/uv-0.6.2-py3-none-musllinux_1_1_i686.whl", hash = "sha256:b435687e5c26a64858ea842fbb4b35ced8e8741a99d1b75d0c0143462e956db9", size = 15608612 }, + { url = "https://files.pythonhosted.org/packages/05/b6/79961374b2318461b4dfc0e565d63281bf788fea93fc81b2d1738847aec2/uv-0.6.2-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:0f1e8e15c92607862e72e0467a31947af7b9aef93924072e9b4d5dcb5633d374", size = 16480962 }, + { url = "https://files.pythonhosted.org/packages/68/20/df7788bde9d114c501cd8ebb60235be07ff0fb0dc26fa1e7e99ada251d73/uv-0.6.2-py3-none-win32.whl", hash = "sha256:52b7452f4c523b9875de53ba73df87acd1cdea36640281d0d80c8074eda42f16", size = 15717804 }, + { url = "https://files.pythonhosted.org/packages/e1/0a/fc966f859b6252050c71e1afcdce116c8ef3513f8b423bb3ca05fb13485d/uv-0.6.2-py3-none-win_amd64.whl", hash = "sha256:5337cdb6ecc604d0cf36fe6799dd0479111b606009e6c29685d213c74eb40373", size = 17017798 }, + { url = "https://files.pythonhosted.org/packages/03/82/4318c4874c8dd59a0386e2bf0f4d09fc5bb4900349238828153235d387eb/uv-0.6.2-py3-none-win_arm64.whl", hash = "sha256:27ecb8f6ef796220062f31a12e2dc5dc7a14704aa1df0da2dfa3530346c7e3cc", size = 15923484 }, ] [[package]]