40 changes: 15 additions & 25 deletions common/graph.py
@@ -1,12 +1,5 @@
 from pathlib import Path
-from typing import (
-    Any,
-    List,
-    Mapping,
-    Optional,
-    Set,
-    Tuple,
-)
+from typing import Any
 
 import networkx as nx
 
@@ -19,9 +12,8 @@
 logger = logging.getLogger(__name__)
 
 
-def _get_module_path(process: OdoobinProcess, module_name: str) -> Optional[Path]:
-    """
-    Find the path of a module within the Odoo addons paths.
+def _get_module_path(process: OdoobinProcess, module_name: str) -> Path | None:
+    """Find the path of a module within the Odoo addons paths.
 
     :param process: The OdoobinProcess instance which knows about addon paths.
     :param module_name: The name of the module to find.
@@ -37,9 +29,8 @@ def _get_module_path(process: OdoobinProcess, module_name: str) -> Optional[Path]:
     return None
 
 
-def build_dependency_tree(process: OdoobinProcess, modules: List[str], max_level: int = 1) -> nx.DiGraph:
-    """
-    Build a dependency tree for a list of Odoo modules.
+def build_dependency_tree(process: OdoobinProcess, modules: list[str], max_level: int = 1) -> nx.DiGraph:
+    """Build a dependency tree for a list of Odoo modules.
 
     This method parses modules from the standard Odoo repositories (odoo,
     enterprise, design-themes), reads their manifests to find dependencies,
@@ -51,8 +42,8 @@ def build_dependency_tree(process: OdoobinProcess, modules: List[str], max_level: int = 1) -> nx.DiGraph:
     :return: A networkx.DiGraph representing the dependency tree.
     """
     graph: nx.DiGraph = nx.DiGraph()
-    to_process: List[Tuple[str, int]] = [(m, 0) for m in modules]
-    processed: Set[str] = set()
+    to_process: list[tuple[str, int]] = [(m, 0) for m in modules]
+    processed: set[str] = set()
 
     while to_process:
         module_name, level = to_process.pop(0)
@@ -65,14 +56,14 @@ def build_dependency_tree(process: OdoobinProcess, modules: List[str], max_level: int = 1) -> nx.DiGraph:
         if max_level is not None and level >= max_level:
             continue
 
-        module_path: Optional[Path] = _get_module_path(process, module_name)
+        module_path: Path | None = _get_module_path(process, module_name)
         if not module_path:
             logger.warning(f"Module '{module_name}' not found in standard Odoo repositories.")
             continue
 
-        manifest: Optional[Mapping[str, Any]] = process.read_manifest(module_path / "__manifest__.py")
+        manifest: dict[str, Any] | None = process.read_manifest(module_path / "__manifest__.py")
         if manifest and "depends" in manifest:
-            dependencies: List[str] = manifest.get("depends", [])
+            dependencies: list[str] = manifest.get("depends", [])
             for dependency in dependencies:
                 graph.add_edge(dependency, module_name)
                 if dependency not in processed:
@@ -81,24 +72,23 @@ def build_dependency_tree(process: OdoobinProcess, modules: List[str], max_level: int = 1) -> nx.DiGraph:
     return graph
 
 
-def print_dependency_tree(graph: nx.DiGraph, modules: List[str]) -> None:
-    """
-    Prints the dependency tree and installation order for a given graph.
+def print_dependency_tree(graph: nx.DiGraph, modules: list[str]) -> None:
+    """Print the dependency tree and installation order for a given graph.
 
     :param graph: The dependency graph, as returned by `build_dependency_tree`.
     :param modules: The list of initial modules to highlight in the output.
     """
     console.print(string.stylize(f"\nDependency Tree for: {', '.join(modules)}\n", "bold underline"))
 
-    sorted_modules: List[str] = sorted(graph.nodes())
+    sorted_modules: list[str] = sorted(graph.nodes())
 
     for module in sorted_modules:
-        dependencies: List[str] = sorted(graph.predecessors(module))
+        dependencies: list[str] = sorted(graph.predecessors(module))
         if dependencies:
             console.print(f"  {string.stylize(module, 'bold cyan')} -> {', '.join(dependencies)}")
 
     try:
-        installation_order: List[str] = list(nx.topological_sort(graph))
+        installation_order: list[str] = list(nx.topological_sort(graph))
         console.print(f"\n{string.stylize('Installation Order (Topological Sort):', 'bold underline')}\n")
         for module in installation_order:
             console.print(f"  - {module}")
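Taken together, build_dependency_tree walks manifests breadth-first up to max_level and returns a networkx DiGraph whose edges point from a dependency to the module that requires it, and print_dependency_tree renders that graph plus a topological installation order. A minimal usage sketch of the two helpers; the import path is inferred from this file's location, and `process` and the module names are illustrative assumptions, not part of this diff:

    from common.graph import build_dependency_tree, print_dependency_tree  # path assumed from common/graph.py

    # `process` is assumed to be an OdoobinProcess already wired to the
    # odoo/enterprise/design-themes repositories; "sale" and "stock" are placeholders.
    modules = ["sale", "stock"]

    graph = build_dependency_tree(process, modules, max_level=2)
    print_dependency_tree(graph, modules)

    # Because edges run dependency -> dependent, the topological sort printed above
    # lists each module after everything it depends on, i.e. a valid install order.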
45 changes: 20 additions & 25 deletions common/llm.py
@@ -1,5 +1,3 @@
-from typing import Dict, List, Optional
-
 import litellm
 from litellm import InternalServerError, ModelResponse, token_counter
 
@@ -13,8 +11,8 @@
 
 # A mapping of provider names to a list of model names to try in order of preference.
 # Note: These may be custom model names/aliases specific to a litellm proxy setup.
-LLM_LIST: Dict[str, List[str]] = {
-    "Gemini": ["gemini/gemini-2.5-pro", "gemini/gemini-2.5-flash"],
+LLM_LIST: dict[str, list[str]] = {
+    "Gemini": ["gemini/gemini-3-pro-preview", "gemini/gemini-2.5-pro", "gemini/gemini-2.5-flash"],
     "ChatGPT": ["chatgpt/gpt-5", "chatgpt/gpt-4.5"],
     "Claude": ["claude/claude-4", "claude/claude-3.7"],
     "Grok": ["grok/grok-4", "grok/grok-3"],
@@ -26,11 +24,10 @@ class LLM:
 
     provider: str
     api_key: str
-    model: Optional[str] = None
+    model: str | None = None
 
-    def __init__(self, model_identifier: Optional[str] = None, api_key: Optional[str] = None):
-        """
-        Initializes the LLM client.
+    def __init__(self, model_identifier: str | None = None, api_key: str | None = None):
+        """Initialize the LLM client.
 
         :param model_identifier: The name of the LLM provider (e.g., "Gemini", "ChatGPT") or a specific model
                                  identifier (e.g., "gemini/gemini-1.5-pro"). If a provider is given, it must
@@ -51,9 +48,8 @@ def __init__(self, model_identifier: Optional[str] = None, api_key: Optional[str] = None):
 
         self.api_key = api_key
 
-    def completion(self, messages: List[Dict[str, str]], response_format: Optional[type] = None) -> Optional[str]:
-        """
-        Sends a completion request to the configured LLM and expects a structured response.
+    def completion(self, messages: list[dict[str, str]], response_format: type | None = None) -> str | None:
+        """Send a completion request to the configured LLM and expect a structured response.
 
         If a specific model was provided during initialization, it will use that model.
         Otherwise, it iterates through a list of models for the configured provider,
@@ -62,7 +58,7 @@ def completion(self, messages: List[Dict[str, str]], response_format: Optional[type] = None) -> Optional[str]:
         :param messages: A list of messages forming the conversation history for the prompt.
         :return: The string content of the response, or `None` if all attempts fail.
         """
-        model_list: List[str] = [self.model] if self.model else LLM_LIST.get(self.provider, [])
+        model_list: list[str] = [self.model] if self.model else LLM_LIST.get(self.provider, [])
         if not model_list:
             logger.error(f"No models are configured for the provider '{self.provider}'.")
             return None
@@ -73,13 +69,21 @@ def completion(self, messages: List[Dict[str, str]], response_format: Optional[type] = None) -> Optional[str]:
                 logger.debug(f"Attempting completion with model: {model_name}")
                 litellm.suppress_debug_info = True
 
-                response: ModelResponse = litellm.completion(  # type: ignore
+                response: ModelResponse = litellm.completion(
                     model=model_name,
                     messages=messages,
                     api_key=self.api_key,
                     response_format=response_format,
                     verbose=False,
                 )
+            except InternalServerError as e:
+                logger.warning(f"Model '{model_name}' failed with an internal server error: {e}")
+                # Continue to the next model in the list
+            except Exception as e:  # noqa: BLE001
+                # Catch other potential exceptions from litellm (e.g., validation, connection errors)
+                logger.error(f"An unexpected error occurred with model '{model_name}': {e}")
+                # Continue to the next model in the list
+            else:
+                logger.info(f"Successfully received a response from {model_name}.")
 
                 if response.usage:
@@ -90,18 +94,9 @@ def completion(self, messages: List[Dict[str, str]], response_format: Optional[type] = None) -> Optional[str]:
                 )
 
                 # Callers expect the string content of the message.
-                if response.choices and response.choices[0].message.content:  # type: ignore
-                    return response.choices[0].message.content  # type: ignore
+                if response.choices and response.choices[0].message.content:
+                    return response.choices[0].message.content
                 return None
-            except InternalServerError as e:
-                logger.warning(f"Model '{model_name}' failed with an internal server error: {e}")
-                # Continue to the next model in the list
-            except Exception as e:
-                # Catch other potential exceptions from litellm (e.g., validation, connection errors)
-                logger.error(f"An unexpected error occurred with model '{model_name}': {e}")
-                # Continue to the next model in the list
 
-        logger.error(
-            f"All configured models for provider '{self.provider}' failed. " f"Attempted: {', '.join(model_list)}"
-        )
+        logger.error(f"All configured models for provider '{self.provider}' failed. Attempted: {', '.join(model_list)}")
         return None
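The reworked completion() guards only the litellm.completion call and moves the success path into an else block, so any model-specific failure falls through to the next entry for the provider in LLM_LIST, and None is returned only after every candidate has failed. A minimal caller sketch; the import path is inferred from this file's location, and the key, messages, and printed handling are illustrative assumptions, not part of this diff:

    from common.llm import LLM  # path assumed from common/llm.py

    # The identifier may be a provider key from LLM_LIST ("Gemini", "ChatGPT", ...)
    # or a specific model id such as "gemini/gemini-2.5-flash" to pin one model.
    client = LLM(model_identifier="Gemini", api_key="sk-example")  # placeholder key

    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "List the direct dependencies of the 'sale' module."},
    ]

    reply = client.completion(messages)
    if reply is None:
        # Every candidate model failed; completion() has already logged each attempt.
        print("No model produced a response.")
    else:
        print(reply)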