Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
43 changes: 0 additions & 43 deletions .github/workflows/codeql.yml

This file was deleted.

78 changes: 76 additions & 2 deletions LLM/interpreter.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,12 @@
import os
import json
from typing import List, Optional, Dict, Any
import sqlite3
from typing import List, Optional, Dict, Any, TYPE_CHECKING
from enum import Enum

if TYPE_CHECKING:
from cortex.semantic_cache import SemanticCache


class APIProvider(Enum):
CLAUDE = "claude"
Expand All @@ -11,14 +15,43 @@


class CommandInterpreter:
"""Interprets natural language commands into executable shell commands using LLM APIs.

Supports multiple providers (OpenAI, Claude, Ollama) with optional semantic caching
and offline mode for cached responses.
"""

def __init__(
self,
api_key: str,
provider: str = "openai",
model: Optional[str] = None
model: Optional[str] = None,
offline: bool = False,
cache: Optional["SemanticCache"] = None,
):
"""Initialize the command interpreter.

Args:
api_key: API key for the LLM provider
provider: Provider name ("openai", "claude", or "ollama")
model: Optional model name override
offline: If True, only use cached responses
cache: Optional SemanticCache instance for response caching
"""
self.api_key = api_key
self.provider = APIProvider(provider.lower())
self.offline = offline

if cache is None:
try:
from cortex.semantic_cache import SemanticCache

self.cache: Optional["SemanticCache"] = SemanticCache()
except (ImportError, OSError) as e:

Check warning on line 50 in LLM/interpreter.py

View check run for this annotation

SonarQubeCloud / SonarCloud Code Analysis

Remove the unused local variable "e".

See more on https://sonarcloud.io/project/issues?id=cortexlinux_cortex&issues=AZsec53CVKxHeFyvtIDD&open=AZsec53CVKxHeFyvtIDD&pullRequest=299
# Cache initialization can fail due to missing dependencies or permissions
self.cache = None
else:
self.cache = cache

if model:
self.model = model
Expand Down Expand Up @@ -173,8 +206,36 @@
return validated

def parse(self, user_input: str, validate: bool = True) -> List[str]:
"""Parse natural language input into shell commands.

Args:
user_input: Natural language description of desired action
validate: If True, validate commands for dangerous patterns

Returns:
List of shell commands to execute

Raises:
ValueError: If input is empty
RuntimeError: If offline mode is enabled and no cached response exists
"""
if not user_input or not user_input.strip():
raise ValueError("User input cannot be empty")

cache_system_prompt = self._get_system_prompt() + f"\n\n[cortex-cache-validate={bool(validate)}]"

if self.cache is not None:
cached = self.cache.get_commands(
prompt=user_input,
provider=self.provider.value,
model=self.model,
system_prompt=cache_system_prompt,
)
if cached is not None:
return cached

if self.offline:
raise RuntimeError("Offline mode: no cached response available for this request")

if self.provider == APIProvider.OPENAI:
commands = self._call_openai(user_input)
Expand All @@ -187,6 +248,19 @@

if validate:
commands = self._validate_commands(commands)

if self.cache is not None and commands:
try:
self.cache.put_commands(
prompt=user_input,
provider=self.provider.value,
model=self.model,
system_prompt=cache_system_prompt,
commands=commands,
)
except (OSError, sqlite3.Error):
# Silently fail cache writes - not critical for operation
pass

return commands

Expand Down
34 changes: 33 additions & 1 deletion cortex/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@
self.spinner_idx = 0
self.prefs_manager = None # Lazy initialization
self.verbose = verbose
self.offline = False

def _debug(self, message: str):
"""Print debug info only in verbose mode"""
Expand Down Expand Up @@ -199,7 +200,7 @@
try:
self._print_status("🧠", "Understanding request...")

interpreter = CommandInterpreter(api_key=api_key, provider=provider)
interpreter = CommandInterpreter(api_key=api_key, provider=provider, offline=self.offline)

self._print_status("📦", "Planning installation...")

Expand Down Expand Up @@ -311,6 +312,24 @@
self._print_error(f"Unexpected error: {str(e)}")
return 1

def cache_stats(self) -> int:
    """Display semantic-cache statistics (hits, misses, hit rate).

    Lazily imports the cache module so the CLI still works when the
    cache's dependencies are unavailable.

    Returns:
        0 on success, 1 if the cache statistics could not be read.
    """
    try:
        from cortex.semantic_cache import SemanticCache

        stats = SemanticCache().stats()
        # Avoid a division-by-zero style blank rate when the cache is empty.
        if stats.total:
            hit_rate = f"{stats.hit_rate * 100:.1f}%"
        else:
            hit_rate = "0.0%"

        cx_header("Cache Stats")
        for line in (
            f"Hits: {stats.hits}",
            f"Misses: {stats.misses}",
            f"Hit rate: {hit_rate}",
            # Each hit is one LLM call that was skipped.
            f"Saved calls (approx): {stats.hits}",
        ):
            cx_print(line, "info")
        return 0
    except Exception as e:
        # Broad catch is deliberate: this is a CLI boundary — report and
        # return a non-zero exit code instead of crashing.
        self._print_error(f"Unable to read cache stats: {e}")
        return 1

def history(self, limit: int = 20, status: Optional[str] = None, show_id: Optional[str] = None):
"""Show installation history"""
history = InstallationHistory()
Expand Down Expand Up @@ -544,13 +563,14 @@
table.add_row("history", "View history")
table.add_row("rollback <id>", "Undo installation")
table.add_row("notify", "Manage desktop notifications") # Added this line
table.add_row("cache stats", "Show LLM cache statistics")

console.print(table)
console.print()
console.print("[dim]Learn more: https://cortexlinux.com/docs[/dim]")


def main():

Check failure on line 573 in cortex/cli.py

View check run for this annotation

SonarQubeCloud / SonarCloud Code Analysis

Refactor this function to reduce its Cognitive Complexity from 16 to the 15 allowed.

See more on https://sonarcloud.io/project/issues?id=cortexlinux_cortex&issues=AZsec50bVKxHeFyvtIDB&open=AZsec50bVKxHeFyvtIDB&pullRequest=299
parser = argparse.ArgumentParser(
prog='cortex',
description='AI-powered Linux command interpreter',
Expand All @@ -560,6 +580,7 @@
# Global flags
parser.add_argument('--version', '-V', action='version', version=f'cortex {VERSION}')
parser.add_argument('--verbose', '-v', action='store_true', help='Show detailed output')
parser.add_argument('--offline', action='store_true', help='Use cached responses only (no network calls)')

subparsers = parser.add_subparsers(dest='command', help='Available commands')

Expand Down Expand Up @@ -617,13 +638,19 @@
send_parser.add_argument('--actions', nargs='*', help='Action buttons')
# --------------------------

# Cache commands
cache_parser = subparsers.add_parser('cache', help='Cache operations')
cache_subs = cache_parser.add_subparsers(dest='cache_action', help='Cache actions')
cache_subs.add_parser('stats', help='Show cache statistics')

args = parser.parse_args()

if not args.command:
show_rich_help()
return 0

cli = CortexCLI(verbose=args.verbose)
cli.offline = bool(getattr(args, 'offline', False))

try:
if args.command == 'demo':
Expand All @@ -645,6 +672,11 @@
# Handle the new notify command
elif args.command == 'notify':
return cli.notify(args)
elif args.command == 'cache':
if getattr(args, 'cache_action', None) == 'stats':
return cli.cache_stats()
parser.print_help()
return 1
else:
parser.print_help()
return 1
Expand Down
Loading
Loading