Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 30 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
.env
.venv
env/
venv/
ENV/
.mypy_cache/
.pytest_cache/
.coverage
htmlcov/
33 changes: 19 additions & 14 deletions LLM/test_interpreter.py
Original file line number Diff line number Diff line change
@@ -1,29 +1,34 @@
import unittest
from unittest.mock import Mock, patch, MagicMock
import json
from interpreter import CommandInterpreter, APIProvider
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))

from LLM.interpreter import CommandInterpreter, APIProvider


class TestCommandInterpreter(unittest.TestCase):

def setUp(self):
self.api_key = "test-api-key"

@patch('interpreter.OpenAI')
@patch('openai.OpenAI')
def test_initialization_openai(self, mock_openai):
interpreter = CommandInterpreter(api_key=self.api_key, provider="openai")
self.assertEqual(interpreter.provider, APIProvider.OPENAI)
self.assertEqual(interpreter.model, "gpt-4")
mock_openai.assert_called_once_with(api_key=self.api_key)

@patch('interpreter.Anthropic')
@patch('anthropic.Anthropic')
def test_initialization_claude(self, mock_anthropic):
interpreter = CommandInterpreter(api_key=self.api_key, provider="claude")
self.assertEqual(interpreter.provider, APIProvider.CLAUDE)
self.assertEqual(interpreter.model, "claude-3-5-sonnet-20241022")
mock_anthropic.assert_called_once_with(api_key=self.api_key)

@patch('interpreter.OpenAI')
@patch('openai.OpenAI')
def test_initialization_custom_model(self, mock_openai):
interpreter = CommandInterpreter(
api_key=self.api_key,
Expand Down Expand Up @@ -73,14 +78,14 @@ def test_validate_commands_dd_pattern(self):
result = interpreter._validate_commands(commands)
self.assertEqual(result, ["apt update"])

@patch('interpreter.OpenAI')
@patch('openai.OpenAI')
def test_parse_empty_input(self, mock_openai):
interpreter = CommandInterpreter(api_key=self.api_key, provider="openai")

with self.assertRaises(ValueError):
interpreter.parse("")

@patch('interpreter.OpenAI')
@patch('openai.OpenAI')
def test_call_openai_success(self, mock_openai):
mock_client = Mock()
mock_response = Mock()
Expand All @@ -94,7 +99,7 @@ def test_call_openai_success(self, mock_openai):
result = interpreter._call_openai("install docker")
self.assertEqual(result, ["apt update"])

@patch('interpreter.OpenAI')
@patch('openai.OpenAI')
def test_call_openai_failure(self, mock_openai):
mock_client = Mock()
mock_client.chat.completions.create.side_effect = Exception("API Error")
Expand All @@ -105,7 +110,7 @@ def test_call_openai_failure(self, mock_openai):
with self.assertRaises(RuntimeError):
interpreter._call_openai("install docker")

@patch('interpreter.Anthropic')
@patch('anthropic.Anthropic')
def test_call_claude_success(self, mock_anthropic):
mock_client = Mock()
mock_response = Mock()
Expand All @@ -119,7 +124,7 @@ def test_call_claude_success(self, mock_anthropic):
result = interpreter._call_claude("install docker")
self.assertEqual(result, ["apt update"])

@patch('interpreter.Anthropic')
@patch('anthropic.Anthropic')
def test_call_claude_failure(self, mock_anthropic):
mock_client = Mock()
mock_client.messages.create.side_effect = Exception("API Error")
Expand All @@ -130,7 +135,7 @@ def test_call_claude_failure(self, mock_anthropic):
with self.assertRaises(RuntimeError):
interpreter._call_claude("install docker")

@patch('interpreter.OpenAI')
@patch('openai.OpenAI')
def test_parse_with_validation(self, mock_openai):
mock_client = Mock()
mock_response = Mock()
Expand All @@ -144,7 +149,7 @@ def test_parse_with_validation(self, mock_openai):
result = interpreter.parse("test command", validate=True)
self.assertEqual(result, ["apt update"])

@patch('interpreter.OpenAI')
@patch('openai.OpenAI')
def test_parse_without_validation(self, mock_openai):
mock_client = Mock()
mock_response = Mock()
Expand All @@ -158,7 +163,7 @@ def test_parse_without_validation(self, mock_openai):
result = interpreter.parse("test command", validate=False)
self.assertEqual(result, ["apt update", "rm -rf /"])

@patch('interpreter.OpenAI')
@patch('openai.OpenAI')
def test_parse_with_context(self, mock_openai):
mock_client = Mock()
mock_response = Mock()
Expand Down Expand Up @@ -197,7 +202,7 @@ def test_parse_commands_empty_commands(self):
result = interpreter._parse_commands(response)
self.assertEqual(result, ["apt update", "apt install docker"])

@patch('interpreter.OpenAI')
@patch('openai.OpenAI')
def test_parse_docker_installation(self, mock_openai):
mock_client = Mock()
mock_response = Mock()
Expand All @@ -217,7 +222,7 @@ def test_parse_docker_installation(self, mock_openai):

result = interpreter.parse("install docker")
self.assertGreater(len(result), 0)
self.assertIn("docker", result[0].lower() or result[1].lower())
self.assertTrue(any("docker" in cmd.lower() for cmd in result))


if __name__ == "__main__":
Expand Down
5 changes: 5 additions & 0 deletions MANIFEST.in
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
include README.md
include LICENSE
recursive-include LLM *.py
recursive-include cortex *.py
include LLM/requirements.txt
2 changes: 2 additions & 0 deletions cortex/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
# Re-export the CLI entry point so callers (and setuptools console_scripts)
# can reference ``cortex:main`` without importing ``cortex.cli`` directly.
from .cli import main
# Package version string; keep in sync with the distribution metadata.
__version__ = "0.1.0"
203 changes: 203 additions & 0 deletions cortex/cli.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,203 @@
"""Command-line interface entry point for the Cortex automation toolkit."""

import argparse
import os
import subprocess
import sys
import time
from typing import Optional

from LLM.interpreter import CommandInterpreter
from cortex.coordinator import InstallationCoordinator, StepStatus


class CortexCLI:
    """Command-line interface for Cortex AI-powered software installation."""

    def __init__(self) -> None:
        """Initialise spinner state used for interactive progress updates."""
        # Braille-dot frames; cycled by _animate_spinner for a smooth animation.
        self.spinner_chars = ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏']
        self.spinner_idx = 0

    def _detect_credentials(self) -> "tuple[str, Optional[str]]":
        """Return ``(provider, api_key)`` derived from the environment.

        Single source of truth for credential lookup, so the chosen key and
        provider can never disagree (previously ``_get_api_key`` and
        ``_get_provider`` each read the environment independently).  OpenAI
        takes precedence when both keys are set; the provider defaults to
        ``'openai'`` when no key is configured at all.
        """
        openai_key = os.environ.get('OPENAI_API_KEY')
        if openai_key:
            return 'openai', openai_key
        anthropic_key = os.environ.get('ANTHROPIC_API_KEY')
        if anthropic_key:
            return 'claude', anthropic_key
        return 'openai', None

    def _get_api_key(self) -> Optional[str]:
        """Return the configured API key or emit an error if missing."""
        api_key = self._detect_credentials()[1]
        if not api_key:
            self._print_error("API key not found. Set OPENAI_API_KEY or ANTHROPIC_API_KEY environment variable.")
            return None
        return api_key

    def _get_provider(self) -> str:
        """Detect which LLM provider to use based on available credentials."""
        return self._detect_credentials()[0]

    def _print_status(self, label: str, message: str) -> None:
        """Emit informational output with a consistent status label."""
        print(f"{label} {message}")

    def _print_error(self, message: str) -> None:
        """Emit an error message to ``stderr`` with standard formatting."""
        print(f"[ERROR] {message}", file=sys.stderr)

    def _print_success(self, message: str) -> None:
        """Emit a success message to ``stdout`` with the success label."""
        print(f"[SUCCESS] {message}")

    def _animate_spinner(self, message: str) -> None:
        """Render a single spinner frame with the supplied ``message``."""
        sys.stdout.write(f"\r{self.spinner_chars[self.spinner_idx]} {message}")
        sys.stdout.flush()
        self.spinner_idx = (self.spinner_idx + 1) % len(self.spinner_chars)
        time.sleep(0.1)  # ~10 fps; each frame costs 0.1 s of wall time

    def _clear_line(self) -> None:
        """Clear the active terminal line to hide spinner artifacts."""
        sys.stdout.write('\r\033[K')
        sys.stdout.flush()

    def _execute_plan(self, software: str, commands) -> int:
        """Run ``commands`` through the coordinator, reporting progress.

        Returns 0 when every step succeeds, 1 otherwise.
        """

        def progress_callback(current: int, total: int, step) -> None:
            # Map the coordinator's step status onto a short display label.
            status_label = "[PENDING]"
            if step.status == StepStatus.SUCCESS:
                status_label = "[OK]"
            elif step.status == StepStatus.FAILED:
                status_label = "[FAIL]"
            print(f"\n[{current}/{total}] {status_label} {step.description}")
            print(f"  Command: {step.command}")

        print("\nExecuting commands...")

        coordinator = InstallationCoordinator(
            commands=commands,
            descriptions=[f"Step {i + 1}" for i in range(len(commands))],
            timeout=300,
            stop_on_error=True,
            progress_callback=progress_callback,
        )

        result = coordinator.execute()

        if result.success:
            self._print_success(f"{software} installed successfully!")
            print(f"\nCompleted in {result.total_duration:.2f} seconds")
            return 0

        if result.failed_step is not None:
            self._print_error(f"Installation failed at step {result.failed_step + 1}")
        else:
            self._print_error("Installation failed")
        if result.error_message:
            print(f"  Error: {result.error_message}", file=sys.stderr)
        return 1

    def install(self, software: str, execute: bool = False, dry_run: bool = False) -> int:
        """Interpret a natural-language request and optionally execute the plan.

        Args:
            software: Natural-language description of what to install.
            execute: When True, actually run the generated commands.
            dry_run: When True, show the plan without executing anything.

        Returns:
            Process exit code: 0 on success, 1 on any failure.
        """

        api_key = self._get_api_key()
        if not api_key:
            return 1

        provider = self._get_provider()

        try:
            self._print_status("[INFO]", "Understanding request...")

            interpreter = CommandInterpreter(api_key=api_key, provider=provider)

            self._print_status("[PLAN]", "Planning installation...")

            # Purely cosmetic: show ~1 second of spinner before the API call.
            for _ in range(10):
                self._animate_spinner("Analyzing system requirements...")
            self._clear_line()

            commands = interpreter.parse(f"install {software}")

            if not commands:
                self._print_error("No commands generated. Please try again with a different request.")
                return 1

            self._print_status("[EXEC]", f"Installing {software}...")
            print("\nGenerated commands:")
            for index, command in enumerate(commands, 1):
                print(f"  {index}. {command}")

            if dry_run:
                print("\n(Dry run mode - commands not executed)")
                return 0

            if execute:
                return self._execute_plan(software, commands)

            print("\nTo execute these commands, run with --execute flag")
            print("Example: cortex install docker --execute")
            return 0

        except ValueError as exc:
            # The interpreter rejects empty/invalid requests with ValueError.
            self._print_error(str(exc))
            return 1
        except RuntimeError as exc:
            # The interpreter wraps provider/API failures in RuntimeError.
            self._print_error(f"API call failed: {str(exc)}")
            return 1
        except Exception as exc:
            # Last-resort guard so the CLI always exits with a clean message.
            self._print_error(f"Unexpected error: {str(exc)}")
            return 1


def main() -> int:
    """Entry point for the cortex CLI command.

    Builds the argument parser, dispatches either the aggregate test runner
    (``--test``) or the ``install`` sub-command, and returns a process exit
    code (0 on success, non-zero on failure or missing sub-command).
    """

    arg_parser = argparse.ArgumentParser(
        prog='cortex',
        description='AI-powered Linux command interpreter',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  cortex install docker
  cortex install docker --execute
  cortex install "python 3.11 with pip"
  cortex install nginx --dry-run
  cortex --test

Environment Variables:
  OPENAI_API_KEY        OpenAI API key for GPT-4
  ANTHROPIC_API_KEY     Anthropic API key for Claude
    """
    )

    arg_parser.add_argument('--test', action='store_true', help='Run all test suites')

    sub_commands = arg_parser.add_subparsers(dest='command', help='Available commands')

    install_cmd = sub_commands.add_parser('install', help='Install software using natural language')
    install_cmd.add_argument('software', type=str, help='Software to install (natural language)')
    install_cmd.add_argument('--execute', action='store_true', help='Execute the generated commands')
    install_cmd.add_argument('--dry-run', action='store_true', help='Show commands without executing')

    opts = arg_parser.parse_args()

    if opts.test:
        # Delegate to the repository-level aggregate test runner in ../test/.
        runner_path = os.path.join(os.path.dirname(__file__), '..', 'test', 'run_all_tests.py')

        if not os.path.exists(runner_path):
            print("[ERROR] Test runner not found", file=sys.stderr)
            return 1

        completed = subprocess.run([sys.executable, runner_path])
        return completed.returncode

    if not opts.command:
        # No sub-command supplied: show usage and exit with an error status.
        arg_parser.print_help()
        return 1

    if opts.command == 'install':
        return CortexCLI().install(opts.software, execute=opts.execute, dry_run=opts.dry_run)

    return 0


# Allow running this module directly; propagate main()'s exit code to the shell.
if __name__ == '__main__':
    sys.exit(main())
Loading