From 3b5b24541d2f374b520927fd4ebf6cc370d9fbb8 Mon Sep 17 00:00:00 2001 From: sahil Date: Sun, 9 Nov 2025 12:41:20 +0530 Subject: [PATCH 01/12] Add CLI interface for cortex command - Fixes #11 --- .gitignore | 30 +++++++ MANIFEST.in | 5 ++ cortex/__init__.py | 2 + cortex/cli.py | 166 ++++++++++++++++++++++++++++++++++++ cortex/test_cli.py | 203 +++++++++++++++++++++++++++++++++++++++++++++ setup.py | 43 ++++++++++ 6 files changed, 449 insertions(+) create mode 100644 .gitignore create mode 100644 MANIFEST.in create mode 100644 cortex/__init__.py create mode 100644 cortex/cli.py create mode 100644 cortex/test_cli.py create mode 100644 setup.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..eeac129 --- /dev/null +++ b/.gitignore @@ -0,0 +1,30 @@ +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST +.env +.venv +env/ +venv/ +ENV/ +.mypy_cache/ +.pytest_cache/ +.coverage +htmlcov/ diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..a933d69 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,5 @@ +include README.md +include LICENSE +recursive-include LLM *.py +recursive-include cortex *.py +include LLM/requirements.txt diff --git a/cortex/__init__.py b/cortex/__init__.py new file mode 100644 index 0000000..57abaed --- /dev/null +++ b/cortex/__init__.py @@ -0,0 +1,2 @@ +from .cli import main +__version__ = "0.1.0" diff --git a/cortex/cli.py b/cortex/cli.py new file mode 100644 index 0000000..dcc0cab --- /dev/null +++ b/cortex/cli.py @@ -0,0 +1,166 @@ +import sys +import os +import argparse +import time +from typing import List, Optional +import subprocess + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) + +from LLM.interpreter import CommandInterpreter + + +class CortexCLI: + def __init__(self): + self.spinner_chars = ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'] 
+ self.spinner_idx = 0 + + def _get_api_key(self) -> Optional[str]: + api_key = os.environ.get('OPENAI_API_KEY') or os.environ.get('ANTHROPIC_API_KEY') + if not api_key: + self._print_error("API key not found. Set OPENAI_API_KEY or ANTHROPIC_API_KEY environment variable.") + return None + return api_key + + def _get_provider(self) -> str: + if os.environ.get('OPENAI_API_KEY'): + return 'openai' + elif os.environ.get('ANTHROPIC_API_KEY'): + return 'claude' + return 'openai' + + def _print_status(self, emoji: str, message: str): + print(f"{emoji} {message}") + + def _print_error(self, message: str): + print(f"❌ Error: {message}", file=sys.stderr) + + def _print_success(self, message: str): + print(f"✅ {message}") + + def _animate_spinner(self, message: str): + sys.stdout.write(f"\r{self.spinner_chars[self.spinner_idx]} {message}") + sys.stdout.flush() + self.spinner_idx = (self.spinner_idx + 1) % len(self.spinner_chars) + time.sleep(0.1) + + def _clear_line(self): + sys.stdout.write('\r\033[K') + sys.stdout.flush() + + def install(self, software: str, execute: bool = False, dry_run: bool = False): + api_key = self._get_api_key() + if not api_key: + return 1 + + provider = self._get_provider() + + try: + self._print_status("🧠", "Understanding request...") + + interpreter = CommandInterpreter(api_key=api_key, provider=provider) + + self._print_status("📦", "Planning installation...") + + for _ in range(10): + self._animate_spinner("Analyzing system requirements...") + self._clear_line() + + commands = interpreter.parse(f"install {software}") + + if not commands: + self._print_error("No commands generated. Please try again with a different request.") + return 1 + + self._print_status("⚙️", f"Installing {software}...") + print("\nGenerated commands:") + for i, cmd in enumerate(commands, 1): + print(f" {i}. 
{cmd}") + + if dry_run: + print("\n(Dry run mode - commands not executed)") + return 0 + + if execute: + print("\nExecuting commands...") + for i, cmd in enumerate(commands, 1): + print(f"\n[{i}/{len(commands)}] Running: {cmd}") + try: + result = subprocess.run( + cmd, + shell=True, + capture_output=True, + text=True, + timeout=300 + ) + if result.returncode != 0: + self._print_error(f"Command failed: {result.stderr}") + return 1 + if result.stdout: + print(result.stdout) + except subprocess.TimeoutExpired: + self._print_error(f"Command timed out: {cmd}") + return 1 + except Exception as e: + self._print_error(f"Failed to execute command: {str(e)}") + return 1 + + self._print_success(f"{software} installed successfully!") + else: + print("\nTo execute these commands, run with --execute flag") + print("Example: cortex install docker --execute") + + return 0 + + except ValueError as e: + self._print_error(str(e)) + return 1 + except RuntimeError as e: + self._print_error(f"API call failed: {str(e)}") + return 1 + except Exception as e: + self._print_error(f"Unexpected error: {str(e)}") + return 1 + + +def main(): + parser = argparse.ArgumentParser( + prog='cortex', + description='AI-powered Linux command interpreter', + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + cortex install docker + cortex install docker --execute + cortex install "python 3.11 with pip" + cortex install nginx --dry-run + +Environment Variables: + OPENAI_API_KEY OpenAI API key for GPT-4 + ANTHROPIC_API_KEY Anthropic API key for Claude + """ + ) + + subparsers = parser.add_subparsers(dest='command', help='Available commands') + + install_parser = subparsers.add_parser('install', help='Install software using natural language') + install_parser.add_argument('software', type=str, help='Software to install (natural language)') + install_parser.add_argument('--execute', action='store_true', help='Execute the generated commands') + 
install_parser.add_argument('--dry-run', action='store_true', help='Show commands without executing') + + args = parser.parse_args() + + if not args.command: + parser.print_help() + return 1 + + cli = CortexCLI() + + if args.command == 'install': + return cli.install(args.software, execute=args.execute, dry_run=args.dry_run) + + return 0 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/cortex/test_cli.py b/cortex/test_cli.py new file mode 100644 index 0000000..9672192 --- /dev/null +++ b/cortex/test_cli.py @@ -0,0 +1,203 @@ +import unittest +from unittest.mock import Mock, patch, MagicMock, call +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) + +from cortex.cli import CortexCLI, main + + +class TestCortexCLI(unittest.TestCase): + + def setUp(self): + self.cli = CortexCLI() + + @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) + def test_get_api_key_openai(self): + api_key = self.cli._get_api_key() + self.assertEqual(api_key, 'test-key') + + @patch.dict(os.environ, {'ANTHROPIC_API_KEY': 'test-claude-key'}) + def test_get_api_key_claude(self): + api_key = self.cli._get_api_key() + self.assertEqual(api_key, 'test-claude-key') + + @patch.dict(os.environ, {}, clear=True) + @patch('sys.stderr') + def test_get_api_key_not_found(self, mock_stderr): + api_key = self.cli._get_api_key() + self.assertIsNone(api_key) + + @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) + def test_get_provider_openai(self): + provider = self.cli._get_provider() + self.assertEqual(provider, 'openai') + + @patch.dict(os.environ, {'ANTHROPIC_API_KEY': 'test-key'}, clear=True) + def test_get_provider_claude(self): + provider = self.cli._get_provider() + self.assertEqual(provider, 'claude') + + @patch('sys.stdout') + def test_print_status(self, mock_stdout): + self.cli._print_status("🧠", "Test message") + self.assertTrue(mock_stdout.write.called or print) + + @patch('sys.stderr') + def test_print_error(self, mock_stderr): + 
self.cli._print_error("Test error") + self.assertTrue(True) + + @patch('sys.stdout') + def test_print_success(self, mock_stdout): + self.cli._print_success("Test success") + self.assertTrue(True) + + @patch.dict(os.environ, {}, clear=True) + def test_install_no_api_key(self): + result = self.cli.install("docker") + self.assertEqual(result, 1) + + @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) + @patch('cortex.cli.CommandInterpreter') + def test_install_dry_run(self, mock_interpreter_class): + mock_interpreter = Mock() + mock_interpreter.parse.return_value = ["apt update", "apt install docker"] + mock_interpreter_class.return_value = mock_interpreter + + result = self.cli.install("docker", dry_run=True) + + self.assertEqual(result, 0) + mock_interpreter.parse.assert_called_once_with("install docker") + + @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) + @patch('cortex.cli.CommandInterpreter') + def test_install_no_execute(self, mock_interpreter_class): + mock_interpreter = Mock() + mock_interpreter.parse.return_value = ["apt update", "apt install docker"] + mock_interpreter_class.return_value = mock_interpreter + + result = self.cli.install("docker", execute=False) + + self.assertEqual(result, 0) + mock_interpreter.parse.assert_called_once() + + @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) + @patch('cortex.cli.CommandInterpreter') + @patch('subprocess.run') + def test_install_with_execute_success(self, mock_run, mock_interpreter_class): + mock_interpreter = Mock() + mock_interpreter.parse.return_value = ["echo test"] + mock_interpreter_class.return_value = mock_interpreter + + mock_result = Mock() + mock_result.returncode = 0 + mock_result.stdout = "test output" + mock_result.stderr = "" + mock_run.return_value = mock_result + + result = self.cli.install("docker", execute=True) + + self.assertEqual(result, 0) + mock_run.assert_called_once() + + @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) + 
@patch('cortex.cli.CommandInterpreter') + @patch('subprocess.run') + def test_install_with_execute_failure(self, mock_run, mock_interpreter_class): + mock_interpreter = Mock() + mock_interpreter.parse.return_value = ["invalid command"] + mock_interpreter_class.return_value = mock_interpreter + + mock_result = Mock() + mock_result.returncode = 1 + mock_result.stdout = "" + mock_result.stderr = "command not found" + mock_run.return_value = mock_result + + result = self.cli.install("docker", execute=True) + + self.assertEqual(result, 1) + + @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) + @patch('cortex.cli.CommandInterpreter') + def test_install_no_commands_generated(self, mock_interpreter_class): + mock_interpreter = Mock() + mock_interpreter.parse.return_value = [] + mock_interpreter_class.return_value = mock_interpreter + + result = self.cli.install("docker") + + self.assertEqual(result, 1) + + @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) + @patch('cortex.cli.CommandInterpreter') + def test_install_value_error(self, mock_interpreter_class): + mock_interpreter = Mock() + mock_interpreter.parse.side_effect = ValueError("Invalid input") + mock_interpreter_class.return_value = mock_interpreter + + result = self.cli.install("docker") + + self.assertEqual(result, 1) + + @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) + @patch('cortex.cli.CommandInterpreter') + def test_install_runtime_error(self, mock_interpreter_class): + mock_interpreter = Mock() + mock_interpreter.parse.side_effect = RuntimeError("API failed") + mock_interpreter_class.return_value = mock_interpreter + + result = self.cli.install("docker") + + self.assertEqual(result, 1) + + @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) + @patch('cortex.cli.CommandInterpreter') + def test_install_unexpected_error(self, mock_interpreter_class): + mock_interpreter = Mock() + mock_interpreter.parse.side_effect = Exception("Unexpected") + mock_interpreter_class.return_value = 
mock_interpreter + + result = self.cli.install("docker") + + self.assertEqual(result, 1) + + @patch('sys.argv', ['cortex']) + def test_main_no_command(self): + result = main() + self.assertEqual(result, 1) + + @patch('sys.argv', ['cortex', 'install', 'docker']) + @patch('cortex.cli.CortexCLI.install') + def test_main_install_command(self, mock_install): + mock_install.return_value = 0 + result = main() + self.assertEqual(result, 0) + mock_install.assert_called_once_with('docker', execute=False, dry_run=False) + + @patch('sys.argv', ['cortex', 'install', 'docker', '--execute']) + @patch('cortex.cli.CortexCLI.install') + def test_main_install_with_execute(self, mock_install): + mock_install.return_value = 0 + result = main() + self.assertEqual(result, 0) + mock_install.assert_called_once_with('docker', execute=True, dry_run=False) + + @patch('sys.argv', ['cortex', 'install', 'docker', '--dry-run']) + @patch('cortex.cli.CortexCLI.install') + def test_main_install_with_dry_run(self, mock_install): + mock_install.return_value = 0 + result = main() + self.assertEqual(result, 0) + mock_install.assert_called_once_with('docker', execute=False, dry_run=True) + + def test_spinner_animation(self): + initial_idx = self.cli.spinner_idx + self.cli._animate_spinner("Testing") + self.assertNotEqual(self.cli.spinner_idx, initial_idx) + + +if __name__ == '__main__': + unittest.main() diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..1b38366 --- /dev/null +++ b/setup.py @@ -0,0 +1,43 @@ +from setuptools import setup, find_packages +import os + +with open("README.md", "r", encoding="utf-8") as fh: + long_description = fh.read() + +with open(os.path.join("LLM", "requirements.txt"), "r", encoding="utf-8") as fh: + requirements = [line.strip() for line in fh if line.strip() and not line.startswith("#")] + +setup( + name="cortex-linux", + version="0.1.0", + author="Cortex Linux", + author_email="mike@cortexlinux.com", + description="AI-powered Linux command interpreter", 
+ long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/cortexlinux/cortex", + packages=find_packages(), + classifiers=[ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Intended Audience :: System Administrators", + "Topic :: System :: Installation/Setup", + "Topic :: System :: Systems Administration", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Operating System :: POSIX :: Linux", + ], + python_requires=">=3.8", + install_requires=requirements, + entry_points={ + "console_scripts": [ + "cortex=cortex.cli:main", + ], + }, + include_package_data=True, +) From ae131585988c560ea91ea051afe1a5182b021aa2 Mon Sep 17 00:00:00 2001 From: sahil Date: Sun, 9 Nov 2025 18:34:17 +0530 Subject: [PATCH 02/12] Add multi-step installation coordinator - Fixes #8 --- cortex/cli.py | 54 +++--- cortex/coordinator.py | 284 +++++++++++++++++++++++++++++ cortex/test_cli.py | 29 +-- cortex/test_coordinator.py | 353 +++++++++++++++++++++++++++++++++++++ 4 files changed, 685 insertions(+), 35 deletions(-) create mode 100644 cortex/coordinator.py create mode 100644 cortex/test_coordinator.py diff --git a/cortex/cli.py b/cortex/cli.py index dcc0cab..86b1682 100644 --- a/cortex/cli.py +++ b/cortex/cli.py @@ -8,6 +8,7 @@ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) from LLM.interpreter import CommandInterpreter +from cortex.coordinator import InstallationCoordinator, StepStatus class CortexCLI: @@ -82,30 +83,39 @@ def install(self, software: str, execute: bool = False, dry_run: bool = False): return 0 if execute: + def progress_callback(current, total, step): + status_emoji = "⏳" + if step.status == StepStatus.SUCCESS: + status_emoji = 
"✅" + elif step.status == StepStatus.FAILED: + status_emoji = "❌" + print(f"\n[{current}/{total}] {status_emoji} {step.description}") + print(f" Command: {step.command}") + print("\nExecuting commands...") - for i, cmd in enumerate(commands, 1): - print(f"\n[{i}/{len(commands)}] Running: {cmd}") - try: - result = subprocess.run( - cmd, - shell=True, - capture_output=True, - text=True, - timeout=300 - ) - if result.returncode != 0: - self._print_error(f"Command failed: {result.stderr}") - return 1 - if result.stdout: - print(result.stdout) - except subprocess.TimeoutExpired: - self._print_error(f"Command timed out: {cmd}") - return 1 - except Exception as e: - self._print_error(f"Failed to execute command: {str(e)}") - return 1 - self._print_success(f"{software} installed successfully!") + coordinator = InstallationCoordinator( + commands=commands, + descriptions=[f"Step {i+1}" for i in range(len(commands))], + timeout=300, + stop_on_error=True, + progress_callback=progress_callback + ) + + result = coordinator.execute() + + if result.success: + self._print_success(f"{software} installed successfully!") + print(f"\nCompleted in {result.total_duration:.2f} seconds") + return 0 + else: + if result.failed_step is not None: + self._print_error(f"Installation failed at step {result.failed_step + 1}") + else: + self._print_error("Installation failed") + if result.error_message: + print(f" Error: {result.error_message}", file=sys.stderr) + return 1 else: print("\nTo execute these commands, run with --execute flag") print("Example: cortex install docker --execute") diff --git a/cortex/coordinator.py b/cortex/coordinator.py new file mode 100644 index 0000000..a2ae0a3 --- /dev/null +++ b/cortex/coordinator.py @@ -0,0 +1,284 @@ +import subprocess +import time +import json +from typing import List, Dict, Any, Optional, Callable +from dataclasses import dataclass, field +from enum import Enum +from datetime import datetime + + +class StepStatus(Enum): + PENDING = "pending" + 
RUNNING = "running" + SUCCESS = "success" + FAILED = "failed" + SKIPPED = "skipped" + + +@dataclass +class InstallationStep: + command: str + description: str + status: StepStatus = StepStatus.PENDING + output: str = "" + error: str = "" + start_time: Optional[float] = None + end_time: Optional[float] = None + return_code: Optional[int] = None + + def duration(self) -> Optional[float]: + if self.start_time and self.end_time: + return self.end_time - self.start_time + return None + + +@dataclass +class InstallationResult: + success: bool + steps: List[InstallationStep] + total_duration: float + failed_step: Optional[int] = None + error_message: Optional[str] = None + + +class InstallationCoordinator: + def __init__( + self, + commands: List[str], + descriptions: Optional[List[str]] = None, + timeout: int = 300, + stop_on_error: bool = True, + enable_rollback: bool = False, + log_file: Optional[str] = None, + progress_callback: Optional[Callable[[int, int, InstallationStep], None]] = None + ): + self.timeout = timeout + self.stop_on_error = stop_on_error + self.enable_rollback = enable_rollback + self.log_file = log_file + self.progress_callback = progress_callback + + if descriptions and len(descriptions) != len(commands): + raise ValueError("Number of descriptions must match number of commands") + + self.steps = [ + InstallationStep( + command=cmd, + description=descriptions[i] if descriptions else f"Step {i+1}" + ) + for i, cmd in enumerate(commands) + ] + + self.rollback_commands: List[str] = [] + + def _log(self, message: str): + timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + log_entry = f"[{timestamp}] {message}" + + if self.log_file: + try: + with open(self.log_file, 'a', encoding='utf-8') as f: + f.write(log_entry + '\n') + except Exception: + pass + + def _execute_command(self, step: InstallationStep) -> bool: + step.status = StepStatus.RUNNING + step.start_time = time.time() + + self._log(f"Executing: {step.command}") + + try: + result = 
subprocess.run( + step.command, + shell=True, + capture_output=True, + text=True, + timeout=self.timeout + ) + + step.return_code = result.returncode + step.output = result.stdout + step.error = result.stderr + step.end_time = time.time() + + if result.returncode == 0: + step.status = StepStatus.SUCCESS + self._log(f"Success: {step.command}") + return True + else: + step.status = StepStatus.FAILED + self._log(f"Failed: {step.command} (exit code: {result.returncode})") + return False + + except subprocess.TimeoutExpired: + step.status = StepStatus.FAILED + step.error = f"Command timed out after {self.timeout} seconds" + step.end_time = time.time() + self._log(f"Timeout: {step.command}") + return False + + except Exception as e: + step.status = StepStatus.FAILED + step.error = str(e) + step.end_time = time.time() + self._log(f"Error: {step.command} - {str(e)}") + return False + + def _rollback(self): + if not self.enable_rollback or not self.rollback_commands: + return + + self._log("Starting rollback...") + + for cmd in reversed(self.rollback_commands): + try: + self._log(f"Rollback: {cmd}") + subprocess.run( + cmd, + shell=True, + capture_output=True, + timeout=self.timeout + ) + except Exception as e: + self._log(f"Rollback failed: {cmd} - {str(e)}") + + def add_rollback_command(self, command: str): + self.rollback_commands.append(command) + + def execute(self) -> InstallationResult: + start_time = time.time() + failed_step_index = None + + self._log(f"Starting installation with {len(self.steps)} steps") + + for i, step in enumerate(self.steps): + if self.progress_callback: + self.progress_callback(i + 1, len(self.steps), step) + + success = self._execute_command(step) + + if not success: + failed_step_index = i + if self.stop_on_error: + for remaining_step in self.steps[i+1:]: + remaining_step.status = StepStatus.SKIPPED + + if self.enable_rollback: + self._rollback() + + total_duration = time.time() - start_time + self._log(f"Installation failed at step {i+1}") 
+ + return InstallationResult( + success=False, + steps=self.steps, + total_duration=total_duration, + failed_step=i, + error_message=step.error or "Command failed" + ) + + total_duration = time.time() - start_time + all_success = all(s.status == StepStatus.SUCCESS for s in self.steps) + + if all_success: + self._log("Installation completed successfully") + else: + self._log("Installation completed with errors") + + return InstallationResult( + success=all_success, + steps=self.steps, + total_duration=total_duration, + failed_step=failed_step_index, + error_message=self.steps[failed_step_index].error if failed_step_index is not None else None + ) + + def verify_installation(self, verify_commands: List[str]) -> Dict[str, bool]: + verification_results = {} + + self._log("Starting verification...") + + for cmd in verify_commands: + try: + result = subprocess.run( + cmd, + shell=True, + capture_output=True, + text=True, + timeout=30 + ) + success = result.returncode == 0 + verification_results[cmd] = success + self._log(f"Verification {cmd}: {'PASS' if success else 'FAIL'}") + except Exception as e: + verification_results[cmd] = False + self._log(f"Verification {cmd}: ERROR - {str(e)}") + + return verification_results + + def get_summary(self) -> Dict[str, Any]: + total_steps = len(self.steps) + success_steps = sum(1 for s in self.steps if s.status == StepStatus.SUCCESS) + failed_steps = sum(1 for s in self.steps if s.status == StepStatus.FAILED) + skipped_steps = sum(1 for s in self.steps if s.status == StepStatus.SKIPPED) + + return { + "total_steps": total_steps, + "success": success_steps, + "failed": failed_steps, + "skipped": skipped_steps, + "steps": [ + { + "command": s.command, + "description": s.description, + "status": s.status.value, + "duration": s.duration(), + "return_code": s.return_code + } + for s in self.steps + ] + } + + def export_log(self, filepath: str): + with open(filepath, 'w', encoding='utf-8') as f: + json.dump(self.get_summary(), f, 
indent=2) + + +def install_docker() -> InstallationResult: + commands = [ + "apt update", + "apt install -y apt-transport-https ca-certificates curl software-properties-common", + "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -", + 'add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"', + "apt update", + "apt install -y docker-ce docker-ce-cli containerd.io", + "systemctl start docker", + "systemctl enable docker" + ] + + descriptions = [ + "Update package lists", + "Install dependencies", + "Add Docker GPG key", + "Add Docker repository", + "Update package lists again", + "Install Docker packages", + "Start Docker service", + "Enable Docker on boot" + ] + + coordinator = InstallationCoordinator( + commands=commands, + descriptions=descriptions, + timeout=300, + stop_on_error=True + ) + + result = coordinator.execute() + + if result.success: + verify_commands = ["docker --version", "systemctl is-active docker"] + coordinator.verify_installation(verify_commands) + + return result diff --git a/cortex/test_cli.py b/cortex/test_cli.py index 9672192..cb2bf35 100644 --- a/cortex/test_cli.py +++ b/cortex/test_cli.py @@ -85,36 +85,39 @@ def test_install_no_execute(self, mock_interpreter_class): @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) @patch('cortex.cli.CommandInterpreter') - @patch('subprocess.run') - def test_install_with_execute_success(self, mock_run, mock_interpreter_class): + @patch('cortex.cli.InstallationCoordinator') + def test_install_with_execute_success(self, mock_coordinator_class, mock_interpreter_class): mock_interpreter = Mock() mock_interpreter.parse.return_value = ["echo test"] mock_interpreter_class.return_value = mock_interpreter + mock_coordinator = Mock() mock_result = Mock() - mock_result.returncode = 0 - mock_result.stdout = "test output" - mock_result.stderr = "" - mock_run.return_value = mock_result + mock_result.success = True + mock_result.total_duration 
= 1.5 + mock_coordinator.execute.return_value = mock_result + mock_coordinator_class.return_value = mock_coordinator result = self.cli.install("docker", execute=True) self.assertEqual(result, 0) - mock_run.assert_called_once() + mock_coordinator.execute.assert_called_once() @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) @patch('cortex.cli.CommandInterpreter') - @patch('subprocess.run') - def test_install_with_execute_failure(self, mock_run, mock_interpreter_class): + @patch('cortex.cli.InstallationCoordinator') + def test_install_with_execute_failure(self, mock_coordinator_class, mock_interpreter_class): mock_interpreter = Mock() mock_interpreter.parse.return_value = ["invalid command"] mock_interpreter_class.return_value = mock_interpreter + mock_coordinator = Mock() mock_result = Mock() - mock_result.returncode = 1 - mock_result.stdout = "" - mock_result.stderr = "command not found" - mock_run.return_value = mock_result + mock_result.success = False + mock_result.failed_step = 0 + mock_result.error_message = "command not found" + mock_coordinator.execute.return_value = mock_result + mock_coordinator_class.return_value = mock_coordinator result = self.cli.install("docker", execute=True) diff --git a/cortex/test_coordinator.py b/cortex/test_coordinator.py new file mode 100644 index 0000000..6911e23 --- /dev/null +++ b/cortex/test_coordinator.py @@ -0,0 +1,353 @@ +import unittest +from unittest.mock import Mock, patch, call +import tempfile +import os +import time +import sys + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) + +from cortex.coordinator import ( + InstallationCoordinator, + InstallationStep, + InstallationResult, + StepStatus, + install_docker +) + + +class TestInstallationStep(unittest.TestCase): + + def test_step_creation(self): + step = InstallationStep(command="echo test", description="Test step") + self.assertEqual(step.command, "echo test") + self.assertEqual(step.description, "Test step") + 
self.assertEqual(step.status, StepStatus.PENDING) + + def test_step_duration(self): + step = InstallationStep(command="test", description="test") + self.assertIsNone(step.duration()) + + step.start_time = 100.0 + step.end_time = 105.5 + self.assertEqual(step.duration(), 5.5) + + +class TestInstallationCoordinator(unittest.TestCase): + + def test_initialization(self): + commands = ["echo 1", "echo 2"] + coordinator = InstallationCoordinator(commands) + + self.assertEqual(len(coordinator.steps), 2) + self.assertEqual(coordinator.steps[0].command, "echo 1") + self.assertEqual(coordinator.steps[1].command, "echo 2") + + def test_initialization_with_descriptions(self): + commands = ["echo 1", "echo 2"] + descriptions = ["First", "Second"] + coordinator = InstallationCoordinator(commands, descriptions) + + self.assertEqual(coordinator.steps[0].description, "First") + self.assertEqual(coordinator.steps[1].description, "Second") + + def test_initialization_mismatched_descriptions(self): + commands = ["echo 1", "echo 2"] + descriptions = ["First"] + + with self.assertRaises(ValueError): + InstallationCoordinator(commands, descriptions) + + @patch('subprocess.run') + def test_execute_single_success(self, mock_run): + mock_result = Mock() + mock_result.returncode = 0 + mock_result.stdout = "success" + mock_result.stderr = "" + mock_run.return_value = mock_result + + coordinator = InstallationCoordinator(["echo test"]) + result = coordinator.execute() + + self.assertTrue(result.success) + self.assertEqual(len(result.steps), 1) + self.assertEqual(result.steps[0].status, StepStatus.SUCCESS) + + @patch('subprocess.run') + def test_execute_single_failure(self, mock_run): + mock_result = Mock() + mock_result.returncode = 1 + mock_result.stdout = "" + mock_result.stderr = "error" + mock_run.return_value = mock_result + + coordinator = InstallationCoordinator(["false"]) + result = coordinator.execute() + + self.assertFalse(result.success) + self.assertEqual(result.failed_step, 0) + 
self.assertEqual(result.steps[0].status, StepStatus.FAILED) + + @patch('subprocess.run') + def test_execute_multiple_success(self, mock_run): + mock_result = Mock() + mock_result.returncode = 0 + mock_result.stdout = "success" + mock_result.stderr = "" + mock_run.return_value = mock_result + + coordinator = InstallationCoordinator(["echo 1", "echo 2", "echo 3"]) + result = coordinator.execute() + + self.assertTrue(result.success) + self.assertEqual(len(result.steps), 3) + self.assertTrue(all(s.status == StepStatus.SUCCESS for s in result.steps)) + + @patch('subprocess.run') + def test_execute_stop_on_error(self, mock_run): + def side_effect(*args, **kwargs): + cmd = args[0] if args else kwargs.get('shell') + if "fail" in str(cmd): + result = Mock() + result.returncode = 1 + result.stdout = "" + result.stderr = "error" + return result + else: + result = Mock() + result.returncode = 0 + result.stdout = "success" + result.stderr = "" + return result + + mock_run.side_effect = side_effect + + coordinator = InstallationCoordinator( + ["echo 1", "fail", "echo 3"], + stop_on_error=True + ) + result = coordinator.execute() + + self.assertFalse(result.success) + self.assertEqual(result.failed_step, 1) + self.assertEqual(result.steps[0].status, StepStatus.SUCCESS) + self.assertEqual(result.steps[1].status, StepStatus.FAILED) + self.assertEqual(result.steps[2].status, StepStatus.SKIPPED) + + @patch('subprocess.run') + def test_execute_continue_on_error(self, mock_run): + def side_effect(*args, **kwargs): + cmd = args[0] if args else kwargs.get('shell') + if "fail" in str(cmd): + result = Mock() + result.returncode = 1 + result.stdout = "" + result.stderr = "error" + return result + else: + result = Mock() + result.returncode = 0 + result.stdout = "success" + result.stderr = "" + return result + + mock_run.side_effect = side_effect + + coordinator = InstallationCoordinator( + ["echo 1", "fail", "echo 3"], + stop_on_error=False + ) + result = coordinator.execute() + + 
self.assertFalse(result.success) + self.assertEqual(result.steps[0].status, StepStatus.SUCCESS) + self.assertEqual(result.steps[1].status, StepStatus.FAILED) + self.assertEqual(result.steps[2].status, StepStatus.SUCCESS) + + @patch('subprocess.run') + def test_timeout_handling(self, mock_run): + mock_run.side_effect = Exception("Timeout") + + coordinator = InstallationCoordinator(["sleep 1000"], timeout=1) + result = coordinator.execute() + + self.assertFalse(result.success) + self.assertEqual(result.steps[0].status, StepStatus.FAILED) + + def test_progress_callback(self): + callback_calls = [] + + def callback(current, total, step): + callback_calls.append((current, total, step.command)) + + with patch('subprocess.run') as mock_run: + mock_result = Mock() + mock_result.returncode = 0 + mock_result.stdout = "success" + mock_result.stderr = "" + mock_run.return_value = mock_result + + coordinator = InstallationCoordinator( + ["echo 1", "echo 2"], + progress_callback=callback + ) + coordinator.execute() + + self.assertEqual(len(callback_calls), 2) + self.assertEqual(callback_calls[0], (1, 2, "echo 1")) + self.assertEqual(callback_calls[1], (2, 2, "echo 2")) + + def test_log_file(self): + with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.log') as f: + log_file = f.name + + try: + with patch('subprocess.run') as mock_run: + mock_result = Mock() + mock_result.returncode = 0 + mock_result.stdout = "success" + mock_result.stderr = "" + mock_run.return_value = mock_result + + coordinator = InstallationCoordinator( + ["echo test"], + log_file=log_file + ) + coordinator.execute() + + self.assertTrue(os.path.exists(log_file)) + with open(log_file, 'r') as f: + content = f.read() + self.assertIn("Executing: echo test", content) + finally: + if os.path.exists(log_file): + os.unlink(log_file) + + @patch('subprocess.run') + def test_rollback(self, mock_run): + mock_result = Mock() + mock_result.returncode = 1 + mock_result.stdout = "" + mock_result.stderr = 
"error" + mock_run.return_value = mock_result + + coordinator = InstallationCoordinator( + ["fail"], + enable_rollback=True + ) + coordinator.add_rollback_command("echo rollback") + result = coordinator.execute() + + self.assertFalse(result.success) + self.assertGreaterEqual(mock_run.call_count, 2) + + @patch('subprocess.run') + def test_verify_installation(self, mock_run): + mock_result = Mock() + mock_result.returncode = 0 + mock_result.stdout = "Docker version 20.10.0" + mock_result.stderr = "" + mock_run.return_value = mock_result + + coordinator = InstallationCoordinator(["echo test"]) + coordinator.execute() + + verify_results = coordinator.verify_installation(["docker --version"]) + + self.assertTrue(verify_results["docker --version"]) + + def test_get_summary(self): + with patch('subprocess.run') as mock_run: + mock_result = Mock() + mock_result.returncode = 0 + mock_result.stdout = "success" + mock_result.stderr = "" + mock_run.return_value = mock_result + + coordinator = InstallationCoordinator(["echo 1", "echo 2"]) + coordinator.execute() + + summary = coordinator.get_summary() + + self.assertEqual(summary["total_steps"], 2) + self.assertEqual(summary["success"], 2) + self.assertEqual(summary["failed"], 0) + self.assertEqual(summary["skipped"], 0) + + def test_export_log(self): + with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.json') as f: + export_file = f.name + + try: + with patch('subprocess.run') as mock_run: + mock_result = Mock() + mock_result.returncode = 0 + mock_result.stdout = "success" + mock_result.stderr = "" + mock_run.return_value = mock_result + + coordinator = InstallationCoordinator(["echo test"]) + coordinator.execute() + coordinator.export_log(export_file) + + self.assertTrue(os.path.exists(export_file)) + + import json + with open(export_file, 'r') as f: + data = json.load(f) + self.assertIn("total_steps", data) + self.assertEqual(data["total_steps"], 1) + finally: + if os.path.exists(export_file): + 
os.unlink(export_file) + + @patch('subprocess.run') + def test_step_timing(self, mock_run): + mock_result = Mock() + mock_result.returncode = 0 + mock_result.stdout = "success" + mock_result.stderr = "" + mock_run.return_value = mock_result + + coordinator = InstallationCoordinator(["echo test"]) + result = coordinator.execute() + + step = result.steps[0] + self.assertIsNotNone(step.start_time) + self.assertIsNotNone(step.end_time) + if step.end_time and step.start_time: + self.assertTrue(step.end_time > step.start_time) + self.assertIsNotNone(step.duration()) + + +class TestInstallDocker(unittest.TestCase): + + @patch('subprocess.run') + def test_install_docker_success(self, mock_run): + mock_result = Mock() + mock_result.returncode = 0 + mock_result.stdout = "success" + mock_result.stderr = "" + mock_run.return_value = mock_result + + result = install_docker() + + self.assertTrue(result.success) + self.assertEqual(len(result.steps), 8) + + @patch('subprocess.run') + def test_install_docker_failure(self, mock_run): + mock_result = Mock() + mock_result.returncode = 1 + mock_result.stdout = "" + mock_result.stderr = "error" + mock_run.return_value = mock_result + + result = install_docker() + + self.assertFalse(result.success) + self.assertIsNotNone(result.failed_step) + + +if __name__ == '__main__': + unittest.main() From 04b1f077f8583b29ce7bae11eed5d502c5ace575 Mon Sep 17 00:00:00 2001 From: sahil Date: Mon, 10 Nov 2025 16:32:34 +0530 Subject: [PATCH 03/12] Test file update for CLI --- LLM/test_interpreter.py | 33 +++++++++++++++++++-------------- cortex/test_cli.py | 2 +- 2 files changed, 20 insertions(+), 15 deletions(-) diff --git a/LLM/test_interpreter.py b/LLM/test_interpreter.py index a8836c7..30914e2 100644 --- a/LLM/test_interpreter.py +++ b/LLM/test_interpreter.py @@ -1,7 +1,12 @@ import unittest from unittest.mock import Mock, patch, MagicMock import json -from interpreter import CommandInterpreter, APIProvider +import sys +import os + 
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) + +from LLM.interpreter import CommandInterpreter, APIProvider class TestCommandInterpreter(unittest.TestCase): @@ -9,21 +14,21 @@ class TestCommandInterpreter(unittest.TestCase): def setUp(self): self.api_key = "test-api-key" - @patch('interpreter.OpenAI') + @patch('openai.OpenAI') def test_initialization_openai(self, mock_openai): interpreter = CommandInterpreter(api_key=self.api_key, provider="openai") self.assertEqual(interpreter.provider, APIProvider.OPENAI) self.assertEqual(interpreter.model, "gpt-4") mock_openai.assert_called_once_with(api_key=self.api_key) - @patch('interpreter.Anthropic') + @patch('anthropic.Anthropic') def test_initialization_claude(self, mock_anthropic): interpreter = CommandInterpreter(api_key=self.api_key, provider="claude") self.assertEqual(interpreter.provider, APIProvider.CLAUDE) self.assertEqual(interpreter.model, "claude-3-5-sonnet-20241022") mock_anthropic.assert_called_once_with(api_key=self.api_key) - @patch('interpreter.OpenAI') + @patch('openai.OpenAI') def test_initialization_custom_model(self, mock_openai): interpreter = CommandInterpreter( api_key=self.api_key, @@ -73,14 +78,14 @@ def test_validate_commands_dd_pattern(self): result = interpreter._validate_commands(commands) self.assertEqual(result, ["apt update"]) - @patch('interpreter.OpenAI') + @patch('openai.OpenAI') def test_parse_empty_input(self, mock_openai): interpreter = CommandInterpreter(api_key=self.api_key, provider="openai") with self.assertRaises(ValueError): interpreter.parse("") - @patch('interpreter.OpenAI') + @patch('openai.OpenAI') def test_call_openai_success(self, mock_openai): mock_client = Mock() mock_response = Mock() @@ -94,7 +99,7 @@ def test_call_openai_success(self, mock_openai): result = interpreter._call_openai("install docker") self.assertEqual(result, ["apt update"]) - @patch('interpreter.OpenAI') + @patch('openai.OpenAI') def test_call_openai_failure(self, mock_openai): 
mock_client = Mock() mock_client.chat.completions.create.side_effect = Exception("API Error") @@ -105,7 +110,7 @@ def test_call_openai_failure(self, mock_openai): with self.assertRaises(RuntimeError): interpreter._call_openai("install docker") - @patch('interpreter.Anthropic') + @patch('anthropic.Anthropic') def test_call_claude_success(self, mock_anthropic): mock_client = Mock() mock_response = Mock() @@ -119,7 +124,7 @@ def test_call_claude_success(self, mock_anthropic): result = interpreter._call_claude("install docker") self.assertEqual(result, ["apt update"]) - @patch('interpreter.Anthropic') + @patch('anthropic.Anthropic') def test_call_claude_failure(self, mock_anthropic): mock_client = Mock() mock_client.messages.create.side_effect = Exception("API Error") @@ -130,7 +135,7 @@ def test_call_claude_failure(self, mock_anthropic): with self.assertRaises(RuntimeError): interpreter._call_claude("install docker") - @patch('interpreter.OpenAI') + @patch('openai.OpenAI') def test_parse_with_validation(self, mock_openai): mock_client = Mock() mock_response = Mock() @@ -144,7 +149,7 @@ def test_parse_with_validation(self, mock_openai): result = interpreter.parse("test command", validate=True) self.assertEqual(result, ["apt update"]) - @patch('interpreter.OpenAI') + @patch('openai.OpenAI') def test_parse_without_validation(self, mock_openai): mock_client = Mock() mock_response = Mock() @@ -158,7 +163,7 @@ def test_parse_without_validation(self, mock_openai): result = interpreter.parse("test command", validate=False) self.assertEqual(result, ["apt update", "rm -rf /"]) - @patch('interpreter.OpenAI') + @patch('openai.OpenAI') def test_parse_with_context(self, mock_openai): mock_client = Mock() mock_response = Mock() @@ -197,7 +202,7 @@ def test_parse_commands_empty_commands(self): result = interpreter._parse_commands(response) self.assertEqual(result, ["apt update", "apt install docker"]) - @patch('interpreter.OpenAI') + @patch('openai.OpenAI') def 
test_parse_docker_installation(self, mock_openai): mock_client = Mock() mock_response = Mock() @@ -217,7 +222,7 @@ def test_parse_docker_installation(self, mock_openai): result = interpreter.parse("install docker") self.assertGreater(len(result), 0) - self.assertIn("docker", result[0].lower() or result[1].lower()) + self.assertTrue(any("docker" in cmd.lower() for cmd in result)) if __name__ == "__main__": diff --git a/cortex/test_cli.py b/cortex/test_cli.py index cb2bf35..635ad06 100644 --- a/cortex/test_cli.py +++ b/cortex/test_cli.py @@ -18,7 +18,7 @@ def test_get_api_key_openai(self): api_key = self.cli._get_api_key() self.assertEqual(api_key, 'test-key') - @patch.dict(os.environ, {'ANTHROPIC_API_KEY': 'test-claude-key'}) + @patch.dict(os.environ, {'ANTHROPIC_API_KEY': 'test-claude-key', 'OPENAI_API_KEY': ''}, clear=True) def test_get_api_key_claude(self): api_key = self.cli._get_api_key() self.assertEqual(api_key, 'test-claude-key') From dfa2794aa1b7ffd7385e55fac2c1bdaceaf84b12 Mon Sep 17 00:00:00 2001 From: sahil Date: Tue, 11 Nov 2025 21:07:54 +0530 Subject: [PATCH 04/12] CLI test integration fix. 
--- LLM/interpreter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LLM/interpreter.py b/LLM/interpreter.py index 67f9525..cdebe46 100644 --- a/LLM/interpreter.py +++ b/LLM/interpreter.py @@ -22,7 +22,7 @@ def __init__( if model: self.model = model else: - self.model = "gpt-4" if self.provider == APIProvider.OPENAI else "claude-3-5-sonnet-20241022" + self.model = "gpt-4o-mini" if self.provider == APIProvider.OPENAI else "claude-3-5-sonnet-20241022" self._initialize_client() From 7dd773645526bbd188922aa52bdc9a79a8fdee54 Mon Sep 17 00:00:00 2001 From: Sahil Bhatane <118365864+Sahilbhatane@users.noreply.github.com> Date: Tue, 11 Nov 2025 21:23:25 +0530 Subject: [PATCH 05/12] Update model selection for OpenAI provider --- LLM/interpreter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LLM/interpreter.py b/LLM/interpreter.py index cdebe46..67f9525 100644 --- a/LLM/interpreter.py +++ b/LLM/interpreter.py @@ -22,7 +22,7 @@ def __init__( if model: self.model = model else: - self.model = "gpt-4o-mini" if self.provider == APIProvider.OPENAI else "claude-3-5-sonnet-20241022" + self.model = "gpt-4" if self.provider == APIProvider.OPENAI else "claude-3-5-sonnet-20241022" self._initialize_client() From fe3719397a91bd655da4c06e3363af232436a9d4 Mon Sep 17 00:00:00 2001 From: sahil Date: Fri, 14 Nov 2025 22:05:33 +0530 Subject: [PATCH 06/12] Add Kimi provider and integration tests issue #40 --- LLM/interpreter.py | 112 ++++++++++++- LLM/requirements.txt | 1 + LLM/test_interpreter.py | 65 +++++++- README.md | 24 ++- contribution.md | 63 ++++++++ cortex/cli.py | 226 ++++++++++++++++---------- cortex/coordinator.py | 39 ++++- test.md | 67 ++++++++ test/integration/__init__.py | 1 + test/integration/docker_utils.py | 108 +++++++++++++ test/integration/test_end_to_end.py | 114 +++++++++++++ test/run_all_tests.py | 37 +++++ test/test_cli.py | 243 ++++++++++++++++++++++++++++ 13 files changed, 998 insertions(+), 102 deletions(-) create mode 100644 
contribution.md create mode 100644 test.md create mode 100644 test/integration/__init__.py create mode 100644 test/integration/docker_utils.py create mode 100644 test/integration/test_end_to_end.py create mode 100644 test/run_all_tests.py create mode 100644 test/test_cli.py diff --git a/LLM/interpreter.py b/LLM/interpreter.py index 67f9525..d9907ca 100644 --- a/LLM/interpreter.py +++ b/LLM/interpreter.py @@ -1,3 +1,5 @@ +"""Natural language to shell command interpreter backed by multiple LLMs.""" + import os import json from typing import List, Optional, Dict, Any @@ -5,11 +7,17 @@ class APIProvider(Enum): + """Supported large-language-model providers for command generation.""" + CLAUDE = "claude" OPENAI = "openai" + KIMI = "kimi" + FAKE = "fake" class CommandInterpreter: + """Translate natural language intents into shell commands via LLMs.""" + def __init__( self, api_key: str, @@ -22,11 +30,12 @@ def __init__( if model: self.model = model else: - self.model = "gpt-4" if self.provider == APIProvider.OPENAI else "claude-3-5-sonnet-20241022" + self.model = self._default_model() self._initialize_client() def _initialize_client(self): + """Instantiate the SDK client for the selected provider.""" if self.provider == APIProvider.OPENAI: try: from openai import OpenAI @@ -39,8 +48,20 @@ def _initialize_client(self): self.client = Anthropic(api_key=self.api_key) except ImportError: raise ImportError("Anthropic package not installed. Run: pip install anthropic") + elif self.provider == APIProvider.KIMI: + try: + import requests # type: ignore + except ImportError as exc: + raise ImportError("Requests package not installed. Run: pip install requests") from exc + + self.client = requests + self._kimi_base_url = os.environ.get("KIMI_API_BASE_URL", "https://api.moonshot.cn") + elif self.provider == APIProvider.FAKE: + # Fake provider is used for deterministic offline or integration tests. 
+ self.client = None def _get_system_prompt(self) -> str: + """Return the base instructions shared across all provider calls.""" return """You are a Linux system command expert. Convert natural language requests into safe, validated bash commands. Rules: @@ -59,6 +80,7 @@ def _get_system_prompt(self) -> str: Example response: {"commands": ["sudo apt update", "sudo apt install -y docker.io", "sudo apt install -y nvidia-docker2", "sudo systemctl restart docker"]}""" def _call_openai(self, user_input: str) -> List[str]: + """Call the OpenAI Chat Completions API and parse the response.""" try: response = self.client.chat.completions.create( model=self.model, @@ -76,6 +98,7 @@ def _call_openai(self, user_input: str) -> List[str]: raise RuntimeError(f"OpenAI API call failed: {str(e)}") def _call_claude(self, user_input: str) -> List[str]: + """Call the Anthropic Messages API and parse the response.""" try: response = self.client.messages.create( model=self.model, @@ -91,8 +114,77 @@ def _call_claude(self, user_input: str) -> List[str]: return self._parse_commands(content) except Exception as e: raise RuntimeError(f"Claude API call failed: {str(e)}") + + def _call_kimi(self, user_input: str) -> List[str]: + """Call the Kimi K2 HTTP API and parse the response body.""" + + headers = { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json", + } + payload = { + "model": self.model, + "messages": [ + {"role": "system", "content": self._get_system_prompt()}, + {"role": "user", "content": user_input}, + ], + "temperature": 0.3, + "max_tokens": 1000, + } + + try: + response = self.client.post( + f"{self._kimi_base_url.rstrip('/')}/v1/chat/completions", + headers=headers, + json=payload, + timeout=60, + ) + response.raise_for_status() + data = response.json() + choices = data.get("choices", []) + if not choices: + raise RuntimeError("Kimi API returned no choices") + content = choices[0].get("message", {}).get("content", "").strip() + if not content: + 
raise RuntimeError("Kimi API returned empty content") + return self._parse_commands(content) + except Exception as exc: + raise RuntimeError(f"Kimi API call failed: {str(exc)}") from exc + + def _call_fake(self, user_input: str) -> List[str]: + """Return predetermined commands without hitting a real provider.""" + + payload = os.environ.get("CORTEX_FAKE_COMMANDS") + if payload: + try: + data = json.loads(payload) + except json.JSONDecodeError as exc: + raise ValueError("CORTEX_FAKE_COMMANDS must contain valid JSON") from exc + if not isinstance(data, dict) or "commands" not in data: + raise ValueError("CORTEX_FAKE_COMMANDS must define a 'commands' list") + return self._parse_commands(payload) + + safe_defaults = { + "docker": [ + "echo Updating package cache", + "echo Installing docker packages", + "echo Enabling docker service", + ], + "python": [ + "echo Installing Python", + "echo Setting up virtual environment", + "echo Installing pip packages", + ], + } + + for key, commands in safe_defaults.items(): + if key in user_input.lower(): + return commands + + return ["echo Preparing environment", "echo Completed simulation"] def _parse_commands(self, content: str) -> List[str]: + """Parse the JSON payload returned by an LLM into command strings.""" try: if content.startswith("```json"): content = content.split("```json")[1].split("```")[0].strip() @@ -110,6 +202,7 @@ def _parse_commands(self, content: str) -> List[str]: raise ValueError(f"Failed to parse LLM response: {str(e)}") def _validate_commands(self, commands: List[str]) -> List[str]: + """Filter the provided commands to remove obviously dangerous patterns.""" dangerous_patterns = [ "rm -rf /", "dd if=", @@ -129,6 +222,7 @@ def _validate_commands(self, commands: List[str]) -> List[str]: return validated def parse(self, user_input: str, validate: bool = True) -> List[str]: + """Parse the user's request into a list of shell commands.""" if not user_input or not user_input.strip(): raise ValueError("User input 
cannot be empty") @@ -136,6 +230,10 @@ def parse(self, user_input: str, validate: bool = True) -> List[str]: commands = self._call_openai(user_input) elif self.provider == APIProvider.CLAUDE: commands = self._call_claude(user_input) + elif self.provider == APIProvider.KIMI: + commands = self._call_kimi(user_input) + elif self.provider == APIProvider.FAKE: + commands = self._call_fake(user_input) else: raise ValueError(f"Unsupported provider: {self.provider}") @@ -150,9 +248,21 @@ def parse_with_context( system_info: Optional[Dict[str, Any]] = None, validate: bool = True ) -> List[str]: + """Parse a request while appending structured system context.""" context = "" if system_info: context = f"\n\nSystem context: {json.dumps(system_info)}" enriched_input = user_input + context return self.parse(enriched_input, validate=validate) + + def _default_model(self) -> str: + """Return the default model identifier for the active provider.""" + + if self.provider == APIProvider.OPENAI: + return "gpt-4" + if self.provider == APIProvider.CLAUDE: + return "claude-3-5-sonnet-20241022" + if self.provider == APIProvider.KIMI: + return os.environ.get("KIMI_DEFAULT_MODEL", "kimi-k2") + return "fake-local-model" diff --git a/LLM/requirements.txt b/LLM/requirements.txt index b49cf35..f8e36a0 100644 --- a/LLM/requirements.txt +++ b/LLM/requirements.txt @@ -1,2 +1,3 @@ openai>=1.0.0 anthropic>=0.18.0 +requests>=2.31.0 diff --git a/LLM/test_interpreter.py b/LLM/test_interpreter.py index 30914e2..eeb17d5 100644 --- a/LLM/test_interpreter.py +++ b/LLM/test_interpreter.py @@ -1,8 +1,9 @@ -import unittest -from unittest.mock import Mock, patch, MagicMock import json -import sys import os +import sys +import unittest +from types import SimpleNamespace +from unittest.mock import Mock, patch sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) @@ -13,6 +14,14 @@ class TestCommandInterpreter(unittest.TestCase): def setUp(self): self.api_key = "test-api-key" + openai_stub = 
SimpleNamespace(OpenAI=Mock()) + anthropic_stub = SimpleNamespace(Anthropic=Mock()) + self.sys_modules_patcher = patch.dict(sys.modules, { + 'openai': openai_stub, + 'anthropic': anthropic_stub, + }) + self.sys_modules_patcher.start() + self.addCleanup(self.sys_modules_patcher.stop) @patch('openai.OpenAI') def test_initialization_openai(self, mock_openai): @@ -37,6 +46,13 @@ def test_initialization_custom_model(self, mock_openai): ) self.assertEqual(interpreter.model, "gpt-4-turbo") + @patch.dict(os.environ, {}, clear=True) + @patch.dict(sys.modules, {'requests': Mock()}) + def test_initialization_kimi(self): + interpreter = CommandInterpreter(api_key=self.api_key, provider="kimi") + self.assertEqual(interpreter.provider, APIProvider.KIMI) + self.assertEqual(interpreter.model, "kimi-k2") + def test_parse_commands_valid_json(self): interpreter = CommandInterpreter.__new__(CommandInterpreter) @@ -224,6 +240,49 @@ def test_parse_docker_installation(self, mock_openai): self.assertGreater(len(result), 0) self.assertTrue(any("docker" in cmd.lower() for cmd in result)) + @patch.dict(os.environ, {'CORTEX_FAKE_COMMANDS': '{"commands": ["echo test"]}'}, clear=True) + def test_call_fake_with_env_commands(self): + interpreter = CommandInterpreter(api_key="ignore", provider="fake") + result = interpreter.parse("install docker", validate=False) + self.assertEqual(result, ["echo test"]) + + @patch.dict(os.environ, {}, clear=True) + def test_call_fake_with_defaults(self): + interpreter = CommandInterpreter(api_key="ignore", provider="fake") + result = interpreter.parse("install docker", validate=False) + self.assertTrue(len(result) > 0) + + @patch.dict(os.environ, {}, clear=True) + @patch.dict(sys.modules, {'requests': Mock()}) + def test_call_kimi_success(self): + requests_mock = sys.modules['requests'] + response_mock = Mock() + response_mock.json.return_value = { + 'choices': [ + {'message': {'content': '{"commands": ["apt update"]}'}} + ] + } + 
response_mock.raise_for_status.return_value = None + requests_mock.post.return_value = response_mock + + interpreter = CommandInterpreter(api_key=self.api_key, provider="kimi") + interpreter.client = requests_mock + + result = interpreter._call_kimi("install docker") + self.assertEqual(result, ["apt update"]) + + @patch.dict(os.environ, {}, clear=True) + @patch.dict(sys.modules, {'requests': Mock()}) + def test_call_kimi_failure(self): + requests_mock = sys.modules['requests'] + requests_mock.post.side_effect = Exception("API Error") + + interpreter = CommandInterpreter(api_key=self.api_key, provider="kimi") + interpreter.client = requests_mock + + with self.assertRaises(RuntimeError): + interpreter._call_kimi("install docker") + if __name__ == "__main__": unittest.main() diff --git a/README.md b/README.md index 5b32a9c..af89b86 100644 --- a/README.md +++ b/README.md @@ -58,11 +58,29 @@ Cortex Linux embeds AI at the operating system level. Tell it what you need in p ## Tech Stack - **Base OS**: Ubuntu 24.04 LTS (Debian packaging) -- **AI Layer**: Python 3.11+, LangChain, Claude API +- **AI Layer**: Python 3.11+, OpenAI GPT-4, Anthropic Claude 3.5, Kimi K2 - **Security**: Firejail sandboxing, AppArmor policies - **Package Management**: apt wrapper with semantic understanding - **Hardware Detection**: hwinfo, lspci, nvidia-smi integration +### Supported LLM Providers + +Configure the CLI by exporting the relevant API key (or using the fake provider +for offline development): + +| Provider | Environment Variable | Default Model | +|----------|----------------------|---------------| +| OpenAI | `OPENAI_API_KEY` | `gpt-4` | +| Claude | `ANTHROPIC_API_KEY` | `claude-3-5-sonnet-20241022` | +| Kimi K2 | `KIMI_API_KEY` | `kimi-k2` | +| Fake | `CORTEX_PROVIDER=fake` + optional `CORTEX_FAKE_COMMANDS` | Offline stubs | + +To run the CLI with the fake provider: + +```bash +CORTEX_PROVIDER=fake CORTEX_FAKE_COMMANDS='{"commands": ["echo Step 1"]}' cortex install docker --dry-run 
+``` + ## Get Involved **We need:** @@ -72,7 +90,9 @@ Cortex Linux embeds AI at the operating system level. Tell it what you need in p - Technical Writers - Beta Testers -Browse [Issues](../../issues) for contribution opportunities. +Browse [Issues](../../issues) for contribution opportunities and review the +[Contribution Guide](contribution.md) plus the [Testing Strategy](test.md) +before opening a pull request. ### Join the Community diff --git a/contribution.md b/contribution.md new file mode 100644 index 0000000..c2d0a5f --- /dev/null +++ b/contribution.md @@ -0,0 +1,63 @@ +# Contribution Guide + +Thank you for your interest in contributing to **Cortex**. This document explains the +project workflow, coding standards, and review expectations so that every pull +request is straightforward to review and merge. + +## Getting Started + +1. **Fork and clone the repository.** +2. **Create a feature branch** from `main` using a descriptive name, for example + `issue-40-kimi-k2`. +3. **Install dependencies** in a virtual environment: + ```bash + python -m venv .venv + source .venv/bin/activate # Windows: .venv\Scripts\activate + pip install --upgrade pip + pip install -r LLM/requirements.txt + pip install -r src/requirements.txt + pip install -e . + ``` +4. **Run the full test suite** (`python test/run_all_tests.py`) to ensure your + environment is healthy before you start coding. + +## Coding Standards + +- **Type hints and docstrings** are required for all public functions, classes, + and modules. CodeRabbit enforces an 80% docstring coverage threshold. +- **Formatting** follows `black` (line length 100) and `isort` ordering. Please run: + ```bash + black . + isort . + ``` +- **Linting** uses `ruff`. Address warnings locally before opening a pull request. +- **Logging and messages** must use the structured status labels (`[INFO]`, `[PLAN]`, + `[EXEC]`, `[SUCCESS]`, `[ERROR]`, etc.) to provide a consistent CLI experience. 
+- **Secrets** such as API keys must never be hard-coded or committed. +- **Dependency changes** must update both `LLM/requirements.txt` and any related + documentation (`README.md`, `test.md`). + +## Tests + +- Unit tests live under `test/` and should be added or updated alongside code + changes. +- Integration tests live under `test/integration/` and are designed to run inside + Docker. Use the helper utilities in `test/integration/docker_utils.py` to keep + the tests concise and reliable. +- Ensure that every new feature or regression fix includes corresponding test + coverage. Submissions without meaningful tests will be sent back for revision. +- Before requesting review, run: + ```bash + python test/run_all_tests.py + ``` + Optionally, include `CORTEX_PROVIDER=fake` to avoid contacting external APIs. + +## Pull Request Checklist + +- Provide a **clear title** that references the issue being addressed. +- Include a **summary** of the change, **testing notes**, and **risk assessment**. +- Confirm that **CI passes** and that **docstring coverage** meets the required threshold. +- Link the pull request to the relevant GitHub issue (`Fixes #`). +- Be responsive to review feedback and keep discussions on-topic. + +We appreciate your time and effort—welcome aboard! 
diff --git a/cortex/cli.py b/cortex/cli.py index 86b1682..6bdb22f 100644 --- a/cortex/cli.py +++ b/cortex/cli.py @@ -1,139 +1,173 @@ -import sys -import os +"""Command-line interface entry point for the Cortex automation toolkit.""" + import argparse -import time -from typing import List, Optional +import os import subprocess - -sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) +import sys +import time +from typing import Optional from LLM.interpreter import CommandInterpreter from cortex.coordinator import InstallationCoordinator, StepStatus class CortexCLI: - def __init__(self): + """Command-line interface for Cortex AI-powered software installation.""" + + def __init__(self) -> None: + """Initialise spinner state used for interactive progress updates.""" self.spinner_chars = ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'] self.spinner_idx = 0 - - def _get_api_key(self) -> Optional[str]: - api_key = os.environ.get('OPENAI_API_KEY') or os.environ.get('ANTHROPIC_API_KEY') - if not api_key: - self._print_error("API key not found. 
Set OPENAI_API_KEY or ANTHROPIC_API_KEY environment variable.") - return None - return api_key - + def _get_provider(self) -> str: + """Detect which LLM provider to use based on configuration and credentials.""" + provider_override = os.environ.get('CORTEX_PROVIDER') + if provider_override: + return provider_override.lower() + if os.environ.get('OPENAI_API_KEY'): return 'openai' - elif os.environ.get('ANTHROPIC_API_KEY'): + if os.environ.get('ANTHROPIC_API_KEY'): return 'claude' + if os.environ.get('KIMI_API_KEY'): + return 'kimi' + if os.environ.get('CORTEX_FAKE_COMMANDS'): + return 'fake' return 'openai' - - def _print_status(self, emoji: str, message: str): - print(f"{emoji} {message}") - - def _print_error(self, message: str): - print(f"❌ Error: {message}", file=sys.stderr) - - def _print_success(self, message: str): - print(f"✅ {message}") - - def _animate_spinner(self, message: str): + + def _get_api_key(self, provider: str) -> Optional[str]: + """Return the API key for the specified provider or emit guidance if missing.""" + env_map = { + 'openai': 'OPENAI_API_KEY', + 'claude': 'ANTHROPIC_API_KEY', + 'kimi': 'KIMI_API_KEY', + } + + env_var = env_map.get(provider) + if not env_var: + return None + + api_key = os.environ.get(env_var) + if not api_key: + self._print_error(f"API key not found. 
Set {env_var} environment variable.") + return None + return api_key + + def _print_status(self, label: str, message: str) -> None: + """Emit informational output with a consistent status label.""" + print(f"{label} {message}") + + def _print_error(self, message: str) -> None: + """Emit an error message to ``stderr`` with standard formatting.""" + print(f"[ERROR] {message}", file=sys.stderr) + + def _print_success(self, message: str) -> None: + """Emit a success message to ``stdout`` with the success label.""" + print(f"[SUCCESS] {message}") + + def _animate_spinner(self, message: str) -> None: + """Render a single spinner frame with the supplied ``message``.""" sys.stdout.write(f"\r{self.spinner_chars[self.spinner_idx]} {message}") sys.stdout.flush() self.spinner_idx = (self.spinner_idx + 1) % len(self.spinner_chars) time.sleep(0.1) - - def _clear_line(self): + + def _clear_line(self) -> None: + """Clear the active terminal line to hide spinner artifacts.""" sys.stdout.write('\r\033[K') sys.stdout.flush() - - def install(self, software: str, execute: bool = False, dry_run: bool = False): - api_key = self._get_api_key() - if not api_key: - return 1 - + + def install(self, software: str, execute: bool = False, dry_run: bool = False) -> int: + """Interpret a natural-language request and optionally execute the plan.""" + provider = self._get_provider() - + + if provider == 'fake': + api_key = os.environ.get('CORTEX_FAKE_API_KEY', 'fake-api-key') + else: + api_key = self._get_api_key(provider) + if not api_key: + return 1 + try: - self._print_status("🧠", "Understanding request...") - + self._print_status("[INFO]", "Understanding request...") + interpreter = CommandInterpreter(api_key=api_key, provider=provider) - - self._print_status("📦", "Planning installation...") - + + self._print_status("[PLAN]", "Planning installation...") + for _ in range(10): self._animate_spinner("Analyzing system requirements...") self._clear_line() - + commands = interpreter.parse(f"install 
{software}") - + if not commands: self._print_error("No commands generated. Please try again with a different request.") return 1 - - self._print_status("⚙️", f"Installing {software}...") + + self._print_status("[EXEC]", f"Installing {software}...") print("\nGenerated commands:") - for i, cmd in enumerate(commands, 1): - print(f" {i}. {cmd}") - + for index, command in enumerate(commands, 1): + print(f" {index}. {command}") + if dry_run: print("\n(Dry run mode - commands not executed)") return 0 - + if execute: - def progress_callback(current, total, step): - status_emoji = "⏳" + def progress_callback(current: int, total: int, step) -> None: + status_label = "[PENDING]" if step.status == StepStatus.SUCCESS: - status_emoji = "✅" + status_label = "[OK]" elif step.status == StepStatus.FAILED: - status_emoji = "❌" - print(f"\n[{current}/{total}] {status_emoji} {step.description}") + status_label = "[FAIL]" + print(f"\n[{current}/{total}] {status_label} {step.description}") print(f" Command: {step.command}") - + print("\nExecuting commands...") - + coordinator = InstallationCoordinator( commands=commands, - descriptions=[f"Step {i+1}" for i in range(len(commands))], + descriptions=[f"Step {i + 1}" for i in range(len(commands))], timeout=300, stop_on_error=True, - progress_callback=progress_callback + progress_callback=progress_callback, ) - + result = coordinator.execute() - + if result.success: self._print_success(f"{software} installed successfully!") print(f"\nCompleted in {result.total_duration:.2f} seconds") return 0 + + if result.failed_step is not None: + self._print_error(f"Installation failed at step {result.failed_step + 1}") else: - if result.failed_step is not None: - self._print_error(f"Installation failed at step {result.failed_step + 1}") - else: - self._print_error("Installation failed") - if result.error_message: - print(f" Error: {result.error_message}", file=sys.stderr) - return 1 - else: - print("\nTo execute these commands, run with --execute flag") 
- print("Example: cortex install docker --execute") - + self._print_error("Installation failed") + if result.error_message: + print(f" Error: {result.error_message}", file=sys.stderr) + return 1 + + print("\nTo execute these commands, run with --execute flag") + print("Example: cortex install docker --execute") return 0 - - except ValueError as e: - self._print_error(str(e)) + + except ValueError as exc: + self._print_error(str(exc)) return 1 - except RuntimeError as e: - self._print_error(f"API call failed: {str(e)}") + except RuntimeError as exc: + self._print_error(f"API call failed: {str(exc)}") return 1 - except Exception as e: - self._print_error(f"Unexpected error: {str(e)}") + except Exception as exc: + self._print_error(f"Unexpected error: {str(exc)}") return 1 -def main(): +def main() -> int: + """Entry point for the cortex CLI command.""" + parser = argparse.ArgumentParser( prog='cortex', description='AI-powered Linux command interpreter', @@ -144,31 +178,47 @@ def main(): cortex install docker --execute cortex install "python 3.11 with pip" cortex install nginx --dry-run + cortex --test Environment Variables: - OPENAI_API_KEY OpenAI API key for GPT-4 - ANTHROPIC_API_KEY Anthropic API key for Claude + OPENAI_API_KEY OpenAI API key for GPT models + ANTHROPIC_API_KEY Anthropic API key for Claude models + KIMI_API_KEY Moonshot Kimi API key for K2 models + CORTEX_PROVIDER Optional override (openai|claude|kimi|fake) """ ) - + + parser.add_argument('--test', action='store_true', help='Run all test suites') + subparsers = parser.add_subparsers(dest='command', help='Available commands') - + install_parser = subparsers.add_parser('install', help='Install software using natural language') install_parser.add_argument('software', type=str, help='Software to install (natural language)') install_parser.add_argument('--execute', action='store_true', help='Execute the generated commands') install_parser.add_argument('--dry-run', action='store_true', help='Show commands 
without executing') - + args = parser.parse_args() - + + if args.test: + test_dir = os.path.join(os.path.dirname(__file__), '..', 'test') + test_runner = os.path.join(test_dir, 'run_all_tests.py') + + if not os.path.exists(test_runner): + print("[ERROR] Test runner not found", file=sys.stderr) + return 1 + + result = subprocess.run([sys.executable, test_runner], check=False) + return result.returncode + if not args.command: parser.print_help() return 1 - + cli = CortexCLI() - + if args.command == 'install': return cli.install(args.software, execute=args.execute, dry_run=args.dry_run) - + return 0 diff --git a/cortex/coordinator.py b/cortex/coordinator.py index a2ae0a3..d8690dd 100644 --- a/cortex/coordinator.py +++ b/cortex/coordinator.py @@ -1,13 +1,17 @@ +"""Execution coordinator for multi-step software installation plans.""" + +import json import subprocess import time -import json -from typing import List, Dict, Any, Optional, Callable -from dataclasses import dataclass, field -from enum import Enum +from dataclasses import dataclass from datetime import datetime +from enum import Enum +from typing import Any, Callable, Dict, List, Optional class StepStatus(Enum): + """Lifecycle states for a single installation step.""" + PENDING = "pending" RUNNING = "running" SUCCESS = "success" @@ -17,6 +21,8 @@ class StepStatus(Enum): @dataclass class InstallationStep: + """Container describing an individual shell command execution.""" + command: str description: str status: StepStatus = StepStatus.PENDING @@ -27,6 +33,7 @@ class InstallationStep: return_code: Optional[int] = None def duration(self) -> Optional[float]: + """Return the elapsed execution time for the step if available.""" if self.start_time and self.end_time: return self.end_time - self.start_time return None @@ -34,6 +41,8 @@ def duration(self) -> Optional[float]: @dataclass class InstallationResult: + """Summary returned after executing all installation steps.""" + success: bool steps: 
List[InstallationStep] total_duration: float @@ -42,6 +51,8 @@ class InstallationResult: class InstallationCoordinator: + """Coordinate execution of shell commands with optional rollback.""" + def __init__( self, commands: List[str], @@ -52,6 +63,8 @@ def __init__( log_file: Optional[str] = None, progress_callback: Optional[Callable[[int, int, InstallationStep], None]] = None ): + """Build the coordinator and prepare execution metadata.""" + self.timeout = timeout self.stop_on_error = stop_on_error self.enable_rollback = enable_rollback @@ -71,7 +84,8 @@ def __init__( self.rollback_commands: List[str] = [] - def _log(self, message: str): + def _log(self, message: str) -> None: + """Append a timestamped line to the optional log file.""" timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") log_entry = f"[{timestamp}] {message}" @@ -83,6 +97,7 @@ def _log(self, message: str): pass def _execute_command(self, step: InstallationStep) -> bool: + """Run a step command and update the step state in place.""" step.status = StepStatus.RUNNING step.start_time = time.time() @@ -125,7 +140,8 @@ def _execute_command(self, step: InstallationStep) -> bool: self._log(f"Error: {step.command} - {str(e)}") return False - def _rollback(self): + def _rollback(self) -> None: + """Execute registered rollback commands in reverse order.""" if not self.enable_rollback or not self.rollback_commands: return @@ -143,10 +159,12 @@ def _rollback(self): except Exception as e: self._log(f"Rollback failed: {cmd} - {str(e)}") - def add_rollback_command(self, command: str): + def add_rollback_command(self, command: str) -> None: + """Register a command that reverts a successful step.""" self.rollback_commands.append(command) def execute(self) -> InstallationResult: + """Execute all commands and return a structured result object.""" start_time = time.time() failed_step_index = None @@ -195,6 +213,7 @@ def execute(self) -> InstallationResult: ) def verify_installation(self, verify_commands: List[str]) 
-> Dict[str, bool]: + """Run verification commands and return a mapping of pass/fail results.""" verification_results = {} self._log("Starting verification...") @@ -218,6 +237,7 @@ def verify_installation(self, verify_commands: List[str]) -> Dict[str, bool]: return verification_results def get_summary(self) -> Dict[str, Any]: + """Return a JSON-serialisable summary of the full installation run.""" total_steps = len(self.steps) success_steps = sum(1 for s in self.steps if s.status == StepStatus.SUCCESS) failed_steps = sum(1 for s in self.steps if s.status == StepStatus.FAILED) @@ -240,12 +260,15 @@ def get_summary(self) -> Dict[str, Any]: ] } - def export_log(self, filepath: str): + def export_log(self, filepath: str) -> None: + """Persist the run summary to ``filepath`` in JSON format.""" with open(filepath, 'w', encoding='utf-8') as f: json.dump(self.get_summary(), f, indent=2) def install_docker() -> InstallationResult: + """Provision Docker using the coordinator's default behaviour.""" + commands = [ "apt update", "apt install -y apt-transport-https ca-certificates curl software-properties-common", diff --git a/test.md b/test.md new file mode 100644 index 0000000..a0ecc61 --- /dev/null +++ b/test.md @@ -0,0 +1,67 @@ +# Testing Strategy + +Cortex relies on a mix of fast unit tests and Docker-backed integration tests to +validate the full installation workflow. This guide explains how to run the +suites locally and in CI. + +## Test Suites + +| Suite | Location | Purpose | Invocation | +|-------|----------|---------|------------| +| Unit | `test/*.py` | Validate individual modules (CLI, coordinator, interpreter). | `python test/run_all_tests.py` | +| Integration | `test/integration/*.py` | Exercise end-to-end scenarios inside disposable Docker containers. | `python -m unittest test.integration.test_end_to_end` | + +## Running Tests Locally + +1. 
**Prepare the environment** + ```bash + python -m venv .venv + source .venv/bin/activate # Windows: .venv\Scripts\activate + pip install --upgrade pip + pip install -r LLM/requirements.txt + pip install -r src/requirements.txt + pip install -e . + ``` + +2. **Unit tests** + ```bash + python test/run_all_tests.py + ``` + Use the fake provider to avoid external API calls when necessary: + ```bash + CORTEX_PROVIDER=fake python test/run_all_tests.py + ``` + +3. **Integration tests** (requires Docker) + ```bash + python -m unittest test.integration.test_end_to_end + ``` + Customise the Docker image with `CORTEX_INTEGRATION_IMAGE` if you need a + different base image: + ```bash + CORTEX_INTEGRATION_IMAGE=python:3.12-slim python -m unittest test.integration.test_end_to_end + ``` + +## Continuous Integration Recommendations + +- Run unit tests on every pull request. +- Schedule integration tests nightly or on demand using a GitHub Actions job + with the `docker` service enabled. +- Fail the workflow if docstring coverage (tracked by CodeRabbit) drops below + 80%. +- Publish the HTML report from `python -m coverage html` when running coverage + builds to assist reviewers. + +## Troubleshooting + +- **Docker not available** – Integration tests are skipped automatically when + the Docker CLI is missing. Install Docker Desktop (macOS/Windows) or the + `docker` package (Linux) to enable them. +- **Missing API keys** – Set `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, or + `KIMI_API_KEY` as appropriate. For offline development use + `CORTEX_PROVIDER=fake` plus optional `CORTEX_FAKE_COMMANDS`. +- **Docstring coverage failures** – Add module/class/function docstrings. The + CodeRabbit gate requires 80% coverage. + +By following this guide, contributors can quickly validate their changes and +ship reliable improvements to Cortex. 
diff --git a/test/integration/__init__.py b/test/integration/__init__.py new file mode 100644 index 0000000..4630c8a --- /dev/null +++ b/test/integration/__init__.py @@ -0,0 +1 @@ +"""Integration tests for Cortex executed against Docker-based environments.""" diff --git a/test/integration/docker_utils.py b/test/integration/docker_utils.py new file mode 100644 index 0000000..23426f2 --- /dev/null +++ b/test/integration/docker_utils.py @@ -0,0 +1,108 @@ +"""Helpers for running Cortex integration tests inside Docker containers.""" + +from __future__ import annotations + +import os +import shutil +import subprocess +from dataclasses import dataclass +from pathlib import Path +from typing import Dict, Iterable, List, Optional, Tuple + + +@dataclass +class DockerRunResult: + """Container execution result metadata.""" + + returncode: int + stdout: str + stderr: str + + def succeeded(self) -> bool: + """Return ``True`` when the container exited successfully.""" + return self.returncode == 0 + + +def docker_available() -> bool: + """Return ``True`` when the Docker client is available on the host.""" + + docker_path = shutil.which("docker") + if not docker_path: + return False + + try: + subprocess.run( + [docker_path, "--version"], + check=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + timeout=5, + ) + subprocess.run( + [docker_path, "info"], + check=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + timeout=5, + ) + return True + except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError, OSError): + return False + + +def run_in_docker( + image: str, + command: str, + *, + env: Optional[Dict[str, str]] = None, + mounts: Optional[Iterable[Tuple[Path, str]]] = None, + workdir: str = "/workspace", + timeout: int = 300, +) -> DockerRunResult: + """Run ``command`` inside the specified Docker ``image``. + + Parameters + ---------- + image: + Docker image tag to use. 
+ command: + Shell command executed via ``bash -lc`` inside the container. + env: + Optional environment variables exported inside the container. + mounts: + Iterable of host ``Path`` instances mounted read-only to the same + location within the container. + workdir: + Working directory set inside the container. + timeout: + Maximum run time in seconds before raising ``TimeoutExpired``. + """ + + docker_cmd: List[str] = ["docker", "run", "--rm"] + + for key, value in (env or {}).items(): + docker_cmd.extend(["-e", f"{key}={value}"]) + + for host_path, container_path in mounts or []: + docker_cmd.extend([ + "-v", + f"{str(host_path.resolve())}:{container_path}", + ]) + + docker_cmd.extend(["-w", workdir]) + + docker_cmd.append(image) + docker_cmd.extend(["bash", "-lc", command]) + + result = subprocess.run( + docker_cmd, + check=False, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + timeout=timeout, + ) + + return DockerRunResult(result.returncode, result.stdout, result.stderr) diff --git a/test/integration/test_end_to_end.py b/test/integration/test_end_to_end.py new file mode 100644 index 0000000..f48212e --- /dev/null +++ b/test/integration/test_end_to_end.py @@ -0,0 +1,114 @@ +"""Docker-backed integration tests that exercise Cortex end-to-end flows.""" + +from __future__ import annotations + +import json +import os +import unittest +from pathlib import Path + +from .docker_utils import docker_available, run_in_docker + +REPO_ROOT = Path(__file__).resolve().parents[2] +DEFAULT_IMAGE = os.environ.get("CORTEX_INTEGRATION_IMAGE", "python:3.11-slim") +MOUNT = (REPO_ROOT, "/workspace") +BASE_ENV = { + "PYTHONUNBUFFERED": "1", + "PYTHONPATH": "/workspace", + "PYTHONDONTWRITEBYTECODE": "1", +} +PIP_BOOTSTRAP = "python -m pip install --quiet requests" + + +@unittest.skipUnless(docker_available(), "Docker is required for integration tests") +class TestEndToEndWorkflows(unittest.TestCase): + """Run Cortex commands inside disposable Docker containers.""" + 
+ def _run(self, command: str, env: dict | None = None): + effective_env = dict(BASE_ENV) + if env: + effective_env.update(env) + return run_in_docker( + DEFAULT_IMAGE, + f"{PIP_BOOTSTRAP} && {command}", + env=effective_env, + mounts=[MOUNT], + workdir="/workspace", + ) + + def test_cli_help_executes(self): + """`cortex --help` should run successfully in a clean container.""" + + result = self._run("python -m cortex.cli --help") + self.assertTrue(result.succeeded(), msg=result.stderr) + self.assertIn("AI-powered Linux command interpreter", result.stdout) + + def test_cli_dry_run_with_fake_provider(self): + """Dry-run installations rely on the fake provider and skip API calls.""" + + fake_commands = json.dumps({ + "commands": [ + "echo Step 1", + "echo Step 2", + ] + }) + env = { + "CORTEX_PROVIDER": "fake", + "CORTEX_FAKE_COMMANDS": fake_commands, + } + result = self._run("python -m cortex.cli install docker --dry-run", env=env) + + self.assertTrue(result.succeeded(), msg=result.stderr) + self.assertIn("Generated commands", result.stdout) + self.assertIn("echo Step 1", result.stdout) + + def test_cli_execute_with_fake_provider(self): + """Execution mode should run fake commands without touching the host.""" + + fake_commands = json.dumps({ + "commands": [ + "echo Exec Step 1", + "echo Exec Step 2", + ] + }) + env = { + "CORTEX_PROVIDER": "fake", + "CORTEX_FAKE_COMMANDS": fake_commands, + } + result = self._run("python -m cortex.cli install docker --execute", env=env) + + self.assertTrue(result.succeeded(), msg=result.stderr) + self.assertIn("[SUCCESS] docker installed successfully!", result.stdout) + + def test_coordinator_executes_in_container(self): + """InstallationCoordinator should execute simple commands inside Docker.""" + + script = ( + "python - <<'PY'\n" + "from cortex.coordinator import InstallationCoordinator\n" + "plan = InstallationCoordinator(['echo coordinator step'])\n" + "result = plan.execute()\n" + "assert result.success\n" + "print('STEPS', 
len(result.steps))\n" + "PY" + ) + result = self._run(script) + + self.assertTrue(result.succeeded(), msg=result.stderr) + self.assertIn("STEPS 1", result.stdout) + + def test_project_tests_run_inside_container(self): + """The unified test runner should pass within the container.""" + + env = { + "CORTEX_PROVIDER": "fake", + "CORTEX_FAKE_COMMANDS": json.dumps({"commands": ["echo plan"]}), + } + result = self._run("python test/run_all_tests.py", env=env) + + self.assertTrue(result.succeeded(), msg=result.stderr) + self.assertIn("OK", result.stdout.splitlines()[-1]) + + +if __name__ == "__main__": # pragma: no cover + unittest.main() diff --git a/test/run_all_tests.py b/test/run_all_tests.py new file mode 100644 index 0000000..6c45961 --- /dev/null +++ b/test/run_all_tests.py @@ -0,0 +1,37 @@ +"""Unified test runner that discovers unit and integration suites.""" + +from __future__ import annotations + +import argparse +import os +import sys +import unittest + + +def discover_tests(pattern: str = "test_*.py") -> unittest.TestSuite: + """Discover tests starting from the repository's ``test`` directory.""" + + start_dir = os.path.dirname(__file__) + loader = unittest.TestLoader() + return loader.discover(start_dir=start_dir, pattern=pattern) + + +def main(argv: list[str] | None = None) -> int: + """Execute all test suites and return the exit code.""" + + parser = argparse.ArgumentParser(description="Run Cortex unit/integration tests") + parser.add_argument( + "--pattern", + default="test_*.py", + help="Glob pattern used for discovery (defaults to test_*.py)", + ) + args = parser.parse_args(argv) + + suite = discover_tests(pattern=args.pattern) + runner = unittest.TextTestRunner(verbosity=2) + result = runner.run(suite) + return 0 if result.wasSuccessful() else 1 + + +if __name__ == "__main__": # pragma: no cover + sys.exit(main()) \ No newline at end of file diff --git a/test/test_cli.py b/test/test_cli.py new file mode 100644 index 0000000..bfdb1b0 --- /dev/null +++ 
b/test/test_cli.py @@ -0,0 +1,243 @@ +import os +import sys +import unittest +from unittest.mock import Mock, patch + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) + +from cortex.cli import CortexCLI, main + + +class TestCortexCLI(unittest.TestCase): + """Unit tests covering the high-level CLI behaviours.""" + + def setUp(self) -> None: + self.cli = CortexCLI() + + @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}, clear=True) + def test_get_api_key_openai(self) -> None: + api_key = self.cli._get_api_key('openai') + self.assertEqual(api_key, 'test-key') + + @patch.dict(os.environ, {'ANTHROPIC_API_KEY': 'test-claude-key'}, clear=True) + def test_get_api_key_claude(self) -> None: + api_key = self.cli._get_api_key('claude') + self.assertEqual(api_key, 'test-claude-key') + + @patch.dict(os.environ, {'KIMI_API_KEY': 'kimi-key'}, clear=True) + def test_get_api_key_kimi(self) -> None: + api_key = self.cli._get_api_key('kimi') + self.assertEqual(api_key, 'kimi-key') + + @patch.dict(os.environ, {}, clear=True) + @patch('sys.stderr') + def test_get_api_key_not_found(self, mock_stderr) -> None: + api_key = self.cli._get_api_key('openai') + self.assertIsNone(api_key) + + @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}, clear=True) + def test_get_provider_openai(self) -> None: + provider = self.cli._get_provider() + self.assertEqual(provider, 'openai') + + @patch.dict(os.environ, {'ANTHROPIC_API_KEY': 'test-key'}, clear=True) + def test_get_provider_claude(self) -> None: + provider = self.cli._get_provider() + self.assertEqual(provider, 'claude') + + @patch.dict(os.environ, {'KIMI_API_KEY': 'kimi'}, clear=True) + def test_get_provider_kimi(self) -> None: + provider = self.cli._get_provider() + self.assertEqual(provider, 'kimi') + + @patch.dict(os.environ, {'CORTEX_PROVIDER': 'fake'}, clear=True) + def test_get_provider_override(self) -> None: + provider = self.cli._get_provider() + self.assertEqual(provider, 'fake') + + @patch('sys.stdout') + 
def test_print_status(self, mock_stdout) -> None: + self.cli._print_status('[INFO]', 'Test message') + self.assertTrue(mock_stdout.write.called or print) + + @patch('sys.stderr') + def test_print_error(self, mock_stderr) -> None: + self.cli._print_error('Test error') + self.assertTrue(mock_stderr.write.called) + + @patch('sys.stdout') + def test_print_success(self, mock_stdout) -> None: + self.cli._print_success('Test success') + self.assertTrue(mock_stdout.write.called) + + @patch.dict(os.environ, {}, clear=True) + def test_install_no_api_key(self) -> None: + result = self.cli.install('docker') + self.assertEqual(result, 1) + + @patch.dict(os.environ, {'CORTEX_PROVIDER': 'fake', 'CORTEX_FAKE_COMMANDS': ''}, clear=True) + @patch('cortex.cli.CommandInterpreter') + def test_install_fake_provider_skips_api_key(self, mock_interpreter_class) -> None: + mock_interpreter = Mock() + mock_interpreter.parse.return_value = ['echo test'] + mock_interpreter_class.return_value = mock_interpreter + + result = self.cli.install('docker') + + self.assertEqual(result, 0) + mock_interpreter.parse.assert_called_once_with('install docker') + + @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}, clear=True) + @patch('cortex.cli.CommandInterpreter') + def test_install_dry_run(self, mock_interpreter_class) -> None: + mock_interpreter = Mock() + mock_interpreter.parse.return_value = ['apt update', 'apt install docker'] + mock_interpreter_class.return_value = mock_interpreter + + result = self.cli.install('docker', dry_run=True) + + self.assertEqual(result, 0) + mock_interpreter.parse.assert_called_once_with('install docker') + + @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}, clear=True) + @patch('cortex.cli.CommandInterpreter') + def test_install_no_execute(self, mock_interpreter_class) -> None: + mock_interpreter = Mock() + mock_interpreter.parse.return_value = ['apt update', 'apt install docker'] + mock_interpreter_class.return_value = mock_interpreter + + result = 
self.cli.install('docker', execute=False) + + self.assertEqual(result, 0) + mock_interpreter.parse.assert_called_once() + + @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}, clear=True) + @patch('cortex.cli.CommandInterpreter') + @patch('cortex.cli.InstallationCoordinator') + def test_install_with_execute_success(self, mock_coordinator_class, mock_interpreter_class) -> None: + mock_interpreter = Mock() + mock_interpreter.parse.return_value = ['echo test'] + mock_interpreter_class.return_value = mock_interpreter + + mock_coordinator = Mock() + mock_result = Mock() + mock_result.success = True + mock_result.total_duration = 1.5 + mock_coordinator.execute.return_value = mock_result + mock_coordinator_class.return_value = mock_coordinator + + result = self.cli.install('docker', execute=True) + + self.assertEqual(result, 0) + mock_coordinator.execute.assert_called_once() + + @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}, clear=True) + @patch('cortex.cli.CommandInterpreter') + @patch('cortex.cli.InstallationCoordinator') + def test_install_with_execute_failure(self, mock_coordinator_class, mock_interpreter_class) -> None: + mock_interpreter = Mock() + mock_interpreter.parse.return_value = ['invalid command'] + mock_interpreter_class.return_value = mock_interpreter + + mock_coordinator = Mock() + mock_result = Mock() + mock_result.success = False + mock_result.failed_step = 0 + mock_result.error_message = 'command not found' + mock_coordinator.execute.return_value = mock_result + mock_coordinator_class.return_value = mock_coordinator + + result = self.cli.install('docker', execute=True) + + self.assertEqual(result, 1) + + @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}, clear=True) + @patch('cortex.cli.CommandInterpreter') + def test_install_no_commands_generated(self, mock_interpreter_class) -> None: + mock_interpreter = Mock() + mock_interpreter.parse.return_value = [] + mock_interpreter_class.return_value = mock_interpreter + + result = 
self.cli.install('docker') + + self.assertEqual(result, 1) + + @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}, clear=True) + @patch('cortex.cli.CommandInterpreter') + def test_install_value_error(self, mock_interpreter_class) -> None: + mock_interpreter = Mock() + mock_interpreter.parse.side_effect = ValueError('Invalid input') + mock_interpreter_class.return_value = mock_interpreter + + result = self.cli.install('docker') + + self.assertEqual(result, 1) + + @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}, clear=True) + @patch('cortex.cli.CommandInterpreter') + def test_install_runtime_error(self, mock_interpreter_class) -> None: + mock_interpreter = Mock() + mock_interpreter.parse.side_effect = RuntimeError('API failed') + mock_interpreter_class.return_value = mock_interpreter + + result = self.cli.install('docker') + + self.assertEqual(result, 1) + + @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}, clear=True) + @patch('cortex.cli.CommandInterpreter') + def test_install_unexpected_error(self, mock_interpreter_class) -> None: + mock_interpreter = Mock() + mock_interpreter.parse.side_effect = Exception('Unexpected') + mock_interpreter_class.return_value = mock_interpreter + + result = self.cli.install('docker') + + self.assertEqual(result, 1) + + @patch('sys.argv', ['cortex']) + def test_main_no_command(self) -> None: + result = main() + self.assertEqual(result, 1) + + @patch('sys.argv', ['cortex', '--test']) + @patch('cortex.cli.subprocess.run') + def test_main_test_flag(self, mock_run) -> None: + mock_run.return_value.returncode = 0 + with patch('os.path.exists', return_value=True): + result = main() + self.assertEqual(result, 0) + mock_run.assert_called_once() + + @patch('sys.argv', ['cortex', 'install', 'docker']) + @patch('cortex.cli.CortexCLI.install') + def test_main_install_command(self, mock_install) -> None: + mock_install.return_value = 0 + result = main() + self.assertEqual(result, 0) + 
mock_install.assert_called_once_with('docker', execute=False, dry_run=False) + + @patch('sys.argv', ['cortex', 'install', 'docker', '--execute']) + @patch('cortex.cli.CortexCLI.install') + def test_main_install_with_execute(self, mock_install) -> None: + mock_install.return_value = 0 + result = main() + self.assertEqual(result, 0) + mock_install.assert_called_once_with('docker', execute=True, dry_run=False) + + @patch('sys.argv', ['cortex', 'install', 'docker', '--dry-run']) + @patch('cortex.cli.CortexCLI.install') + def test_main_install_with_dry_run(self, mock_install) -> None: + mock_install.return_value = 0 + result = main() + self.assertEqual(result, 0) + mock_install.assert_called_once_with('docker', execute=False, dry_run=True) + + def test_spinner_animation(self) -> None: + initial_idx = self.cli.spinner_idx + self.cli._animate_spinner('Testing') + self.assertNotEqual(self.cli.spinner_idx, initial_idx) + + +if __name__ == '__main__': + unittest.main() From 27374195c1b59b6aa09b2893f8e90403994f45d5 Mon Sep 17 00:00:00 2001 From: sahil Date: Tue, 18 Nov 2025 19:28:29 +0530 Subject: [PATCH 07/12] Add Kimi provider and integration tests issue cortexlinux#40 --- LLM/interpreter.py | 39 ++++- LLM/test_interpreter.py | 33 ++++ cortex/cli.py | 324 ++++++++++++++++++++++++++++++++++++---- cortex/test_cli.py | 28 +++- 4 files changed, 386 insertions(+), 38 deletions(-) diff --git a/LLM/interpreter.py b/LLM/interpreter.py index d9907ca..9c499d3 100644 --- a/LLM/interpreter.py +++ b/LLM/interpreter.py @@ -12,6 +12,7 @@ class APIProvider(Enum): CLAUDE = "claude" OPENAI = "openai" KIMI = "kimi" + GROQ = "groq" FAKE = "fake" @@ -42,6 +43,14 @@ def _initialize_client(self): self.client = OpenAI(api_key=self.api_key) except ImportError: raise ImportError("OpenAI package not installed. 
Run: pip install openai") + elif self.provider == APIProvider.GROQ: + try: + from openai import OpenAI + base_url = os.environ.get("GROQ_BASE_URL", "https://api.groq.com/openai/v1") + self.client = OpenAI(api_key=self.api_key, base_url=base_url) + self._groq_base_url = base_url + except ImportError: + raise ImportError("OpenAI package not installed. Run: pip install openai") elif self.provider == APIProvider.CLAUDE: try: from anthropic import Anthropic @@ -133,7 +142,8 @@ def _call_kimi(self, user_input: str) -> List[str]: } try: - response = self.client.post( + import requests + response = requests.post( f"{self._kimi_base_url.rstrip('/')}/v1/chat/completions", headers=headers, json=payload, @@ -148,6 +158,8 @@ def _call_kimi(self, user_input: str) -> List[str]: if not content: raise RuntimeError("Kimi API returned empty content") return self._parse_commands(content) + except ImportError as ie: + raise RuntimeError("Requests package not installed. Run: pip install requests") from ie except Exception as exc: raise RuntimeError(f"Kimi API call failed: {str(exc)}") from exc @@ -232,6 +244,8 @@ def parse(self, user_input: str, validate: bool = True) -> List[str]: commands = self._call_claude(user_input) elif self.provider == APIProvider.KIMI: commands = self._call_kimi(user_input) + elif self.provider == APIProvider.GROQ: + commands = self._call_groq(user_input) elif self.provider == APIProvider.FAKE: commands = self._call_fake(user_input) else: @@ -260,9 +274,30 @@ def _default_model(self) -> str: """Return the default model identifier for the active provider.""" if self.provider == APIProvider.OPENAI: - return "gpt-4" + return "gpt-4o" + if self.provider == APIProvider.GROQ: + return os.environ.get("GROQ_DEFAULT_MODEL", "llama-3.3-70b-versatile") if self.provider == APIProvider.CLAUDE: return "claude-3-5-sonnet-20241022" if self.provider == APIProvider.KIMI: return os.environ.get("KIMI_DEFAULT_MODEL", "kimi-k2") return "fake-local-model" + + def _call_groq(self, 
user_input: str) -> List[str]: + """Call the Groq OpenAI-compatible endpoint and parse the response.""" + + try: + response = self.client.chat.completions.create( + model=self.model, + messages=[ + {"role": "system", "content": self._get_system_prompt()}, + {"role": "user", "content": user_input} + ], + temperature=0.3, + max_tokens=1000 + ) + + content = response.choices[0].message.content.strip() + return self._parse_commands(content) + except Exception as exc: + raise RuntimeError(f"Groq API call failed: {str(exc)}") from exc diff --git a/LLM/test_interpreter.py b/LLM/test_interpreter.py index eeb17d5..4da711a 100644 --- a/LLM/test_interpreter.py +++ b/LLM/test_interpreter.py @@ -30,6 +30,14 @@ def test_initialization_openai(self, mock_openai): self.assertEqual(interpreter.model, "gpt-4") mock_openai.assert_called_once_with(api_key=self.api_key) + @patch.dict(os.environ, {'GROQ_BASE_URL': 'https://example.com/api'}, clear=True) + @patch('openai.OpenAI') + def test_initialization_groq(self, mock_openai): + interpreter = CommandInterpreter(api_key=self.api_key, provider="groq") + self.assertEqual(interpreter.provider, APIProvider.GROQ) + self.assertEqual(interpreter.model, "llama-3.3-70b-versatile") + mock_openai.assert_called_once_with(api_key=self.api_key, base_url='https://example.com/api') + @patch('anthropic.Anthropic') def test_initialization_claude(self, mock_anthropic): interpreter = CommandInterpreter(api_key=self.api_key, provider="claude") @@ -125,6 +133,31 @@ def test_call_openai_failure(self, mock_openai): with self.assertRaises(RuntimeError): interpreter._call_openai("install docker") + + @patch('openai.OpenAI') + def test_call_groq_success(self, mock_openai): + mock_client = Mock() + mock_response = Mock() + mock_response.choices = [Mock()] + mock_response.choices[0].message.content = '{"commands": ["apt update"]}' + mock_client.chat.completions.create.return_value = mock_response + + interpreter = CommandInterpreter(api_key=self.api_key, 
provider="groq") + interpreter.client = mock_client + + result = interpreter._call_groq("install docker") + self.assertEqual(result, ["apt update"]) + + @patch('openai.OpenAI') + def test_call_groq_failure(self, mock_openai): + mock_client = Mock() + mock_client.chat.completions.create.side_effect = Exception("API Error") + + interpreter = CommandInterpreter(api_key=self.api_key, provider="groq") + interpreter.client = mock_client + + with self.assertRaises(RuntimeError): + interpreter._call_groq("install docker") @patch('anthropic.Anthropic') def test_call_claude_success(self, mock_anthropic): diff --git a/cortex/cli.py b/cortex/cli.py index 6bdb22f..be20516 100644 --- a/cortex/cli.py +++ b/cortex/cli.py @@ -1,16 +1,23 @@ """Command-line interface entry point for the Cortex automation toolkit.""" import argparse +import json import os import subprocess import sys import time -from typing import Optional +from typing import List, Optional +from getpass import getpass +from pathlib import Path from LLM.interpreter import CommandInterpreter from cortex.coordinator import InstallationCoordinator, StepStatus +class ProviderExecutionError(RuntimeError): + """Raised when an API provider fails in a recoverable way.""" + + class CortexCLI: """Command-line interface for Cortex AI-powered software installation.""" @@ -19,39 +26,107 @@ def __init__(self) -> None: self.spinner_chars = ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'] self.spinner_idx = 0 + # ----------------------- + # Credential persistence + # ----------------------- + def _cred_path(self) -> Path: + """Return the path to the user credentials file ("~/.cortex/credentials.json").""" + base = Path.home() / ".cortex" + base.mkdir(parents=True, exist_ok=True) + return base / "credentials.json" + + def _load_creds(self) -> dict: + """Load persisted credentials if present; return an empty dict on first use.""" + try: + p = self._cred_path() + if p.exists(): + return json.loads(p.read_text(encoding="utf-8")) + 
except Exception: + pass + return {} + + def _save_api_key(self, provider: str, key: str) -> None: + """Persist API key for ``provider`` into the credentials file.""" + data = self._load_creds() + data.setdefault("providers", {})[provider] = {"api_key": key} + self._cred_path().write_text(json.dumps(data, indent=2), encoding="utf-8") + self._print_success(f"Saved {provider} API key to ~/.cortex/credentials.json") + + def _provider_priority(self) -> List[str]: + """Return providers in the order we auto-select them when no override is given.""" + return ['groq', 'openai', 'claude', 'kimi', 'fake'] + def _get_provider(self) -> str: """Detect which LLM provider to use based on configuration and credentials.""" provider_override = os.environ.get('CORTEX_PROVIDER') if provider_override: return provider_override.lower() - if os.environ.get('OPENAI_API_KEY'): - return 'openai' - if os.environ.get('ANTHROPIC_API_KEY'): - return 'claude' - if os.environ.get('KIMI_API_KEY'): - return 'kimi' - if os.environ.get('CORTEX_FAKE_COMMANDS'): - return 'fake' + for provider in self._provider_priority(): + if self._provider_has_credentials(provider): + return provider + return 'openai' - def _get_api_key(self, provider: str) -> Optional[str]: - """Return the API key for the specified provider or emit guidance if missing.""" - env_map = { + def _env_var_for(self, provider: str) -> Optional[str]: + """Return the environment variable name for the given provider.""" + return { + 'groq': 'GROQ_API_KEY', 'openai': 'OPENAI_API_KEY', 'claude': 'ANTHROPIC_API_KEY', 'kimi': 'KIMI_API_KEY', - } + }.get(provider) - env_var = env_map.get(provider) + def _provider_has_credentials(self, provider: str) -> bool: + """Return ``True`` if we can find credentials for the provider.""" + if provider == 'fake': + return bool(os.environ.get('CORTEX_FAKE_COMMANDS')) + + env_var = self._env_var_for(provider) + if env_var and os.environ.get(env_var): + return True + + creds = self._load_creds().get('providers', 
{}) + return bool(creds.get(provider, {}).get('api_key')) + + def _provider_candidates(self, primary: str) -> List[str]: + """Return a deduplicated provider attempt order for fallback handling.""" + ordered = [primary] + [p for p in self._provider_priority() if p != primary] + candidates: List[str] = [] + for provider in ordered: + if provider in candidates: + continue + if self._provider_has_credentials(provider): + candidates.append(provider) + + if not candidates: + candidates.append(primary) + return candidates + + def _get_api_key(self, provider: str, silent: bool = False) -> Optional[str]: + """Return the API key for the specified provider or emit guidance if missing. + + Order of resolution: + 1. Environment variables (session overrides) + 2. Persisted credentials in ``~/.cortex/credentials.json`` + """ + env_var = self._env_var_for(provider) if not env_var: return None api_key = os.environ.get(env_var) - if not api_key: - self._print_error(f"API key not found. Set {env_var} environment variable.") - return None - return api_key + if api_key: + return api_key + + # Fallback to persisted credentials + creds = self._load_creds() + key = creds.get("providers", {}).get(provider, {}).get("api_key") + if key: + return key + + if not silent: + self._print_error(f"API key not found. 
Set {env_var} or run 'cortex --set-{provider}'.") + return None def _print_status(self, label: str, message: str) -> None: """Emit informational output with a consistent status label.""" @@ -78,16 +153,48 @@ def _clear_line(self) -> None: sys.stdout.flush() def install(self, software: str, execute: bool = False, dry_run: bool = False) -> int: - """Interpret a natural-language request and optionally execute the plan.""" - - provider = self._get_provider() + """Interpret a natural-language request and optionally execute the plan with fallback.""" + + primary_provider = self._get_provider() + candidates = self._provider_candidates(primary_provider) + failure_messages: List[str] = [] + + for index, provider in enumerate(candidates): + is_fake = provider == 'fake' + if is_fake: + api_key = os.environ.get('CORTEX_FAKE_API_KEY', 'fake-api-key') + else: + api_key = self._get_api_key(provider, silent=index != 0) + if not api_key: + failure_messages.append(f"{provider}: missing API key") + continue + + if provider != primary_provider: + self._print_status("[INFO]", f"Switching to {provider} provider...") + + try: + return self._install_with_provider(provider, api_key, software, execute, dry_run) + except ProviderExecutionError as exc: + failure_messages.append(f"{provider}: {str(exc)}") + self._print_error(f"{provider} provider failed. Trying next option...") + continue + + self._print_error("All configured providers failed. 
Please verify your API keys and try again.") + if failure_messages: + print("Details:", file=sys.stderr) + for message in failure_messages: + print(f" - {message}", file=sys.stderr) + return 1 - if provider == 'fake': - api_key = os.environ.get('CORTEX_FAKE_API_KEY', 'fake-api-key') - else: - api_key = self._get_api_key(provider) - if not api_key: - return 1 + def _install_with_provider( + self, + provider: str, + api_key: str, + software: str, + execute: bool, + dry_run: bool, + ) -> int: + """Execute the install flow against a specific provider.""" try: self._print_status("[INFO]", "Understanding request...") @@ -155,15 +262,120 @@ def progress_callback(current: int, total: int, step) -> None: return 0 except ValueError as exc: - self._print_error(str(exc)) - return 1 + raise ProviderExecutionError(str(exc)) from exc except RuntimeError as exc: - self._print_error(f"API call failed: {str(exc)}") + raise ProviderExecutionError(f"API call failed: {str(exc)}") from exc + except Exception as exc: + raise ProviderExecutionError(f"Unexpected error: {str(exc)}") from exc + + # ----------------------- + # Support commands + # ----------------------- + def set_api_key(self, provider: str) -> int: + """Prompt user for API key and persist it for the selected provider.""" + names = { + 'groq': 'Groq', + 'openai': 'OpenAI', + 'claude': 'Anthropic Claude', + 'kimi': 'Moonshot Kimi', + } + pretty = names.get(provider, provider.capitalize()) + print(f"Enter {pretty} API key (input hidden): ", end="", flush=True) + key = getpass("") + if not key: + self._print_error("No key entered. 
Nothing changed.") return 1 + self._save_api_key(provider, key) + # Also set in current process so it works immediately + env_map = { + 'openai': 'OPENAI_API_KEY', + 'claude': 'ANTHROPIC_API_KEY', + 'kimi': 'KIMI_API_KEY', + 'groq': 'GROQ_API_KEY', + } + env_var = env_map.get(provider) + if env_var: + os.environ[env_var] = key + self._print_success(f"{pretty} key is now active in this session.") + print("Tip: Run 'cortex --test-api' to verify connectivity.") + return 0 + + def test_api(self, provider: Optional[str] = None) -> int: + """Validate API connectivity and provide simple, human-readable feedback.""" + import requests + + prov = provider or self._get_provider() + if prov == 'fake': + print("[WARN] No real provider configured. Set a key or use --test-fake.") + return 1 + key = self._get_api_key(prov) + if not key: + return 1 + + try: + if prov == 'openai': + url = 'https://api.openai.com/v1/models' + headers = {"Authorization": f"Bearer {key}"} + elif prov == 'claude': + url = 'https://api.anthropic.com/v1/models' + headers = {"x-api-key": key, "anthropic-version": "2023-06-01"} + elif prov == 'kimi': + base = os.environ.get('KIMI_API_BASE_URL', 'https://api.moonshot.cn') + url = f"{base.rstrip('/')}/v1/models" + headers = {"Authorization": f"Bearer {key}"} + elif prov == 'groq': + base = os.environ.get('GROQ_BASE_URL', 'https://api.groq.com/openai/v1') + url = f"{base.rstrip('/')}/models" + headers = {"Authorization": f"Bearer {key}"} + else: + self._print_error("Unknown provider for API test.") + return 1 + + resp = requests.get(url, headers=headers, timeout=10) + if resp.status_code == 200: + data = resp.json() + # Try to pull a few model ids if present + ids = [] + if isinstance(data, dict) and isinstance(data.get('data'), list): + ids = [it.get('id') for it in data['data'] if isinstance(it, dict) and 'id' in it][:3] + models = (", ".join([m for m in ids if m])) or "(model list available)" + self._print_success(f"API connection OK ({prov}). 
Example models: {models}") + return 0 + + if resp.status_code in (401, 403): + self._print_error("Invalid or unauthorized API key. Please re-check your key.") + print("Hint: Reset it via 'cortex --set-gpt' / '--set-claude' / '--set-kimi' / '--set-groq'.") + return 1 + if resp.status_code == 429: + self._print_error("Rate limited. You might be out of credits or hitting limits.") + print("Try again later or check your account usage.") + return 1 + + msg = resp.text.strip() + self._print_error(f"Service returned {resp.status_code}. Details: {msg[:200]}") + return 1 + except Exception as exc: - self._print_error(f"Unexpected error: {str(exc)}") + self._print_error(f"Network check failed: {str(exc)}") + print("Please verify your internet connection and firewall settings.") return 1 + def test_fake(self) -> int: + """Run a tiny fake-provider demo with clear warnings for end users.""" + print("[WARN] Running in TEST MODE with a fake AI provider.") + print("No real API keys are needed, and no changes are made.") + os.environ['CORTEX_PROVIDER'] = 'fake' + # Prefer a deterministic fake output if none provided + os.environ.setdefault('CORTEX_FAKE_COMMANDS', json.dumps({ + "commands": [ + "echo Checking system...", + "echo Simulating install...", + "echo All good!" 
+ ] + })) + # Show a single dry-run flow to keep it simple + return self.install("docker", execute=False, dry_run=True) + def main() -> int: """Entry point for the cortex CLI command.""" @@ -181,14 +393,22 @@ def main() -> int: cortex --test Environment Variables: + GROQ_API_KEY Groq API key for Llama models OPENAI_API_KEY OpenAI API key for GPT models ANTHROPIC_API_KEY Anthropic API key for Claude models KIMI_API_KEY Moonshot Kimi API key for K2 models - CORTEX_PROVIDER Optional override (openai|claude|kimi|fake) + CORTEX_PROVIDER Optional override (groq|openai|claude|kimi|fake) """ ) - parser.add_argument('--test', action='store_true', help='Run all test suites') + parser.add_argument('--test', action='store_true', help='Run all developer test suites') + parser.add_argument('--test-api', action='store_true', help='Test API connection for your configured provider') + parser.add_argument('--provider', choices=['groq','openai','claude','kimi','fake','all'], help='Provider to use for this command or for --test-api (use "all" only with --test-api)') + parser.add_argument('--test-fake', action='store_true', help='Quick check without API keys (uses fake commands)') + parser.add_argument('--set-gpt', action='store_true', help='Set your OpenAI API key') + parser.add_argument('--set-claude', dest='set_claude', action='store_true', help='Set your Anthropic Claude API key') + parser.add_argument('--set-kimi', action='store_true', help='Set your Moonshot Kimi API key') + parser.add_argument('--set-groq', action='store_true', help='Set your Groq API key') subparsers = parser.add_subparsers(dest='command', help='Available commands') @@ -199,6 +419,44 @@ def main() -> int: args = parser.parse_args() + if args.set_gpt: + return CortexCLI().set_api_key('openai') + + if getattr(args, 'set_claude', False): + return CortexCLI().set_api_key('claude') + + if args.set_kimi: + return CortexCLI().set_api_key('kimi') + + if args.set_groq: + return CortexCLI().set_api_key('groq') + + # If 
user supplied a provider on the CLI (e.g. `--provider groq`), make it + # active for this invocation. The special value `all` is only meaningful + # when used with `--test-api` and is not applied as an active provider. + if args.provider and args.provider != 'all': + os.environ['CORTEX_PROVIDER'] = args.provider + + if args.test_api: + cli = CortexCLI() + if args.provider == 'all': + any_success = False + for prov in cli._provider_priority(): + if prov == 'fake': + continue + if not cli._provider_has_credentials(prov): + continue + print(f"\n[CHECK] Testing {prov}...") + rc = cli.test_api(prov) + any_success = any_success or (rc == 0) + return 0 if any_success else 1 + if args.provider: + return cli.test_api(args.provider) + return cli.test_api() + + if args.test_fake: + return CortexCLI().test_fake() + if args.test: test_dir = os.path.join(os.path.dirname(__file__), '..', 'test') test_runner = os.path.join(test_dir, 'run_all_tests.py') diff --git a/cortex/test_cli.py b/cortex/test_cli.py index 635ad06..7515d90 100644 --- a/cortex/test_cli.py +++ b/cortex/test_cli.py @@ -12,27 +12,49 @@ class TestCortexCLI(unittest.TestCase): def setUp(self): self.cli = CortexCLI() + self.creds_patcher = patch.object(CortexCLI, '_load_creds', return_value={}) + self.creds_patcher.start() + self.addCleanup(self.creds_patcher.stop) @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) def test_get_api_key_openai(self): - api_key = self.cli._get_api_key() + api_key = self.cli._get_api_key('openai') self.assertEqual(api_key, 'test-key') @patch.dict(os.environ, {'ANTHROPIC_API_KEY': 'test-claude-key', 'OPENAI_API_KEY': ''}, clear=True) def test_get_api_key_claude(self): - api_key = self.cli._get_api_key() + api_key = self.cli._get_api_key('claude') self.assertEqual(api_key, 'test-claude-key') @patch.dict(os.environ, {}, clear=True) @patch('sys.stderr') def test_get_api_key_not_found(self, mock_stderr): - api_key = self.cli._get_api_key() + api_key = self.cli._get_api_key('openai') 
self.assertIsNone(api_key) + + @patch.dict(os.environ, {'GROQ_API_KEY': 'groq-key'}, clear=True) + def test_get_provider_groq(self): + provider = self.cli._get_provider() + self.assertEqual(provider, 'groq') @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) def test_get_provider_openai(self): provider = self.cli._get_provider() self.assertEqual(provider, 'openai') + + @patch.dict(os.environ, {'OPENAI_API_KEY': 'first', 'ANTHROPIC_API_KEY': 'second'}, clear=True) + @patch('cortex.cli.CommandInterpreter') + def test_install_fallback_to_second_provider(self, mock_interpreter_class): + first_instance = Mock() + first_instance.parse.side_effect = RuntimeError("OpenAI outage") + second_instance = Mock() + second_instance.parse.return_value = ["echo ok"] + mock_interpreter_class.side_effect = [first_instance, second_instance] + + result = self.cli.install("docker", dry_run=True) + + self.assertEqual(result, 0) + self.assertEqual(mock_interpreter_class.call_count, 2) @patch.dict(os.environ, {'ANTHROPIC_API_KEY': 'test-key'}, clear=True) def test_get_provider_claude(self): From 130df1a9de7b60f3d44901458ebf511d52247856 Mon Sep 17 00:00:00 2001 From: sahil Date: Tue, 2 Dec 2025 19:44:44 +0530 Subject: [PATCH 08/12] feat: Implement Kimi K2 API integration - Fixes #40 --- LLM/interpreter.py | 36 +-- LLM/requirements.txt | 1 + LLM/test_interpreter.py | 31 +-- cortex/cli.py | 49 +++- cortex/test_cli.py | 228 --------------- cortex/test_coordinator.py | 353 ------------------------ docs/ISSUE_40_KIMI_K2_IMPLEMENTATION.md | 237 ++++++++++++++++ setup.py | 8 +- test/test_cli.py | 38 ++- test/test_coordinator.py | 33 +-- 10 files changed, 326 insertions(+), 688 deletions(-) delete mode 100644 cortex/test_cli.py delete mode 100644 cortex/test_coordinator.py create mode 100644 docs/ISSUE_40_KIMI_K2_IMPLEMENTATION.md diff --git a/LLM/interpreter.py b/LLM/interpreter.py index 9c499d3..09d3b93 100644 --- a/LLM/interpreter.py +++ b/LLM/interpreter.py @@ -12,7 +12,6 @@ class 
APIProvider(Enum): CLAUDE = "claude" OPENAI = "openai" KIMI = "kimi" - GROQ = "groq" FAKE = "fake" @@ -43,14 +42,6 @@ def _initialize_client(self): self.client = OpenAI(api_key=self.api_key) except ImportError: raise ImportError("OpenAI package not installed. Run: pip install openai") - elif self.provider == APIProvider.GROQ: - try: - from openai import OpenAI - base_url = os.environ.get("GROQ_BASE_URL", "https://api.groq.com/openai/v1") - self.client = OpenAI(api_key=self.api_key, base_url=base_url) - self._groq_base_url = base_url - except ImportError: - raise ImportError("OpenAI package not installed. Run: pip install openai") elif self.provider == APIProvider.CLAUDE: try: from anthropic import Anthropic @@ -64,7 +55,7 @@ def _initialize_client(self): raise ImportError("Requests package not installed. Run: pip install requests") from exc self.client = requests - self._kimi_base_url = os.environ.get("KIMI_API_BASE_URL", "https://api.moonshot.cn") + self._kimi_base_url = os.environ.get("KIMI_API_BASE_URL", "https://api.moonshot.ai") elif self.provider == APIProvider.FAKE: # Fake provider is used for deterministic offline or integration tests. 
self.client = None @@ -244,8 +235,6 @@ def parse(self, user_input: str, validate: bool = True) -> List[str]: commands = self._call_claude(user_input) elif self.provider == APIProvider.KIMI: commands = self._call_kimi(user_input) - elif self.provider == APIProvider.GROQ: - commands = self._call_groq(user_input) elif self.provider == APIProvider.FAKE: commands = self._call_fake(user_input) else: @@ -275,29 +264,8 @@ def _default_model(self) -> str: if self.provider == APIProvider.OPENAI: return "gpt-4o" - if self.provider == APIProvider.GROQ: - return os.environ.get("GROQ_DEFAULT_MODEL", "llama-3.3-70b-versatile") if self.provider == APIProvider.CLAUDE: return "claude-3-5-sonnet-20241022" if self.provider == APIProvider.KIMI: - return os.environ.get("KIMI_DEFAULT_MODEL", "kimi-k2") + return os.environ.get("KIMI_DEFAULT_MODEL", "kimi-k2-turbo-preview") return "fake-local-model" - - def _call_groq(self, user_input: str) -> List[str]: - """Call the Groq OpenAI-compatible endpoint and parse the response.""" - - try: - response = self.client.chat.completions.create( - model=self.model, - messages=[ - {"role": "system", "content": self._get_system_prompt()}, - {"role": "user", "content": user_input} - ], - temperature=0.3, - max_tokens=1000 - ) - - content = response.choices[0].message.content.strip() - return self._parse_commands(content) - except Exception as exc: - raise RuntimeError(f"Groq API call failed: {str(exc)}") from exc diff --git a/LLM/requirements.txt b/LLM/requirements.txt index 9417894..1eea4fc 100644 --- a/LLM/requirements.txt +++ b/LLM/requirements.txt @@ -1,3 +1,4 @@ openai>=1.0.0 anthropic>=0.18.0 PyYAML>=6.0 +requests>=2.32.4 diff --git a/LLM/test_interpreter.py b/LLM/test_interpreter.py index 6df615f..6c66f98 100644 --- a/LLM/test_interpreter.py +++ b/LLM/test_interpreter.py @@ -1,6 +1,9 @@ import json -import sys import os +import sys +import unittest +from types import SimpleNamespace +from unittest.mock import Mock, patch sys.path.insert(0, 
os.path.join(os.path.dirname(__file__), '..')) @@ -24,7 +27,7 @@ def setUp(self): def test_initialization_openai(self, mock_openai): interpreter = CommandInterpreter(api_key=self.api_key, provider="openai") self.assertEqual(interpreter.provider, APIProvider.OPENAI) - self.assertEqual(interpreter.model, "gpt-4") + self.assertEqual(interpreter.model, "gpt-4o") mock_openai.assert_called_once_with(api_key=self.api_key) @patch('anthropic.Anthropic') @@ -124,30 +127,6 @@ def test_call_openai_failure(self, mock_openai): interpreter._call_openai("install docker") @patch('openai.OpenAI') - def test_call_groq_success(self, mock_openai): - mock_client = Mock() - mock_response = Mock() - mock_response.choices = [Mock()] - mock_response.choices[0].message.content = '{"commands": ["apt update"]}' - mock_client.chat.completions.create.return_value = mock_response - - interpreter = CommandInterpreter(api_key=self.api_key, provider="groq") - interpreter.client = mock_client - - result = interpreter._call_groq("install docker") - self.assertEqual(result, ["apt update"]) - - @patch('openai.OpenAI') - def test_call_groq_failure(self, mock_openai): - mock_client = Mock() - mock_client.chat.completions.create.side_effect = Exception("API Error") - - interpreter = CommandInterpreter(api_key=self.api_key, provider="groq") - interpreter.client = mock_client - - with self.assertRaises(RuntimeError): - interpreter._call_groq("install docker") - @patch('anthropic.Anthropic') def test_call_claude_success(self, mock_anthropic): mock_client = Mock() diff --git a/cortex/cli.py b/cortex/cli.py index b3981a9..7117ce4 100644 --- a/cortex/cli.py +++ b/cortex/cli.py @@ -28,19 +28,39 @@ def __init__(self): self.spinner_idx = 0 self.prefs_manager = None # Lazy initialization - def _get_api_key(self) -> Optional[str]: - api_key = os.environ.get('OPENAI_API_KEY') or os.environ.get('ANTHROPIC_API_KEY') - if not api_key: - self._print_error("API key not found. 
Set OPENAI_API_KEY or ANTHROPIC_API_KEY environment variable.") - return None - return api_key - def _get_provider(self) -> str: + """Detect which LLM provider to use based on configuration and credentials.""" + provider_override = os.environ.get('CORTEX_PROVIDER') + if provider_override: + return provider_override.lower() + if os.environ.get('OPENAI_API_KEY'): return 'openai' - elif os.environ.get('ANTHROPIC_API_KEY'): + if os.environ.get('ANTHROPIC_API_KEY'): return 'claude' + if os.environ.get('KIMI_API_KEY'): + return 'kimi' + if os.environ.get('CORTEX_FAKE_COMMANDS'): + return 'fake' return 'openai' + + def _get_api_key(self, provider: str) -> Optional[str]: + """Return the API key for the specified provider or emit guidance if missing.""" + env_map = { + 'openai': 'OPENAI_API_KEY', + 'claude': 'ANTHROPIC_API_KEY', + 'kimi': 'KIMI_API_KEY', + } + + env_var = env_map.get(provider) + if not env_var: + return None + + api_key = os.environ.get(env_var) + if not api_key: + self._print_error(f"API key not found. 
Set {env_var} environment variable.") + return None + return api_key def _print_status(self, emoji: str, message: str): print(f"{emoji} {message}") @@ -62,12 +82,15 @@ def _clear_line(self): sys.stdout.flush() def install(self, software: str, execute: bool = False, dry_run: bool = False): - api_key = self._get_api_key() - if not api_key: - return 1 - provider = self._get_provider() + if provider == 'fake': + api_key = os.environ.get('CORTEX_FAKE_API_KEY', 'fake-api-key') + else: + api_key = self._get_api_key(provider) + if not api_key: + return 1 + # Initialize installation history history = InstallationHistory() install_id = None @@ -510,6 +533,8 @@ def main(): Environment Variables: OPENAI_API_KEY OpenAI API key for GPT-4 ANTHROPIC_API_KEY Anthropic API key for Claude + KIMI_API_KEY Moonshot Kimi K2 API key + CORTEX_PROVIDER Optional provider override (openai|claude|kimi|fake) """ ) diff --git a/cortex/test_cli.py b/cortex/test_cli.py deleted file mode 100644 index 7515d90..0000000 --- a/cortex/test_cli.py +++ /dev/null @@ -1,228 +0,0 @@ -import unittest -from unittest.mock import Mock, patch, MagicMock, call -import sys -import os - -sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) - -from cortex.cli import CortexCLI, main - - -class TestCortexCLI(unittest.TestCase): - - def setUp(self): - self.cli = CortexCLI() - self.creds_patcher = patch.object(CortexCLI, '_load_creds', return_value={}) - self.creds_patcher.start() - self.addCleanup(self.creds_patcher.stop) - - @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) - def test_get_api_key_openai(self): - api_key = self.cli._get_api_key('openai') - self.assertEqual(api_key, 'test-key') - - @patch.dict(os.environ, {'ANTHROPIC_API_KEY': 'test-claude-key', 'OPENAI_API_KEY': ''}, clear=True) - def test_get_api_key_claude(self): - api_key = self.cli._get_api_key('claude') - self.assertEqual(api_key, 'test-claude-key') - - @patch.dict(os.environ, {}, clear=True) - @patch('sys.stderr') - def 
test_get_api_key_not_found(self, mock_stderr): - api_key = self.cli._get_api_key('openai') - self.assertIsNone(api_key) - - @patch.dict(os.environ, {'GROQ_API_KEY': 'groq-key'}, clear=True) - def test_get_provider_groq(self): - provider = self.cli._get_provider() - self.assertEqual(provider, 'groq') - - @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) - def test_get_provider_openai(self): - provider = self.cli._get_provider() - self.assertEqual(provider, 'openai') - - @patch.dict(os.environ, {'OPENAI_API_KEY': 'first', 'ANTHROPIC_API_KEY': 'second'}, clear=True) - @patch('cortex.cli.CommandInterpreter') - def test_install_fallback_to_second_provider(self, mock_interpreter_class): - first_instance = Mock() - first_instance.parse.side_effect = RuntimeError("OpenAI outage") - second_instance = Mock() - second_instance.parse.return_value = ["echo ok"] - mock_interpreter_class.side_effect = [first_instance, second_instance] - - result = self.cli.install("docker", dry_run=True) - - self.assertEqual(result, 0) - self.assertEqual(mock_interpreter_class.call_count, 2) - - @patch.dict(os.environ, {'ANTHROPIC_API_KEY': 'test-key'}, clear=True) - def test_get_provider_claude(self): - provider = self.cli._get_provider() - self.assertEqual(provider, 'claude') - - @patch('sys.stdout') - def test_print_status(self, mock_stdout): - self.cli._print_status("🧠", "Test message") - self.assertTrue(mock_stdout.write.called or print) - - @patch('sys.stderr') - def test_print_error(self, mock_stderr): - self.cli._print_error("Test error") - self.assertTrue(True) - - @patch('sys.stdout') - def test_print_success(self, mock_stdout): - self.cli._print_success("Test success") - self.assertTrue(True) - - @patch.dict(os.environ, {}, clear=True) - def test_install_no_api_key(self): - result = self.cli.install("docker") - self.assertEqual(result, 1) - - @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) - @patch('cortex.cli.CommandInterpreter') - def test_install_dry_run(self, 
mock_interpreter_class): - mock_interpreter = Mock() - mock_interpreter.parse.return_value = ["apt update", "apt install docker"] - mock_interpreter_class.return_value = mock_interpreter - - result = self.cli.install("docker", dry_run=True) - - self.assertEqual(result, 0) - mock_interpreter.parse.assert_called_once_with("install docker") - - @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) - @patch('cortex.cli.CommandInterpreter') - def test_install_no_execute(self, mock_interpreter_class): - mock_interpreter = Mock() - mock_interpreter.parse.return_value = ["apt update", "apt install docker"] - mock_interpreter_class.return_value = mock_interpreter - - result = self.cli.install("docker", execute=False) - - self.assertEqual(result, 0) - mock_interpreter.parse.assert_called_once() - - @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) - @patch('cortex.cli.CommandInterpreter') - @patch('cortex.cli.InstallationCoordinator') - def test_install_with_execute_success(self, mock_coordinator_class, mock_interpreter_class): - mock_interpreter = Mock() - mock_interpreter.parse.return_value = ["echo test"] - mock_interpreter_class.return_value = mock_interpreter - - mock_coordinator = Mock() - mock_result = Mock() - mock_result.success = True - mock_result.total_duration = 1.5 - mock_coordinator.execute.return_value = mock_result - mock_coordinator_class.return_value = mock_coordinator - - result = self.cli.install("docker", execute=True) - - self.assertEqual(result, 0) - mock_coordinator.execute.assert_called_once() - - @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) - @patch('cortex.cli.CommandInterpreter') - @patch('cortex.cli.InstallationCoordinator') - def test_install_with_execute_failure(self, mock_coordinator_class, mock_interpreter_class): - mock_interpreter = Mock() - mock_interpreter.parse.return_value = ["invalid command"] - mock_interpreter_class.return_value = mock_interpreter - - mock_coordinator = Mock() - mock_result = Mock() - 
mock_result.success = False - mock_result.failed_step = 0 - mock_result.error_message = "command not found" - mock_coordinator.execute.return_value = mock_result - mock_coordinator_class.return_value = mock_coordinator - - result = self.cli.install("docker", execute=True) - - self.assertEqual(result, 1) - - @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) - @patch('cortex.cli.CommandInterpreter') - def test_install_no_commands_generated(self, mock_interpreter_class): - mock_interpreter = Mock() - mock_interpreter.parse.return_value = [] - mock_interpreter_class.return_value = mock_interpreter - - result = self.cli.install("docker") - - self.assertEqual(result, 1) - - @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) - @patch('cortex.cli.CommandInterpreter') - def test_install_value_error(self, mock_interpreter_class): - mock_interpreter = Mock() - mock_interpreter.parse.side_effect = ValueError("Invalid input") - mock_interpreter_class.return_value = mock_interpreter - - result = self.cli.install("docker") - - self.assertEqual(result, 1) - - @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) - @patch('cortex.cli.CommandInterpreter') - def test_install_runtime_error(self, mock_interpreter_class): - mock_interpreter = Mock() - mock_interpreter.parse.side_effect = RuntimeError("API failed") - mock_interpreter_class.return_value = mock_interpreter - - result = self.cli.install("docker") - - self.assertEqual(result, 1) - - @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) - @patch('cortex.cli.CommandInterpreter') - def test_install_unexpected_error(self, mock_interpreter_class): - mock_interpreter = Mock() - mock_interpreter.parse.side_effect = Exception("Unexpected") - mock_interpreter_class.return_value = mock_interpreter - - result = self.cli.install("docker") - - self.assertEqual(result, 1) - - @patch('sys.argv', ['cortex']) - def test_main_no_command(self): - result = main() - self.assertEqual(result, 1) - - @patch('sys.argv', ['cortex', 
'install', 'docker']) - @patch('cortex.cli.CortexCLI.install') - def test_main_install_command(self, mock_install): - mock_install.return_value = 0 - result = main() - self.assertEqual(result, 0) - mock_install.assert_called_once_with('docker', execute=False, dry_run=False) - - @patch('sys.argv', ['cortex', 'install', 'docker', '--execute']) - @patch('cortex.cli.CortexCLI.install') - def test_main_install_with_execute(self, mock_install): - mock_install.return_value = 0 - result = main() - self.assertEqual(result, 0) - mock_install.assert_called_once_with('docker', execute=True, dry_run=False) - - @patch('sys.argv', ['cortex', 'install', 'docker', '--dry-run']) - @patch('cortex.cli.CortexCLI.install') - def test_main_install_with_dry_run(self, mock_install): - mock_install.return_value = 0 - result = main() - self.assertEqual(result, 0) - mock_install.assert_called_once_with('docker', execute=False, dry_run=True) - - def test_spinner_animation(self): - initial_idx = self.cli.spinner_idx - self.cli._animate_spinner("Testing") - self.assertNotEqual(self.cli.spinner_idx, initial_idx) - - -if __name__ == '__main__': - unittest.main() diff --git a/cortex/test_coordinator.py b/cortex/test_coordinator.py deleted file mode 100644 index 6911e23..0000000 --- a/cortex/test_coordinator.py +++ /dev/null @@ -1,353 +0,0 @@ -import unittest -from unittest.mock import Mock, patch, call -import tempfile -import os -import time -import sys - -sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) - -from cortex.coordinator import ( - InstallationCoordinator, - InstallationStep, - InstallationResult, - StepStatus, - install_docker -) - - -class TestInstallationStep(unittest.TestCase): - - def test_step_creation(self): - step = InstallationStep(command="echo test", description="Test step") - self.assertEqual(step.command, "echo test") - self.assertEqual(step.description, "Test step") - self.assertEqual(step.status, StepStatus.PENDING) - - def test_step_duration(self): - step 
= InstallationStep(command="test", description="test") - self.assertIsNone(step.duration()) - - step.start_time = 100.0 - step.end_time = 105.5 - self.assertEqual(step.duration(), 5.5) - - -class TestInstallationCoordinator(unittest.TestCase): - - def test_initialization(self): - commands = ["echo 1", "echo 2"] - coordinator = InstallationCoordinator(commands) - - self.assertEqual(len(coordinator.steps), 2) - self.assertEqual(coordinator.steps[0].command, "echo 1") - self.assertEqual(coordinator.steps[1].command, "echo 2") - - def test_initialization_with_descriptions(self): - commands = ["echo 1", "echo 2"] - descriptions = ["First", "Second"] - coordinator = InstallationCoordinator(commands, descriptions) - - self.assertEqual(coordinator.steps[0].description, "First") - self.assertEqual(coordinator.steps[1].description, "Second") - - def test_initialization_mismatched_descriptions(self): - commands = ["echo 1", "echo 2"] - descriptions = ["First"] - - with self.assertRaises(ValueError): - InstallationCoordinator(commands, descriptions) - - @patch('subprocess.run') - def test_execute_single_success(self, mock_run): - mock_result = Mock() - mock_result.returncode = 0 - mock_result.stdout = "success" - mock_result.stderr = "" - mock_run.return_value = mock_result - - coordinator = InstallationCoordinator(["echo test"]) - result = coordinator.execute() - - self.assertTrue(result.success) - self.assertEqual(len(result.steps), 1) - self.assertEqual(result.steps[0].status, StepStatus.SUCCESS) - - @patch('subprocess.run') - def test_execute_single_failure(self, mock_run): - mock_result = Mock() - mock_result.returncode = 1 - mock_result.stdout = "" - mock_result.stderr = "error" - mock_run.return_value = mock_result - - coordinator = InstallationCoordinator(["false"]) - result = coordinator.execute() - - self.assertFalse(result.success) - self.assertEqual(result.failed_step, 0) - self.assertEqual(result.steps[0].status, StepStatus.FAILED) - - @patch('subprocess.run') - 
def test_execute_multiple_success(self, mock_run): - mock_result = Mock() - mock_result.returncode = 0 - mock_result.stdout = "success" - mock_result.stderr = "" - mock_run.return_value = mock_result - - coordinator = InstallationCoordinator(["echo 1", "echo 2", "echo 3"]) - result = coordinator.execute() - - self.assertTrue(result.success) - self.assertEqual(len(result.steps), 3) - self.assertTrue(all(s.status == StepStatus.SUCCESS for s in result.steps)) - - @patch('subprocess.run') - def test_execute_stop_on_error(self, mock_run): - def side_effect(*args, **kwargs): - cmd = args[0] if args else kwargs.get('shell') - if "fail" in str(cmd): - result = Mock() - result.returncode = 1 - result.stdout = "" - result.stderr = "error" - return result - else: - result = Mock() - result.returncode = 0 - result.stdout = "success" - result.stderr = "" - return result - - mock_run.side_effect = side_effect - - coordinator = InstallationCoordinator( - ["echo 1", "fail", "echo 3"], - stop_on_error=True - ) - result = coordinator.execute() - - self.assertFalse(result.success) - self.assertEqual(result.failed_step, 1) - self.assertEqual(result.steps[0].status, StepStatus.SUCCESS) - self.assertEqual(result.steps[1].status, StepStatus.FAILED) - self.assertEqual(result.steps[2].status, StepStatus.SKIPPED) - - @patch('subprocess.run') - def test_execute_continue_on_error(self, mock_run): - def side_effect(*args, **kwargs): - cmd = args[0] if args else kwargs.get('shell') - if "fail" in str(cmd): - result = Mock() - result.returncode = 1 - result.stdout = "" - result.stderr = "error" - return result - else: - result = Mock() - result.returncode = 0 - result.stdout = "success" - result.stderr = "" - return result - - mock_run.side_effect = side_effect - - coordinator = InstallationCoordinator( - ["echo 1", "fail", "echo 3"], - stop_on_error=False - ) - result = coordinator.execute() - - self.assertFalse(result.success) - self.assertEqual(result.steps[0].status, StepStatus.SUCCESS) - 
self.assertEqual(result.steps[1].status, StepStatus.FAILED) - self.assertEqual(result.steps[2].status, StepStatus.SUCCESS) - - @patch('subprocess.run') - def test_timeout_handling(self, mock_run): - mock_run.side_effect = Exception("Timeout") - - coordinator = InstallationCoordinator(["sleep 1000"], timeout=1) - result = coordinator.execute() - - self.assertFalse(result.success) - self.assertEqual(result.steps[0].status, StepStatus.FAILED) - - def test_progress_callback(self): - callback_calls = [] - - def callback(current, total, step): - callback_calls.append((current, total, step.command)) - - with patch('subprocess.run') as mock_run: - mock_result = Mock() - mock_result.returncode = 0 - mock_result.stdout = "success" - mock_result.stderr = "" - mock_run.return_value = mock_result - - coordinator = InstallationCoordinator( - ["echo 1", "echo 2"], - progress_callback=callback - ) - coordinator.execute() - - self.assertEqual(len(callback_calls), 2) - self.assertEqual(callback_calls[0], (1, 2, "echo 1")) - self.assertEqual(callback_calls[1], (2, 2, "echo 2")) - - def test_log_file(self): - with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.log') as f: - log_file = f.name - - try: - with patch('subprocess.run') as mock_run: - mock_result = Mock() - mock_result.returncode = 0 - mock_result.stdout = "success" - mock_result.stderr = "" - mock_run.return_value = mock_result - - coordinator = InstallationCoordinator( - ["echo test"], - log_file=log_file - ) - coordinator.execute() - - self.assertTrue(os.path.exists(log_file)) - with open(log_file, 'r') as f: - content = f.read() - self.assertIn("Executing: echo test", content) - finally: - if os.path.exists(log_file): - os.unlink(log_file) - - @patch('subprocess.run') - def test_rollback(self, mock_run): - mock_result = Mock() - mock_result.returncode = 1 - mock_result.stdout = "" - mock_result.stderr = "error" - mock_run.return_value = mock_result - - coordinator = InstallationCoordinator( - ["fail"], - 
enable_rollback=True - ) - coordinator.add_rollback_command("echo rollback") - result = coordinator.execute() - - self.assertFalse(result.success) - self.assertGreaterEqual(mock_run.call_count, 2) - - @patch('subprocess.run') - def test_verify_installation(self, mock_run): - mock_result = Mock() - mock_result.returncode = 0 - mock_result.stdout = "Docker version 20.10.0" - mock_result.stderr = "" - mock_run.return_value = mock_result - - coordinator = InstallationCoordinator(["echo test"]) - coordinator.execute() - - verify_results = coordinator.verify_installation(["docker --version"]) - - self.assertTrue(verify_results["docker --version"]) - - def test_get_summary(self): - with patch('subprocess.run') as mock_run: - mock_result = Mock() - mock_result.returncode = 0 - mock_result.stdout = "success" - mock_result.stderr = "" - mock_run.return_value = mock_result - - coordinator = InstallationCoordinator(["echo 1", "echo 2"]) - coordinator.execute() - - summary = coordinator.get_summary() - - self.assertEqual(summary["total_steps"], 2) - self.assertEqual(summary["success"], 2) - self.assertEqual(summary["failed"], 0) - self.assertEqual(summary["skipped"], 0) - - def test_export_log(self): - with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.json') as f: - export_file = f.name - - try: - with patch('subprocess.run') as mock_run: - mock_result = Mock() - mock_result.returncode = 0 - mock_result.stdout = "success" - mock_result.stderr = "" - mock_run.return_value = mock_result - - coordinator = InstallationCoordinator(["echo test"]) - coordinator.execute() - coordinator.export_log(export_file) - - self.assertTrue(os.path.exists(export_file)) - - import json - with open(export_file, 'r') as f: - data = json.load(f) - self.assertIn("total_steps", data) - self.assertEqual(data["total_steps"], 1) - finally: - if os.path.exists(export_file): - os.unlink(export_file) - - @patch('subprocess.run') - def test_step_timing(self, mock_run): - mock_result = Mock() - 
mock_result.returncode = 0 - mock_result.stdout = "success" - mock_result.stderr = "" - mock_run.return_value = mock_result - - coordinator = InstallationCoordinator(["echo test"]) - result = coordinator.execute() - - step = result.steps[0] - self.assertIsNotNone(step.start_time) - self.assertIsNotNone(step.end_time) - if step.end_time and step.start_time: - self.assertTrue(step.end_time > step.start_time) - self.assertIsNotNone(step.duration()) - - -class TestInstallDocker(unittest.TestCase): - - @patch('subprocess.run') - def test_install_docker_success(self, mock_run): - mock_result = Mock() - mock_result.returncode = 0 - mock_result.stdout = "success" - mock_result.stderr = "" - mock_run.return_value = mock_result - - result = install_docker() - - self.assertTrue(result.success) - self.assertEqual(len(result.steps), 8) - - @patch('subprocess.run') - def test_install_docker_failure(self, mock_run): - mock_result = Mock() - mock_result.returncode = 1 - mock_result.stdout = "" - mock_result.stderr = "error" - mock_run.return_value = mock_result - - result = install_docker() - - self.assertFalse(result.success) - self.assertIsNotNone(result.failed_step) - - -if __name__ == '__main__': - unittest.main() diff --git a/docs/ISSUE_40_KIMI_K2_IMPLEMENTATION.md b/docs/ISSUE_40_KIMI_K2_IMPLEMENTATION.md new file mode 100644 index 0000000..379a2bd --- /dev/null +++ b/docs/ISSUE_40_KIMI_K2_IMPLEMENTATION.md @@ -0,0 +1,237 @@ +# Issue #40: Kimi K2 API Integration + +**Issue Link:** https://github.com/cortexlinux/cortex/issues/40 +**PR Link:** https://github.com/cortexlinux/cortex/pull/192 +**Bounty:** $150 +**Status:** ✅ Implemented +**Date Completed:** December 2, 2025 + +## Summary + +Successfully integrated Moonshot AI's Kimi K2 model as a new LLM provider for Cortex, expanding the platform's multi-LLM capabilities. This implementation allows users to leverage Kimi K2 for natural language command interpretation as an alternative to OpenAI GPT-4o and Anthropic Claude 3.5. 
+ +## Implementation Details + +### 1. Core Integration (LLM/interpreter.py) + +**Added:** +- `KIMI` enum value to `APIProvider` +- `_call_kimi()` method for Kimi K2 HTTP API integration +- Kimi-specific initialization in `_initialize_client()` +- Default model detection for Kimi K2 (`kimi-k2`) + +**Features:** +- Full HTTP-based API integration using `requests` library +- Configurable base URL via `KIMI_API_BASE_URL` environment variable (defaults to `https://api.moonshot.cn`) +- Configurable model via `KIMI_DEFAULT_MODEL` environment variable +- Proper error handling with descriptive exceptions +- Request timeout set to 60 seconds +- JSON response parsing with validation + +**Security:** +- Bearer token authentication +- Proper SSL/TLS via HTTPS +- Input validation and sanitization +- Error messages don't leak sensitive information + +### 2. CLI Support (cortex/cli.py) + +**Updated Methods:** +- `_get_provider()`: Added Kimi detection via `KIMI_API_KEY` +- `_get_api_key(provider)`: Added Kimi API key mapping +- Updated install workflow to support fake provider for testing + +**Environment Variables:** +- `KIMI_API_KEY`: Required for Kimi K2 authentication +- `CORTEX_PROVIDER`: Optional override (supports `openai`, `claude`, `kimi`, `fake`) +- `KIMI_API_BASE_URL`: Optional base URL override +- `KIMI_DEFAULT_MODEL`: Optional model override (default: `kimi-k2`) + +### 3. Dependencies (LLM/requirements.txt) + +**Updated:** +- Added `requests>=2.32.4` (addresses CVE-2024-35195, CVE-2024-37891, CVE-2023-32681) +- Security-focused version constraint ensures patched vulnerabilities + +### 4. 
Testing + +**Added Tests:** +- `test_get_provider_kimi`: Provider detection +- `test_get_api_key_kimi`: API key retrieval +- `test_initialization_kimi`: Kimi initialization +- `test_call_kimi_success`: Successful API call +- `test_call_kimi_failure`: Error handling +- `test_call_fake_with_env_commands`: Fake provider testing + +**Test Coverage:** +- Unit tests: ✅ 143 tests passing +- Integration tests: ✅ 5 Docker-based tests (skipped without Docker) +- All existing tests remain passing +- No regressions introduced + +### 5. Documentation + +**Updated Files:** +- `README.md`: Added Kimi K2 to supported providers table, usage examples +- `cortex/cli.py`: Updated help text with Kimi environment variables +- `docs/ISSUE_40_KIMI_K2_IMPLEMENTATION.md`: This summary document + +## Configuration Examples + +### Getting a Valid API Key + +1. Visit [Moonshot AI Platform](https://platform.moonshot.ai/) +2. Sign up or log in to your account +3. Navigate to [API Keys Console](https://platform.moonshot.ai/console/api-keys) +4. Click "Create API Key" and copy the key +5. 
The key format should start with `sk-` + +### Basic Usage + +```bash +# Set Kimi API key (get from https://platform.moonshot.ai/console/api-keys) +export KIMI_API_KEY="sk-your-actual-key-here" + +# Install with Kimi K2 (auto-detected) +cortex install docker + +# Explicit provider override +export CORTEX_PROVIDER=kimi +cortex install "nginx with ssl" +``` + +### Advanced Configuration + +```bash +# Custom model (options: kimi-k2-turbo-preview, kimi-k2-0905-preview, kimi-k2-thinking, kimi-k2-thinking-turbo) +export KIMI_DEFAULT_MODEL="kimi-k2-0905-preview" + +# Custom base URL (default: https://api.moonshot.ai) +export KIMI_API_BASE_URL="https://api.moonshot.ai" + +# Dry run mode +cortex install postgresql --dry-run +``` + +### Testing Without API Costs + +```bash +# Use fake provider for testing +export CORTEX_PROVIDER=fake +export CORTEX_FAKE_COMMANDS='{"commands": ["echo Step 1", "echo Step 2"]}' +cortex install docker --dry-run +``` + +## API Request Format + +The Kimi K2 integration uses the OpenAI-compatible chat completions endpoint: + +```json +POST https://api.moonshot.cn/v1/chat/completions + +Headers: + Authorization: Bearer {KIMI_API_KEY} + Content-Type: application/json + +Body: +{ + "model": "kimi-k2", + "messages": [ + {"role": "system", "content": "System prompt..."}, + {"role": "user", "content": "User request..."} + ], + "temperature": 0.3, + "max_tokens": 1000 +} +``` + +## Error Handling + +The implementation includes comprehensive error handling: + +1. **Missing Dependencies:** Clear error if `requests` package not installed +2. **API Failures:** Runtime errors with descriptive messages +3. **Empty Responses:** Validation that API returns valid choices +4. **Network Issues:** Timeout protection (60s) +5. **Authentication Errors:** HTTP status code validation via `raise_for_status()` + +## Code Quality Improvements + +Based on CodeRabbit feedback, the following improvements were made: + +1. 
✅ **Security:** Updated `requests>=2.32.4` to address known CVEs +2. ✅ **Model Defaults:** Updated OpenAI default to `gpt-4o` (current best practice) +3. ✅ **Test Organization:** Removed duplicate test files (`cortex/test_cli.py`, `cortex/test_coordinator.py`) +4. ✅ **Import Fixes:** Added missing imports (`unittest`, `Mock`, `patch`, `SimpleNamespace`) +5. ✅ **Method Signatures:** Updated `_get_api_key(provider)` to accept provider parameter +6. ✅ **Provider Exclusions:** Removed Groq provider as per requirements (only Kimi K2 added) +7. ✅ **Setup.py Fix:** Corrected syntax errors in package configuration + +## Performance Considerations + +- **HTTP Request Timeout:** 60 seconds prevents hanging on slow connections +- **Connection Reuse:** `requests` library handles connection pooling automatically +- **Error Recovery:** Fast-fail on API errors with informative messages +- **Memory Efficiency:** JSON parsing directly from response without intermediate storage + +## Future Enhancements + +Potential improvements for future iterations: + +1. **Streaming Support:** Add streaming response support for real-time feedback +2. **Retry Logic:** Implement exponential backoff for transient failures +3. **Rate Limiting:** Add rate limit awareness and queuing +4. **Batch Operations:** Support multiple requests in parallel +5. **Model Selection:** UI/CLI option to select specific Kimi models +6. **Caching:** Cache common responses to reduce API costs + +## Testing Results + +``` +Ran 143 tests in 10.136s + +OK (skipped=5) +``` + +All tests pass successfully: +- ✅ 138 tests passed +- ⏭️ 5 integration tests skipped (require Docker) +- ❌ 0 failures +- ❌ 0 errors + +## Migration Notes + +For users upgrading: + +1. **Backward Compatible:** Existing OpenAI and Claude configurations continue to work +2. **New Dependency:** `pip install requests>=2.32.4` required +3. **Environment Variables:** Optional - no breaking changes to existing setups +4. 
**Default Behavior:** No change - OpenAI remains default if multiple keys present + +## Related Issues + +- **Issue #16:** Integration test suite (optional, addressed in PR #192) +- **Issue #11:** CLI interface improvements (referenced in commits) +- **Issue #8:** Multi-step coordinator (referenced in commits) + +## Contributors + +- @Sahilbhatane - Primary implementation +- @mikejmorgan-ai - Code review and issue management +- @dhvll - Code review +- @coderabbitai - Automated code review and suggestions + +## Lessons Learned + +1. **API Documentation:** Kimi K2 follows OpenAI-compatible format, simplifying integration +2. **Security First:** Always use latest patched dependencies (`requests>=2.32.4`) +3. **Test Coverage:** Comprehensive testing prevents regressions +4. **Error Messages:** Descriptive errors improve user experience +5. **Environment Variables:** Flexible configuration reduces hard-coded values + +## References + +- **Kimi K2 Documentation:** https://platform.moonshot.cn/docs +- **Original PR:** https://github.com/cortexlinux/cortex/pull/192 +- **Issue Discussion:** https://github.com/cortexlinux/cortex/issues/40 +- **CVE Fixes:** CVE-2024-35195, CVE-2024-37891, CVE-2023-32681 diff --git a/setup.py b/setup.py index 3504d45..b30b66b 100644 --- a/setup.py +++ b/setup.py @@ -12,8 +12,12 @@ with open(requirements_path, "r", encoding="utf-8") as fh: requirements = [line.strip() for line in fh if line.strip() and not line.startswith("#") and not line.startswith("-r")] else: - requirements = ["anthropic>=0.18.0", "openai>=1.0.0"] - Linux", + requirements = ["anthropic>=0.18.0", "openai>=1.0.0", "requests>=2.32.4"] + +setup( + name="cortex-linux", + version="0.1.0", + author="Cortex Linux", author_email="mike@cortexlinux.com", description="AI-powered Linux command interpreter", long_description=long_description, diff --git a/test/test_cli.py b/test/test_cli.py index 635ad06..6e00e96 100644 --- a/test/test_cli.py +++ b/test/test_cli.py @@ -15,21 +15,26 @@ 
def setUp(self): @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) def test_get_api_key_openai(self): - api_key = self.cli._get_api_key() + api_key = self.cli._get_api_key('openai') self.assertEqual(api_key, 'test-key') - @patch.dict(os.environ, {'ANTHROPIC_API_KEY': 'test-claude-key', 'OPENAI_API_KEY': ''}, clear=True) + @patch.dict(os.environ, {'ANTHROPIC_API_KEY': 'test-claude-key'}, clear=True) def test_get_api_key_claude(self): - api_key = self.cli._get_api_key() + api_key = self.cli._get_api_key('claude') self.assertEqual(api_key, 'test-claude-key') + @patch.dict(os.environ, {'KIMI_API_KEY': 'test-kimi-key'}, clear=True) + def test_get_api_key_kimi(self): + api_key = self.cli._get_api_key('kimi') + self.assertEqual(api_key, 'test-kimi-key') + @patch.dict(os.environ, {}, clear=True) @patch('sys.stderr') def test_get_api_key_not_found(self, mock_stderr): - api_key = self.cli._get_api_key() + api_key = self.cli._get_api_key('openai') self.assertIsNone(api_key) - @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) + @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}, clear=True) def test_get_provider_openai(self): provider = self.cli._get_provider() self.assertEqual(provider, 'openai') @@ -39,6 +44,16 @@ def test_get_provider_claude(self): provider = self.cli._get_provider() self.assertEqual(provider, 'claude') + @patch.dict(os.environ, {'KIMI_API_KEY': 'test-key'}, clear=True) + def test_get_provider_kimi(self): + provider = self.cli._get_provider() + self.assertEqual(provider, 'kimi') + + @patch.dict(os.environ, {'CORTEX_PROVIDER': 'fake'}, clear=True) + def test_get_provider_override(self): + provider = self.cli._get_provider() + self.assertEqual(provider, 'fake') + @patch('sys.stdout') def test_print_status(self, mock_stdout): self.cli._print_status("🧠", "Test message") @@ -59,6 +74,19 @@ def test_install_no_api_key(self): result = self.cli.install("docker") self.assertEqual(result, 1) + @patch.dict(os.environ, {'CORTEX_PROVIDER': 'fake', 
'CORTEX_FAKE_COMMANDS': ''}, clear=True) + @patch('cortex.cli.CommandInterpreter') + @patch('cortex.cli.InstallationHistory') + def test_install_fake_provider_skips_api_key(self, mock_history, mock_interpreter_class): + mock_interpreter = Mock() + mock_interpreter.parse.return_value = ['echo test'] + mock_interpreter_class.return_value = mock_interpreter + + result = self.cli.install('docker') + + self.assertEqual(result, 0) + mock_interpreter.parse.assert_called_once_with('install docker') + @patch.dict(os.environ, {'OPENAI_API_KEY': 'test-key'}) @patch('cortex.cli.CommandInterpreter') def test_install_dry_run(self, mock_interpreter_class): diff --git a/test/test_coordinator.py b/test/test_coordinator.py index acdb45a..4ac5a04 100644 --- a/test/test_coordinator.py +++ b/test/test_coordinator.py @@ -1,7 +1,8 @@ import unittest -from unittest.mock import Mock, patch +from unittest.mock import Mock, patch, call import tempfile import os +import time import sys sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) @@ -9,9 +10,9 @@ from cortex.coordinator import ( InstallationCoordinator, InstallationStep, + InstallationResult, StepStatus, - install_docker, - example_cuda_install_plan + install_docker ) @@ -42,20 +43,6 @@ def test_initialization(self): self.assertEqual(coordinator.steps[0].command, "echo 1") self.assertEqual(coordinator.steps[1].command, "echo 2") - def test_from_plan_initialization(self): - plan = [ - {"command": "echo 1", "description": "First step"}, - {"command": "echo 2", "rollback": "echo rollback"} - ] - - coordinator = InstallationCoordinator.from_plan(plan) - - self.assertEqual(len(coordinator.steps), 2) - self.assertEqual(coordinator.steps[0].description, "First step") - self.assertEqual(coordinator.steps[1].description, "Step 2") - self.assertTrue(coordinator.enable_rollback) - self.assertEqual(coordinator.rollback_commands, ["echo rollback"]) - def test_initialization_with_descriptions(self): commands = ["echo 1", "echo 2"] 
descriptions = ["First", "Second"] @@ -329,7 +316,7 @@ def test_step_timing(self, mock_run): self.assertIsNotNone(step.start_time) self.assertIsNotNone(step.end_time) if step.end_time and step.start_time: - self.assertGreater(step.end_time, step.start_time) + self.assertTrue(step.end_time > step.start_time) self.assertIsNotNone(step.duration()) @@ -362,15 +349,5 @@ def test_install_docker_failure(self, mock_run): self.assertIsNotNone(result.failed_step) -class TestInstallationPlans(unittest.TestCase): - - def test_example_cuda_install_plan_structure(self): - plan = example_cuda_install_plan() - - self.assertGreaterEqual(len(plan), 5) - self.assertTrue(all("command" in step for step in plan)) - self.assertTrue(any("rollback" in step for step in plan)) - - if __name__ == '__main__': unittest.main() From 9b9a36ef6b99da8974c4f58c092bb16c6ca65dae Mon Sep 17 00:00:00 2001 From: sahil Date: Tue, 2 Dec 2025 20:32:53 +0530 Subject: [PATCH 09/12] feat: Implement Kimi K2 API integration - Fixes [#40](https://github.com/cortexlinux/cortex/issues/40) --- LLM/test_interpreter.py | 105 +++++++++++++++++++++++++++++++++++++++- requirements-dev.txt | 1 + requirements.txt | 4 ++ 3 files changed, 108 insertions(+), 2 deletions(-) diff --git a/LLM/test_interpreter.py b/LLM/test_interpreter.py index 6c66f98..1c68b88 100644 --- a/LLM/test_interpreter.py +++ b/LLM/test_interpreter.py @@ -51,7 +51,37 @@ def test_initialization_custom_model(self, mock_openai): def test_initialization_kimi(self): interpreter = CommandInterpreter(api_key=self.api_key, provider="kimi") self.assertEqual(interpreter.provider, APIProvider.KIMI) - self.assertEqual(interpreter.model, "kimi-k2") + self.assertEqual(interpreter.model, "kimi-k2-turbo-preview") + + @patch('requests.post') + def test_call_kimi_success(self, mock_post): + mock_response = Mock() + mock_response.status_code = 200 + mock_response.json.return_value = { + "choices": [{"message": {"content": '{"commands": ["apt update", "apt install 
curl"]}'}}] + } + mock_post.return_value = mock_response + + interpreter = CommandInterpreter(api_key=self.api_key, provider="kimi") + result = interpreter._call_kimi("install curl") + + self.assertEqual(result, ["apt update", "apt install curl"]) + mock_post.assert_called_once() + call_args = mock_post.call_args + self.assertIn("Authorization", call_args[1]["headers"]) + self.assertEqual(call_args[1]["headers"]["Authorization"], f"Bearer {self.api_key}") + + @patch('requests.post') + def test_call_kimi_failure(self, mock_post): + mock_response = Mock() + mock_response.status_code = 401 + mock_response.raise_for_status.side_effect = Exception("401 Unauthorized") + mock_post.return_value = mock_response + + interpreter = CommandInterpreter(api_key=self.api_key, provider="kimi") + + with self.assertRaises(RuntimeError): + interpreter._call_kimi("install docker") def test_parse_commands_valid_json(self): interpreter = CommandInterpreter.__new__(CommandInterpreter) @@ -126,7 +156,6 @@ def test_call_openai_failure(self, mock_openai): with self.assertRaises(RuntimeError): interpreter._call_openai("install docker") - @patch('openai.OpenAI') @patch('anthropic.Anthropic') def test_call_claude_success(self, mock_anthropic): mock_client = Mock() @@ -242,5 +271,77 @@ def test_parse_docker_installation(self, mock_openai): self.assertTrue(any("docker" in cmd.lower() for cmd in result)) +@unittest.skipUnless( + os.environ.get('RUN_KIMI_INTEGRATION_TESTS') == '1', + "Skipping Kimi K2 integration tests. Set RUN_KIMI_INTEGRATION_TESTS=1 to run them." 
+) +class TestKimiK2Integration(unittest.TestCase): + """Integration tests for Kimi K2 API with real API calls + + To run these tests: + - Set environment variable: RUN_KIMI_INTEGRATION_TESTS=1 + - Set environment variable: KIMI_API_KEY=your-api-key + - Run: pytest LLM/test_interpreter.py::TestKimiK2Integration -v + """ + + def setUp(self): + # Use the actual API key from environment + self.api_key = os.environ.get('KIMI_API_KEY') + + if not self.api_key: + self.skipTest("KIMI_API_KEY not set for integration tests") + + def test_kimi_real_api_basic_request(self): + """Test Kimi K2 with real API - basic installation request""" + interpreter = CommandInterpreter(api_key=self.api_key, provider="kimi") + + result = interpreter.parse("Install curl on Ubuntu") + + self.assertIsInstance(result, list) + self.assertGreater(len(result), 0) + self.assertTrue(any("curl" in cmd.lower() for cmd in result)) + print(f"\n✅ Kimi K2 API Test - Generated {len(result)} commands: {result}") + + def test_kimi_real_api_complex_request(self): + """Test Kimi K2 with real API - complex installation request""" + interpreter = CommandInterpreter(api_key=self.api_key, provider="kimi") + + result = interpreter.parse("Install nginx web server and configure it to start on boot") + + self.assertIsInstance(result, list) + self.assertGreater(len(result), 2) + self.assertTrue(any("nginx" in cmd.lower() for cmd in result)) + print(f"\n✅ Kimi K2 API Complex Test - Generated {len(result)} commands: {result}") + + def test_kimi_real_api_with_custom_model(self): + """Test Kimi K2 with different model""" + os.environ['KIMI_DEFAULT_MODEL'] = 'kimi-k2-0905-preview' + interpreter = CommandInterpreter(api_key=self.api_key, provider="kimi") + + result = interpreter.parse("Install git") + + self.assertIsInstance(result, list) + self.assertGreater(len(result), 0) + self.assertTrue(any("git" in cmd.lower() for cmd in result)) + print(f"\n✅ Kimi K2 Custom Model Test - Generated {len(result)} commands: {result}") + + 
# Cleanup + os.environ.pop('KIMI_DEFAULT_MODEL', None) + + def test_kimi_real_api_validation(self): + """Test Kimi K2 with command validation""" + interpreter = CommandInterpreter(api_key=self.api_key, provider="kimi") + + result = interpreter.parse("Install docker", validate=True) + + self.assertIsInstance(result, list) + self.assertGreater(len(result), 0) + # Ensure no dangerous commands passed validation + for cmd in result: + self.assertNotIn("rm -rf", cmd.lower()) + self.assertNotIn("dd if=", cmd.lower()) + print(f"\n✅ Kimi K2 Validation Test - Validated commands: {result}") + + if __name__ == "__main__": unittest.main() diff --git a/requirements-dev.txt b/requirements-dev.txt index ada5858..0cf4c8a 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -7,6 +7,7 @@ pytest>=7.0.0 pytest-cov>=4.0.0 pytest-mock>=3.10.0 +requests>=2.32.4 # Code Quality black>=23.0.0 diff --git a/requirements.txt b/requirements.txt index 25a4cd2..66e0cd8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,6 +3,10 @@ # LLM Provider APIs anthropic>=0.18.0 openai>=1.0.0 +requests>=2.32.4 + +# Configuration +PyYAML>=6.0.0 # Type hints for older Python versions typing-extensions>=4.0.0 From e0faa7b4c8df8921c37e11c945fa2b1bf3e3e8f7 Mon Sep 17 00:00:00 2001 From: sahil Date: Tue, 2 Dec 2025 20:48:38 +0530 Subject: [PATCH 10/12] feat: Implement Kimi K2 API integration - Fixes (#40) --- LLM/test_interpreter.py | 5 +---- docs/ISSUE_40_KIMI_K2_IMPLEMENTATION.md | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/LLM/test_interpreter.py b/LLM/test_interpreter.py index 1c68b88..a9f6ef6 100644 --- a/LLM/test_interpreter.py +++ b/LLM/test_interpreter.py @@ -313,9 +313,9 @@ def test_kimi_real_api_complex_request(self): self.assertTrue(any("nginx" in cmd.lower() for cmd in result)) print(f"\n✅ Kimi K2 API Complex Test - Generated {len(result)} commands: {result}") + @patch.dict(os.environ, {'KIMI_DEFAULT_MODEL': 'kimi-k2-0905-preview'}) def 
test_kimi_real_api_with_custom_model(self): """Test Kimi K2 with different model""" - os.environ['KIMI_DEFAULT_MODEL'] = 'kimi-k2-0905-preview' interpreter = CommandInterpreter(api_key=self.api_key, provider="kimi") result = interpreter.parse("Install git") @@ -324,9 +324,6 @@ def test_kimi_real_api_with_custom_model(self): self.assertGreater(len(result), 0) self.assertTrue(any("git" in cmd.lower() for cmd in result)) print(f"\n✅ Kimi K2 Custom Model Test - Generated {len(result)} commands: {result}") - - # Cleanup - os.environ.pop('KIMI_DEFAULT_MODEL', None) def test_kimi_real_api_validation(self): """Test Kimi K2 with command validation""" diff --git a/docs/ISSUE_40_KIMI_K2_IMPLEMENTATION.md b/docs/ISSUE_40_KIMI_K2_IMPLEMENTATION.md index 379a2bd..ec68991 100644 --- a/docs/ISSUE_40_KIMI_K2_IMPLEMENTATION.md +++ b/docs/ISSUE_40_KIMI_K2_IMPLEMENTATION.md @@ -22,7 +22,7 @@ Successfully integrated Moonshot AI's Kimi K2 model as a new LLM provider for Co **Features:** - Full HTTP-based API integration using `requests` library -- Configurable base URL via `KIMI_API_BASE_URL` environment variable (defaults to `https://api.moonshot.cn`) +- Configurable base URL via `KIMI_API_BASE_URL` environment variable (defaults to `https://api.moonshot.ai`) - Configurable model via `KIMI_DEFAULT_MODEL` environment variable - Proper error handling with descriptive exceptions - Request timeout set to 60 seconds From 9746f920a0a245c9be176a826ed946c3744fc91d Mon Sep 17 00:00:00 2001 From: sahil Date: Wed, 3 Dec 2025 19:59:56 +0530 Subject: [PATCH 11/12] feat: Implement Kimi K2 API integration - Fixes #40 --- LLM/interpreter.py | 8 +++----- LLM/test_interpreter.py | 2 +- cortex/user_preferences.py | 4 ++-- docs/ISSUE_40_KIMI_K2_IMPLEMENTATION.md | 6 +++--- test/integration/docker_utils.py | 6 ++---- test/integration/test_end_to_end.py | 4 ++-- test/test_coordinator.py | 6 ++---- 7 files changed, 15 insertions(+), 21 deletions(-) diff --git a/LLM/interpreter.py b/LLM/interpreter.py index 
09d3b93..20404a9 100644 --- a/LLM/interpreter.py +++ b/LLM/interpreter.py @@ -149,8 +149,6 @@ def _call_kimi(self, user_input: str) -> List[str]: if not content: raise RuntimeError("Kimi API returned empty content") return self._parse_commands(content) - except ImportError as ie: - raise RuntimeError("Requests package not installed. Run: pip install requests") from ie except Exception as exc: raise RuntimeError(f"Kimi API call failed: {str(exc)}") from exc @@ -163,9 +161,9 @@ def _call_fake(self, user_input: str) -> List[str]: data = json.loads(payload) except json.JSONDecodeError as exc: raise ValueError("CORTEX_FAKE_COMMANDS must contain valid JSON") from exc - if not isinstance(data, dict) or "commands" not in data: - raise ValueError("CORTEX_FAKE_COMMANDS must define a 'commands' list") - return self._parse_commands(payload) + if not isinstance(data["commands"], list): + raise ValueError("'commands' must be a list in CORTEX_FAKE_COMMANDS") + return data["commands"] safe_defaults = { "docker": [ diff --git a/LLM/test_interpreter.py b/LLM/test_interpreter.py index a9f6ef6..844a485 100644 --- a/LLM/test_interpreter.py +++ b/LLM/test_interpreter.py @@ -281,7 +281,7 @@ class TestKimiK2Integration(unittest.TestCase): To run these tests: - Set environment variable: RUN_KIMI_INTEGRATION_TESTS=1 - Set environment variable: KIMI_API_KEY=your-api-key - - Run: pytest LLM/test_interpreter.py::TestKimiK2Integration -v + - Run: python -m unittest LLM.test_interpreter.TestKimiK2Integration -v """ def setUp(self): diff --git a/cortex/user_preferences.py b/cortex/user_preferences.py index 5f1c20c..098a213 100644 --- a/cortex/user_preferences.py +++ b/cortex/user_preferences.py @@ -11,6 +11,7 @@ import os import yaml import json +import shutil from pathlib import Path from typing import Any, Dict, Optional, List from dataclasses import dataclass, asdict, field @@ -221,7 +222,6 @@ def _create_backup(self) -> Optional[Path]: # Create a simple ..bak backup (e.g., 
preferences.yaml.bak) try: backup_path = self.config_path.with_suffix(self.config_path.suffix + self.BACKUP_SUFFIX) - import shutil shutil.copy2(self.config_path, backup_path) return backup_path except Exception as e: @@ -493,7 +493,7 @@ def validate(self) -> List[str]: errors.append("At least one package source required") # Basic language validation - valid_langs = ["en", "es", "fr", "de"] + valid_langs = ["en", "es", "fr", "de", "ja", "zh", "pt", "ru"] if self._preferences.language not in valid_langs: errors.append("language must be a supported two-letter code") diff --git a/docs/ISSUE_40_KIMI_K2_IMPLEMENTATION.md b/docs/ISSUE_40_KIMI_K2_IMPLEMENTATION.md index ec68991..fa3699a 100644 --- a/docs/ISSUE_40_KIMI_K2_IMPLEMENTATION.md +++ b/docs/ISSUE_40_KIMI_K2_IMPLEMENTATION.md @@ -18,7 +18,7 @@ Successfully integrated Moonshot AI's Kimi K2 model as a new LLM provider for Co - `KIMI` enum value to `APIProvider` - `_call_kimi()` method for Kimi K2 HTTP API integration - Kimi-specific initialization in `_initialize_client()` -- Default model detection for Kimi K2 (`kimi-k2`) +- Default model detection for Kimi K2 (`kimi-k2/kimi-k2-turbo-preview`) **Features:** - Full HTTP-based API integration using `requests` library @@ -127,7 +127,7 @@ cortex install docker --dry-run The Kimi K2 integration uses the OpenAI-compatible chat completions endpoint: ```json -POST https://api.moonshot.cn/v1/chat/completions +POST https://api.moonshot.ai/v1/chat/completions Headers: Authorization: Bearer {KIMI_API_KEY} @@ -231,7 +231,7 @@ For users upgrading: ## References -- **Kimi K2 Documentation:** https://platform.moonshot.cn/docs +- **Kimi K2 Documentation:** https://platform.moonshot.ai/docs - **Original PR:** https://github.com/cortexlinux/cortex/pull/192 - **Issue Discussion:** https://github.com/cortexlinux/cortex/issues/40 - **CVE Fixes:** CVE-2024-35195, CVE-2024-37891, CVE-2023-32681 diff --git a/test/integration/docker_utils.py b/test/integration/docker_utils.py index 
23426f2..012df86 100644 --- a/test/integration/docker_utils.py +++ b/test/integration/docker_utils.py @@ -1,8 +1,6 @@ """Helpers for running Cortex integration tests inside Docker containers.""" from __future__ import annotations - -import os import shutil import subprocess from dataclasses import dataclass @@ -72,8 +70,8 @@ def run_in_docker( env: Optional environment variables exported inside the container. mounts: - Iterable of host ``Path`` instances mounted read-only to the same - location within the container. + Iterable of (host_path, container_path) tuples for mounting directories. + workdir: Working directory set inside the container. timeout: diff --git a/test/integration/test_end_to_end.py b/test/integration/test_end_to_end.py index f48212e..b5e7ece 100644 --- a/test/integration/test_end_to_end.py +++ b/test/integration/test_end_to_end.py @@ -7,7 +7,7 @@ import unittest from pathlib import Path -from .docker_utils import docker_available, run_in_docker +from .docker_utils import docker_available, run_in_docker, DockerRunResult REPO_ROOT = Path(__file__).resolve().parents[2] DEFAULT_IMAGE = os.environ.get("CORTEX_INTEGRATION_IMAGE", "python:3.11-slim") @@ -24,7 +24,7 @@ class TestEndToEndWorkflows(unittest.TestCase): """Run Cortex commands inside disposable Docker containers.""" - def _run(self, command: str, env: dict | None = None): + def _run(self, command: str, env: dict | None = None) -> DockerRunResult: effective_env = dict(BASE_ENV) if env: effective_env.update(env) diff --git a/test/test_coordinator.py b/test/test_coordinator.py index 4ac5a04..442b816 100644 --- a/test/test_coordinator.py +++ b/test/test_coordinator.py @@ -1,8 +1,7 @@ import unittest -from unittest.mock import Mock, patch, call +from unittest.mock import Mock, patch import tempfile import os -import time import sys sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) @@ -10,7 +9,6 @@ from cortex.coordinator import ( InstallationCoordinator, InstallationStep, - 
InstallationResult, StepStatus, install_docker ) @@ -316,7 +314,7 @@ def test_step_timing(self, mock_run): self.assertIsNotNone(step.start_time) self.assertIsNotNone(step.end_time) if step.end_time and step.start_time: - self.assertTrue(step.end_time > step.start_time) + self.assertGreater(step.end_time, step.start_time) self.assertIsNotNone(step.duration()) From 9d1ea39bdf6301c2ac8570ff484022a8050b1096 Mon Sep 17 00:00:00 2001 From: sahil Date: Wed, 3 Dec 2025 20:08:31 +0530 Subject: [PATCH 12/12] Add PyYAML dev dependency for test automation --- requirements-dev.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/requirements-dev.txt b/requirements-dev.txt index 0cf4c8a..0fa8ade 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -9,6 +9,9 @@ pytest-cov>=4.0.0 pytest-mock>=3.10.0 requests>=2.32.4 +# Configuration +PyYAML>=6.0.0 + # Code Quality black>=23.0.0 pylint>=2.17.0