diff --git a/codegen-examples/examples/cyclomatic_complexity/README.md b/codegen-examples/examples/cyclomatic_complexity/README.md new file mode 100644 index 000000000..1115d8d9d --- /dev/null +++ b/codegen-examples/examples/cyclomatic_complexity/README.md @@ -0,0 +1,148 @@ +# Cyclomatic Complexity Analyzer + +This example demonstrates how to analyze the cyclomatic complexity of Python codebases using Codegen. The script provides detailed insights into code complexity by analyzing control flow structures and providing a comprehensive report. + +> [!NOTE] +> The cyclomatic complexity metric helps identify complex code that might need refactoring. A higher score indicates more complex code with multiple decision points. + +## How the Analysis Script Works + +The script (`run.py`) performs the complexity analysis in several key steps: + +1. **Codebase Loading** + + ```python + codebase = Codebase.from_repo("fastapi/fastapi") + ``` + + - Loads any Python codebase into Codegen's analysis engine + - Works with local or remote Git repositories + - Supports analyzing specific commits + +1. **Complexity Calculation** + + ```python + def calculate_cyclomatic_complexity(code_block): + complexity = 1 # Base complexity + for statement in code_block.statements: + if isinstance(statement, IfBlockStatement): + complexity += 1 + len(statement.elif_statements) + ``` + + - Analyzes control flow structures (if/elif/else, loops, try/except) + - Calculates complexity based on decision points + - Handles nested structures appropriately + +1. **Function Analysis** + + ```python + callables = codebase.functions + [m for c in codebase.classes for m in c.methods] + for function in callables: + complexity = calculate_cyclomatic_complexity(function.code_block) + ``` + + - Processes both standalone functions and class methods + - Calculates complexity for each callable + - Tracks file locations and function names + +1. 
**Report Generation** + + ```python + print("\n📊 Cyclomatic Complexity Analysis") + print(f"  • Total Functions: {total_functions}") + print(f"  • Average Complexity: {average:.2f}") + ``` + + - Provides comprehensive complexity statistics + - Shows distribution of complexity across functions + - Identifies the most complex functions + +## Output + +``` +📊 Cyclomatic Complexity Analysis +============================================================ + +📈 Overall Stats: +  • Total Functions: 3538 +  • Average Complexity: 1.27 +  • Total Complexity: 4478 + +🔍 Top 10 Most Complex Functions: +------------------------------------------------------------ +  • jsonable_encoder 16 | fastapi/encoders.py +  • get_openapi 13 | fastapi/openapi/utils.py +  • __init__ 12 | fastapi/routing.py +  • solve_dependencies 10 | fastapi/dependencies/utils.py +  • main 9 | scripts/notify_translations.py +  • analyze_param 9 | fastapi/dependencies/utils.py +  • __init__ 8 | fastapi/params.py +  • __init__ 8 | fastapi/params.py +  • main 7 | scripts/deploy_docs_status.py +  • create_model_field 7 | fastapi/utils.py + +📉 Complexity Distribution: +  • Low (1-5): 3514 functions (99.3%) +  • Medium (6-10): 21 functions (0.6%) +  • High (>10): 3 functions (0.1%) +``` + +## Complexity Metrics + +The analyzer tracks several key metrics: + +### Complexity Sources + +- If statements (+1) +- Elif statements (+1 each) +- Else statements (+1) +- Loops (while/for) (+1) +- Try-except blocks (+1 per except) + +### Complexity Categories + +- Low (1-5): Generally clean and maintainable code +- Medium (6-10): Moderate complexity, may need attention +- High (>10): Complex code that should be reviewed + +## Running the Analysis + +```bash +# Install Codegen +pip install codegen + +# Run the analysis +python run.py +``` + +## Example Output + +``` +📊 Cyclomatic Complexity Analysis +============================================================ + +📈 Overall Stats: +  • Total Functions: 150 +  • Average Complexity: 3.45 +  • Total Complexity: 518 + +🔍 Top 10 Most Complex Functions: +------------------------------------------------------------ +  • validate_response 12 | ...api/endpoints/auth.py +  • process_request 10 | ...core/middleware.py +  • handle_exception 9 | ...utils/error_handlers.py + +📉 Complexity Distribution: +  • Low (1-5): 105 functions (70.0%) +  • Medium (6-10): 35 functions (23.3%) +  • High (>10): 10 functions (6.7%) +``` + +## Learn More + +- [About Cyclomatic Complexity](https://en.wikipedia.org/wiki/Cyclomatic_complexity) +- [Codegen Documentation](https://docs.codegen.com) + +## Contributing + +Feel free to submit issues and enhancement requests! 
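To make the counting rules above concrete, here is a small hand-worked example before the full script (`run.py`, shown next). The sample function is invented purely for illustration, and the arithmetic simply follows the rules listed under Complexity Sources, so treat it as a sketch rather than actual analyzer output.

```python
def classify(values):
    # Base complexity: 1 (the single default path)
    if not values:              # if statement    -> +1
        return "empty"
    elif len(values) == 1:      # elif statement  -> +1
        return "single"
    else:                       # else statement  -> +1
        total = 0
        for v in values:        # for loop        -> +1
            try:
                total += int(v)
            except ValueError:  # except block    -> +1
                continue
        return str(total)

# Cyclomatic complexity: 1 + 5 = 6, which falls in the "Medium (6-10)" category.
```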
diff --git a/codegen-examples/examples/cyclomatic_complexity/run.py b/codegen-examples/examples/cyclomatic_complexity/run.py new file mode 100644 index 000000000..0b5943ad1 --- /dev/null +++ b/codegen-examples/examples/cyclomatic_complexity/run.py @@ -0,0 +1,88 @@ +import codegen +from codegen import Codebase +from codegen.sdk.enums import ProgrammingLanguage +from codegen.sdk.core.statements.for_loop_statement import ForLoopStatement +from codegen.sdk.core.statements.if_block_statement import IfBlockStatement +from codegen.sdk.core.statements.try_catch_statement import TryCatchStatement +from codegen.sdk.core.statements.while_statement import WhileStatement + + +@codegen.function("cyclomatic-complexity") +def run(codebase: Codebase): + def calculate_cyclomatic_complexity(code_block): + # Initialize cyclomatic complexity count + complexity = 1 # Start with one for the default path + + # Count decision points + for statement in code_block.statements: + if isinstance(statement, IfBlockStatement): + complexity += 1 + len(statement.elif_statements) # +1 for if, each elif adds another path + if statement.else_statement: + complexity += 1 + elif isinstance(statement, WhileStatement) or isinstance(statement, ForLoopStatement): + complexity += 1 # Loops introduce a new path + elif isinstance(statement, TryCatchStatement): + complexity += 1 # try-catch introduces a new path + # Count except blocks by counting nested code blocks after the first one (try block) + complexity += len(statement.nested_code_blocks) - 1 # -1 to exclude the try block itself + + return complexity + + # Initialize total complexity + total_complexity = 0 + # Count total functions + total_functions = 0 + # Store results for sorting + results = [] + + # Get all functions or methods + callables = codebase.functions + [m for c in codebase.classes for m in c.methods] + + # Analyze each function + for function in callables: + complexity = calculate_cyclomatic_complexity(function.code_block) + results.append((function.name, complexity, function.filepath)) + total_complexity += complexity + total_functions += 1 + + # Sort by complexity (highest first) + results.sort(key=lambda x: x[1], reverse=True) + + # Print summary + print("\n📊 Cyclomatic Complexity Analysis") + print("=" * 60) + + if total_functions > 0: + average = total_complexity / total_functions + print("\n📈 Overall Stats:") + print(f"  • Total Functions: {total_functions}") + print(f"  • Average Complexity: {average:.2f}") + print(f"  • Total Complexity: {total_complexity}") + + print("\n🔍 Top 10 Most Complex Functions:") + print("-" * 60) + for name, complexity, filepath in results[:10]: + # Truncate filepath if too long + if len(filepath) > 40: + filepath = "..." 
+ filepath[-37:] + print(f"  • {name:<30} {complexity:>3} | {filepath}") + + # Complexity distribution + low = sum(1 for _, c, _ in results if c <= 5) + medium = sum(1 for _, c, _ in results if 5 < c <= 10) + high = sum(1 for _, c, _ in results if c > 10) + + print("\n📉 Complexity Distribution:") + print(f"  • Low (1-5): {low} functions ({low / total_functions * 100:.1f}%)") + print(f"  • Medium (6-10): {medium} functions ({medium / total_functions * 100:.1f}%)") + print(f"  • High (>10): {high} functions ({high / total_functions * 100:.1f}%)") + else: + print("❌ No functions found in the codebase to analyze.") + + +if __name__ == "__main__": + print("🔍 Analyzing codebase...") + codebase = Codebase.from_repo("fastapi/fastapi", commit="887270ff8a54bb58c406b0651678a27589793d2f", programming_language=ProgrammingLanguage.PYTHON) + + print("Running analysis...") + run(codebase) diff --git a/codegen-examples/examples/delete_dead_code/README.md b/codegen-examples/examples/delete_dead_code/README.md new file mode 100644 index 000000000..3792cbd26 --- /dev/null +++ b/codegen-examples/examples/delete_dead_code/README.md @@ -0,0 +1,82 @@ +# Delete Dead Code + +This example demonstrates how to identify and remove dead code from a codebase using Codegen. The script efficiently cleans up unused functions and variables, helping maintain a lean and efficient codebase. + +> [!NOTE] +> Dead code refers to code that is not being used or referenced anywhere in your codebase. However, some code might appear unused but should not be deleted, such as test files, functions with decorators, public API endpoints, and event handlers. + +## How the Dead Code Removal Script Works + +The script (`run.py`) performs the dead code removal in several key steps: + +1. **Codebase Loading** + + ```python + codebase = Codebase.from_repo("tox-dev/tox", programming_language=ProgrammingLanguage.PYTHON) + ``` + + - Loads a codebase using the `Codebase.from_repo` method + - This example uses the `tox-dev/tox` repository because it is mostly self-contained + +1. **Function Removal** + + ```python + for function in codebase.functions: + if "test" in function.file.filepath: + continue + if function.decorators: + continue + if not function.usages and not function.call_sites: + print(f"🗑️ Removing unused function: {function.name}") + function.remove() + ``` + + - Skips test files and decorated functions + - Removes functions with no usages or call sites + +1. **Variable Cleanup** + + ```python + for func in codebase.functions: + for var_assignments in func.code_block.local_var_assignments: + if not var_assignments.local_usages: + print(f"🧹 Removing unused variable: {var_assignments.name}") + var_assignments.remove() + ``` + + - Iterates through local variable assignments + - Removes variables with no local usages + +## Running the Script + +```bash +# Install Codegen +pip install codegen + +# Run the script +python run.py +``` + +## Example Output + +``` +🚮 Deleting dead code... + +🗑️ Removing unused function: _get_parser_doc +🧹 Removing unused variable: decoded +🧹 Removing unused variable: shebang_line +... +🧹 Removing unused variable: _ + +🔧 Total functions removed: 2 +📦 Total variables removed: 240 +``` + +## Learn More + +- [Deleting Dead Code](https://docs.codegen.com/tutorials/deleting-dead-code) +- [Codegen Documentation](https://docs.codegen.com) + +## Contributing + +Feel free to submit issues and enhancement requests! 
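Before the full removal script (`run.py`, shown next), here is a minimal dry-run sketch that only reports what would be removed, without modifying anything. It reuses the attributes shown in the snippets above (`usages`, `call_sites`, `decorators`, `file.filepath`); the `dry_run` name and its decorator label are invented for this illustration, so read it as a sketch rather than a supported workflow.

```python
import codegen
from codegen import Codebase


@codegen.function("dead-code-dry-run")
def dry_run(codebase: Codebase):
    """Report (but do not remove) functions that look dead, using the same filters as run.py."""
    candidates = []
    for function in codebase.functions:
        # Same guards as the removal script: skip test files and decorated functions
        if "test" in function.file.filepath or function.decorators:
            continue
        # A function with no usages and no call sites is a removal candidate
        if not function.usages and not function.call_sites:
            candidates.append((function.name, function.file.filepath))

    for name, filepath in candidates:
        print(f"would remove: {name} ({filepath})")
    print(f"{len(candidates)} candidate functions found")
```

Reviewing the printed list before running the real script is a cheap way to catch false positives such as public API entry points or event handlers, which the note at the top of this README calls out.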
diff --git a/codegen-examples/examples/delete_dead_code/run.py b/codegen-examples/examples/delete_dead_code/run.py new file mode 100644 index 000000000..888cc4012 --- /dev/null +++ b/codegen-examples/examples/delete_dead_code/run.py @@ -0,0 +1,44 @@ +import codegen +from codegen import Codebase +from codegen.sdk.enums import ProgrammingLanguage + + +@codegen.function("delete-dead-code") +def run(codebase: Codebase): + removed_functions_count = 0 + removed_variables_count = 0 + + for function in codebase.functions: + # Skip test files + if "test" in function.file.filepath: + continue + + # Skip decorated functions + if function.decorators: + continue + + # Check if the function has no usages and no call sites + if not function.usages and not function.call_sites: + print(f"🗑️ Removing unused function: {function.name}") + function.remove() + removed_functions_count += 1 + + # Clean up unused variables + for func in codebase.functions: + for var_assignments in func.code_block.local_var_assignments: + if not var_assignments.local_usages: + print(f"🧹 Removing unused variable: {var_assignments.name}") + var_assignments.remove() + removed_variables_count += 1 + + print("\n") + print(f"🔧 Total functions removed: {removed_functions_count}") + print(f"📦 Total variables removed: {removed_variables_count}") + + +if __name__ == "__main__": + print("🔍 Analyzing codebase...") + codebase = Codebase.from_repo("tox-dev/tox", programming_language=ProgrammingLanguage.PYTHON, commit="b588b696e0940c1813014b31b68d7660d8a1914f") + + print("🚮 Deleting dead code...") + run(codebase) diff --git a/codegen-examples/examples/document_functions/README.md b/codegen-examples/examples/document_functions/README.md new file mode 100644 index 000000000..38db37e56 --- /dev/null +++ b/codegen-examples/examples/document_functions/README.md @@ -0,0 +1,100 @@ +# Automated Function Documentation Generator + +This example demonstrates how to use Codegen to automatically generate comprehensive docstrings for functions by analyzing their dependencies and usage patterns within a codebase. + +## Overview + +The script uses Codegen's symbol analysis capabilities to: + +1. Identify functions without docstrings +1. Analyze their dependencies and usages up to N degrees deep +1. 
Generate contextually aware docstrings using AI + +## Key Features + +### Recursive Context Collection + +The script recursively collects both dependencies and usages to provide comprehensive context for docstring generation: + +```python +def get_extended_context(symbol: Symbol, degree: int) -> tuple[set[Symbol], set[Symbol]]: + """Recursively collect dependencies and usages up to the specified degree.""" + dependencies = set() + usages = set() + + if degree > 0: + for dep in symbol.dependencies: + if isinstance(dep, Import): + dep = hop_through_imports(dep) + if isinstance(dep, Symbol): + dependencies.add(dep) + # Recursively collect nested context + dep_deps, dep_usages = get_extended_context(dep, degree - 1) + dependencies.update(dep_deps) + usages.update(dep_usages) +``` + +### Import Resolution + +The script intelligently resolves imports to find the actual symbol definitions: + +```python +def hop_through_imports(imp: Import) -> Symbol | ExternalModule: + """Finds the root symbol for an import""" + if isinstance(imp.imported_symbol, Import): + return hop_through_imports(imp.imported_symbol) + return imp.imported_symbol +``` + +## Running the Script + +```bash +# Install Codegen +pip install codegen + +# Run the script +python run.py +``` + +The script will: + +- Process each function in the codebase +- Skip functions that already have docstrings +- Generate contextually aware docstrings for undocumented functions +- Commit changes incrementally for safe early termination + +## Example Output + +The script provides detailed progress information: + +``` +[1/150] Skipping my_function - already has docstring +[2/150] Generating docstring for process_data at src/utils.py +  ✓ Generated docstring +[3/150] Generating docstring for validate_input at src/validation.py +  ✗ Failed to generate docstring +``` + +## Features + +- **Intelligent Context Collection**: Analyzes both dependencies and usages to understand function purpose +- **Import Resolution**: Follows import chains to find actual symbol definitions +- **Incremental Commits**: Saves progress after each function for safe interruption +- **Progress Tracking**: Detailed logging of processing status +- **Existing Docstring Preservation**: Skips functions that are already documented + +## Use Cases + +- Documenting legacy codebases +- Maintaining documentation standards in large projects +- Onboarding new team members with better code documentation +- Preparing codebases for public release + +## Learn More + +- [Creating Documentation](https://docs.codegen.com/tutorials/creating-documentation#creating-documentation) +- [Codegen Documentation](https://docs.codegen.com) + +## Contributing + +Feel free to submit issues and enhancement requests! 
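As a quick illustration of how the two helpers above compose, the following sketch collects a 1-degree context for a single function and prints what was gathered. It assumes the helper definitions live in `run.py` (the next file in this diff) and are importable from the same directory; the repository and the target function name are illustrative choices, not part of the example.

```python
from codegen import Codebase

# Assumes run.py from this example is on the import path so its helper is reusable.
from run import get_extended_context

codebase = Codebase.from_repo("fastapi/fastapi")
target = next(f for f in codebase.functions if f.name == "jsonable_encoder")

# Collect a shallower, 1-degree context instead of the script's default N_DEGREE = 2
dependencies, usages = get_extended_context(target, 1)
print(f"{target.name}: {len(dependencies)} dependencies, {len(usages)} usages collected")
for sym in sorted(dependencies | usages, key=lambda s: s.name):
    print(f"  - {sym.name} ({sym.filepath})")
```

A shallower degree keeps the prompt passed to `codebase.ai` small; raising it trades token cost for richer context, which is the main tuning knob this example exposes.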
diff --git a/codegen-examples/examples/document_functions/run.py b/codegen-examples/examples/document_functions/run.py new file mode 100644 index 000000000..3cc991218 --- /dev/null +++ b/codegen-examples/examples/document_functions/run.py @@ -0,0 +1,119 @@ +import codegen +from codegen import Codebase +from codegen.sdk.core.external_module import ExternalModule +from codegen.sdk.core.import_resolution import Import +from codegen.sdk.core.symbol import Symbol + + +def hop_through_imports(imp: Import) -> Symbol | ExternalModule: + """Finds the root symbol for an import""" + if isinstance(imp.imported_symbol, Import): + return hop_through_imports(imp.imported_symbol) + return imp.imported_symbol + + +def get_extended_context(symbol: Symbol, degree: int) -> tuple[set[Symbol], set[Symbol]]: + """Recursively collect dependencies and usages up to the specified degree. + + Args: + symbol: The symbol to collect context for + degree: How many levels deep to collect dependencies and usages + + Returns: + A tuple of (dependencies, usages) where each is a set of related Symbol objects + """ + dependencies = set() + usages = set() + + if degree > 0: + # Collect direct dependencies + for dep in symbol.dependencies: + # Hop through imports to find the root symbol + if isinstance(dep, Import): + dep = hop_through_imports(dep) + + if isinstance(dep, Symbol) and dep not in dependencies: + dependencies.add(dep) + dep_deps, dep_usages = get_extended_context(dep, degree - 1) + dependencies.update(dep_deps) + usages.update(dep_usages) + + # Collect usages in the current symbol + for usage in symbol.usages: + usage_symbol = usage.usage_symbol + # Hop through imports for usage symbols too + if isinstance(usage_symbol, Import): + usage_symbol = hop_through_imports(usage_symbol) + + if isinstance(usage_symbol, Symbol) and usage_symbol not in usages: + usages.add(usage_symbol) + usage_deps, usage_usages = get_extended_context(usage_symbol, degree - 1) + dependencies.update(usage_deps) + usages.update(usage_usages) + + return dependencies, usages + + +@codegen.function("document-functions") +def run(codebase: Codebase): + # Define the maximum degree of dependencies and usages to consider for context + N_DEGREE = 2 + + # Filter out test and tutorial functions first + functions = [f for f in codebase.functions if not any(pattern in f.name.lower() for pattern in ["test", "tutorial"]) and not any(pattern in f.filepath.lower() for pattern in ["test", "tutorial"])] + + # Track progress for user feedback + total_functions = len(functions) + processed = 0 + + print(f"Found {total_functions} functions to process (excluding tests and tutorials)") + + for function in functions: + processed += 1 + + # Skip if already has docstring + if function.docstring: + print(f"[{processed}/{total_functions}] Skipping {function.name} - already has docstring") + continue + + print(f"[{processed}/{total_functions}] Generating docstring for {function.name} at {function.filepath}") + + # Collect context using N-degree dependencies and usages + dependencies, usages = get_extended_context(function, N_DEGREE) + + # Generate a docstring using the AI with the context + docstring = codebase.ai( + """ + Generate a docstring for this function using the provided context. 
+ The context includes: + - dependencies: other symbols this function depends on + - usages: other symbols that use this function + """, + target=function, + # `codebase.ai` is smart about stringifying symbols + context={"dependencies": list(dependencies), "usages": list(usages)}, + ) + + # Set the generated docstring for the function + if docstring: + function.set_docstring(docstring) + print("  ✓ Generated docstring") + else: + print("  ✗ Failed to generate docstring") + + # Commit after each function so work is saved incrementally + # This allows for: + # 1. Safe early termination - progress won't be lost + # 2. Immediate feedback - can check results while running + # 3. Smaller atomic changes - easier to review/revert if needed + codebase.commit() + + print(f"\nCompleted processing {total_functions} functions") + + +if __name__ == "__main__": + print("Parsing codebase...") + codebase = Codebase.from_repo("fastapi/fastapi", commit="887270ff8a54bb58c406b0651678a27589793d2f") + + print("Running function...") + run(codebase) diff --git a/codegen-examples/examples/pr_review_bot/README.md b/codegen-examples/examples/pr_review_bot/README.md new file mode 100644 index 000000000..979750a29 --- /dev/null +++ b/codegen-examples/examples/pr_review_bot/README.md @@ -0,0 +1,129 @@ +# AI-Powered Pull Request Review Bot + +This example demonstrates how to use Codegen to create an intelligent PR review bot that analyzes code changes and their dependencies to provide comprehensive code reviews. The bot uses GPT-4 to generate contextual feedback based on modified code and its relationships. + +> [!NOTE] +> This codemod helps development teams by providing automated, context-aware code reviews that consider both direct and indirect code dependencies. + +## How the PR Review Bot Works + +The script analyzes pull requests in several key steps: + +1. **Symbol Analysis** + + ```python + modified_symbols = codebase.get_modified_symbols_in_pr(pr_number) + for symbol in modified_symbols: + deps = codebase.get_symbol_dependencies(symbol, max_depth=2) + rev_deps = codebase.get_symbol_dependents(symbol, max_depth=2) + ``` + + - Identifies modified symbols in the PR + - Analyzes dependencies up to 2 levels deep + - Tracks reverse dependencies (symbols that depend on changes) + +1. **Context Building** + + ```python + context = {"pr_title": pr.title, "pr_body": pr.body, "modified_symbols": [...], "context_symbols": [...]} + ``` + + - Gathers PR metadata + - Collects modified code content + - Includes relevant dependency context + +1. **AI Review Generation** + + ```python + review = codebase.ai_client.llm_query_with_retry(messages=[...], model="gpt-4", max_tokens=2000) + ``` + + - Uses GPT-4 for analysis + - Generates comprehensive review feedback + - Considers full context of changes + +## Why This Makes Code Review Better + +1. **Context-Aware Analysis** + + - Understands code dependencies + - Considers impact of changes + - Reviews code in proper context + +1. **Comprehensive Review** + + - Analyzes direct modifications + - Evaluates dependency impact + - Suggests improvements + +1. **Consistent Feedback** + + - Structured review format + - Thorough analysis every time + - Scalable review process + +## Review Output Format + +The bot provides structured feedback including: + +``` +1. Overall Assessment + - High-level review of changes + - Impact analysis + +2. Specific Code Feedback + - Detailed code comments + - Style suggestions + - Best practices + +3. 
Potential Issues + - Security concerns + - Performance impacts + - Edge cases + +4. Dependency Analysis + - Impact on dependent code + - Breaking changes + - Integration considerations + +``` + +## Key Benefits to Note + +1. **Better Code Quality** + + - Thorough code analysis + - Consistent review standards + - Early issue detection + +1. **Time Savings** + + - Automated initial review + - Quick feedback loop + - Reduced review burden + +1. **Knowledge Sharing** + + - Educational feedback + - Best practice suggestions + - Team learning + +## Configuration Options + +You can customize the review by: + +- Adjusting dependency depth +- Modifying the AI prompt +- Changing the review focus areas +- Tuning the GPT-4 parameters + +## Learn More + +- [Codegen Documentation](https://docs.codegen.com) +- [OpenAI API Documentation](https://platform.openai.com/docs/api-reference) +- [GitHub API Documentation](https://docs.github.com/en/rest) +- [Codegen llm integration](https://docs.codegen.com/building-with-codegen/calling-out-to-llms) + +## Contributing + +Feel free to submit issues and enhancement requests! Contributions to improve the review bot's capabilities are welcome. diff --git a/codegen-examples/examples/pr_review_bot/run.py b/codegen-examples/examples/pr_review_bot/run.py new file mode 100644 index 000000000..c3cd3206e --- /dev/null +++ b/codegen-examples/examples/pr_review_bot/run.py @@ -0,0 +1,92 @@ +import codegen +from codegen import Codebase +from codegen.sdk.enums import ProgrammingLanguage +from codegen.sdk.codebase.config import CodebaseConfig, GSFeatureFlags, Secrets +import json + +github_token = "Your github token" +open_ai_key = "your open ai key" +pr_number = 0 # Your PR number must be an integer + + +@codegen.function("pr-review-bot") +def run(codebase: Codebase): + context_symbols = set() + + modified_symbols = codebase.get_modified_symbols_in_pr(pr_number) + for symbol in modified_symbols: + # Get direct dependencies + deps = codebase.get_symbol_dependencies(symbol, max_depth=2) + context_symbols.update(deps) + + # Get reverse dependencies (symbols that depend on this one) + rev_deps = codebase.get_symbol_dependents(symbol, max_depth=2) + context_symbols.update(rev_deps) + + # Prepare context for LLM + context = { + "modified_symbols": [ + { + "name": symbol.name, + "type": symbol.symbol_type.value, + "filepath": symbol.filepath, + "content": symbol.content, + } + for symbol in modified_symbols + ], + "context_symbols": [ + { + "name": symbol.name, + "type": symbol.symbol_type.value, + "filepath": symbol.filepath, + "content": symbol.content, + } + for symbol in context_symbols + ], + } + + system_prompt = """ + You are a helpful assistant that reviews pull requests and provides feedback on the code. + """ + # Generate review using AI + prompt = f"""Please review this pull request based on the following context: + +Title: {context.get("pr_title", "N/A")} +Description: {context.get("pr_body", "N/A")} + +Modified Symbols: +{json.dumps(context["modified_symbols"], indent=2)} + +Related Context (Dependencies): +{json.dumps(context["context_symbols"], indent=2)} + +Please provide a thorough code review that includes: +1. Overall assessment +2. Specific feedback on modified code +3. Potential issues or improvements +4. Impact on dependencies +5. 
Suggestions for testing +""" + + review = codebase.ai_client.llm_query_with_retry(messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": prompt}], model="gpt-4", max_tokens=2000, temperature=0.7) + return review + + +if __name__ == "__main__": + print("Starting codebase analysis...") + codebase = Codebase.from_repo( + "getsentry/sentry", + shallow=False, + programming_language=ProgrammingLanguage.PYTHON, + config=CodebaseConfig( + secrets=Secrets(openai_key=open_ai_key, github_api_key=github_token), + feature_flags=GSFeatureFlags( + sync_enabled=True, + ), + ), + ) + review = run(codebase) + print(review) + + print("Codebase analysis complete.")
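As written, the prompt falls back to "N/A" for the PR title and body because `run.py` never fetches the pull request's metadata (the README's Context Building step shows them coming from a `pr` object), and the finished review is only printed to stdout. One hedged way to fill both gaps is sketched below; it assumes PyGithub (`pip install PyGithub`) as an extra dependency, reuses the `github_token` and `pr_number` placeholders from the top of `run.py`, and hard-codes the repository name, none of which are part of the original example.

```python
# Sketch only: PyGithub glue for the PR review bot above.
from github import Github

REPO_NAME = "getsentry/sentry"  # assumption: the same repository the script analyzes


def fetch_pr_metadata(token: str, repo_name: str, number: int) -> dict:
    """Return the PR title/body so they can be merged into the review context."""
    pull = Github(token).get_repo(repo_name).get_pull(number)
    return {"pr_title": pull.title, "pr_body": pull.body or ""}


def post_review(token: str, repo_name: str, number: int, review: str) -> None:
    """Post the generated review back to the pull request as a comment."""
    pull = Github(token).get_repo(repo_name).get_pull(number)
    pull.create_issue_comment(review)
```

Inside `run`, `context.update(fetch_pr_metadata(github_token, REPO_NAME, pr_number))` could be called before the prompt is built so the `pr_title`/`pr_body` fields carry real values, and `post_review(github_token, REPO_NAME, pr_number, review)` could follow `run(codebase)` in the `__main__` block instead of only printing the result.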