Merged
13 changes: 4 additions & 9 deletions codeflash/api/aiservice.py
@@ -4,7 +4,6 @@
 import os
 import platform
 import time
-from pathlib import Path
 from typing import TYPE_CHECKING, Any, cast
 
 import requests
@@ -24,6 +23,8 @@
 from codeflash.version import __version__ as codeflash_version
 
 if TYPE_CHECKING:
+    from pathlib import Path
+
     from codeflash.discovery.functions_to_optimize import FunctionToOptimize
     from codeflash.models.ExperimentMetadata import ExperimentMetadata
     from codeflash.models.models import AIServiceRefinerRequest
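
Moving the pathlib import under TYPE_CHECKING removes a runtime import that is only needed for type annotations. A minimal sketch of the pattern, with an illustrative function that is not part of this PR:

    from __future__ import annotations

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        from pathlib import Path  # imported only during type checking, no runtime cost

    def count_lines(file_path: Path) -> int:
        # Postponed evaluation (PEP 563, via the __future__ import) keeps the
        # annotation a string at runtime, so the Path import never executes.
        return len(file_path.read_text().splitlines())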
@@ -557,7 +558,6 @@ def get_optimization_review(
         function_trace_id: str,
         coverage_message: str,
         replay_tests: str,
-        root_dir: Path,
         concolic_tests: str,  # noqa: ARG002
         calling_fn_details: str,
     ) -> str:
@@ -583,18 +583,13 @@
"""
diff_str = "\n".join(
[
unified_diff_strings(
code1=original_code[p],
code2=new_code[p],
fromfile=Path(p).relative_to(root_dir).as_posix(),
tofile=Path(p).relative_to(root_dir).as_posix(),
)
unified_diff_strings(code1=original_code[p], code2=new_code[p])
for p in original_code
if not is_zero_diff(original_code[p], new_code[p])
]
)
code_diff = f"```diff\n{diff_str}\n```"
logger.info("!lsp|Computing Optimization Review…")
logger.info("loading|Reviewing Optimization…")
payload = {
"code_diff": code_diff,
"explanation": explanation.raw_explanation_message,
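With the root_dir parameter gone, unified_diff_strings is called with only the two code versions, so the emitted diff no longer carries repo-relative file headers. A rough sketch of what such a helper could look like if it wraps difflib; the actual codeflash implementation may differ:

    import difflib

    def unified_diff_strings(code1: str, code2: str) -> str:
        # Hypothetical stand-in: diff two source strings with default (empty)
        # file labels; lineterm="" avoids doubled newlines in the output.
        return "\n".join(
            difflib.unified_diff(code1.splitlines(), code2.splitlines(), lineterm="")
        )

The joined result is what get_optimization_review wraps in a diff fence before adding it to the payload.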
1 change: 1 addition & 0 deletions codeflash/lsp/features/perform_optimization.py
@@ -134,4 +134,5 @@ def sync_perform_optimization(server: CodeflashLanguageServer, cancel_event: thr
"patch_file": str(patch_path),
"task_id": params.task_id,
"explanation": best_optimization.explanation_v2,
"optimizationReview": function_optimizer.optimization_review.capitalize(),
}
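
The LSP response now surfaces the review verdict next to the patch and explanation. Assuming the service returns lowercase labels such as "low" (the only value this PR's code compares against; other labels here are inferred), .capitalize() makes them display-ready:

    # Assumed lowercase verdicts; only "low" is confirmed by this PR.
    for verdict in ("low", "high", ""):
        print(repr(verdict.capitalize()))
    # 'Low'
    # 'High'
    # ''   (an unset review, e.g. after a failed request, stays empty)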
21 changes: 11 additions & 10 deletions codeflash/optimization/function_optimizer.py
@@ -246,6 +246,7 @@ def __init__(
         self.executor = concurrent.futures.ThreadPoolExecutor(
             max_workers=n_tests + 3 if self.experiment_id is None else n_tests + 4
         )
+        self.optimization_review = ""
[Contributor comment] Are we including the optimization review label in e2e tests?

     def can_be_optimized(self) -> Result[tuple[bool, CodeOptimizationContext, dict[Path, str]], str]:
         should_run_experiment = self.experiment_id is not None
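
Initializing optimization_review to an empty string gives the LSP handler above a safe value to read even before process_review runs or when the review call fails. A compressed, illustrative lifecycle (names abridged from the PR, values are placeholders):

    class FunctionOptimizer:
        def __init__(self) -> None:
            self.optimization_review = ""  # default before any review runs

        def process_review(self) -> None:
            # The PR sets this from the AI service response; "high" is a
            # placeholder value for illustration only.
            self.optimization_review = "high"

    optimizer = FunctionOptimizer()
    print(optimizer.optimization_review.capitalize())  # "" before the review
    optimizer.process_review()
    print(optimizer.optimization_review.capitalize())  # "High"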
@@ -1517,17 +1518,17 @@ def process_review(
         raise_pr = not self.args.no_pr
         staging_review = self.args.staging_review
         opt_review_response = ""
-        # Skip optimization review for async functions for now
-        if (raise_pr or staging_review) and not self.function_to_optimize.is_async:
-            data["root_dir"] = git_root_dir()
-            try:
-                opt_review_response = self.aiservice_client.get_optimization_review(
-                    **data, calling_fn_details=function_references
-                )
-            except Exception as e:
-                logger.debug(f"optimization review response failed, investigate {e}")
-        # Always set optimization_review in data (empty string for async functions)
+        # this will now run regardless of pr, staging review flags
+        try:
+            opt_review_response = self.aiservice_client.get_optimization_review(
+                **data, calling_fn_details=function_references
+            )
+        except Exception as e:
+            logger.debug(f"optimization review response failed, investigate {e}")
         data["optimization_review"] = opt_review_response
+        self.optimization_review = opt_review_response
+        if raise_pr or staging_review:
+            data["root_dir"] = git_root_dir()
         if raise_pr and not staging_review and opt_review_response != "low":
             # Ensure root_dir is set for PR creation (needed for async functions that skip opt_review)
             if "root_dir" not in data:
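
Net effect of the last hunk: the optimization review always runs, a service failure degrades to an empty verdict instead of aborting, and root_dir is attached only when a PR or staging review will actually be produced. A stripped-down sketch of that control flow; the client argument and the root path are stand-ins, not the real codeflash APIs:

    def process_review(client, data: dict, raise_pr: bool, staging_review: bool) -> str:
        verdict = ""
        try:
            # Always attempt the review, regardless of the PR/staging flags.
            verdict = client.get_optimization_review(**data)
        except Exception:
            verdict = ""  # degrade gracefully; callers see "no verdict"
        data["optimization_review"] = verdict
        if raise_pr or staging_review:
            data["root_dir"] = "/repo/root"  # stand-in for git_root_dir()
        if raise_pr and not staging_review and verdict != "low":
            ...  # proceed to open the PR (elided)
        return verdict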