diff --git a/kaizen/helpers/output.py b/kaizen/helpers/output.py
index 7c5a281e..b53078ac 100644
--- a/kaizen/helpers/output.py
+++ b/kaizen/helpers/output.py
@@ -13,8 +13,10 @@ PR_COLLAPSIBLE_TEMPLATE = """
-[{confidence}] -> {comment}
-**Fix:** {solution}
+[{confidence}] -> {comment} | \n
+**Potential Solution:**: {solution}
+
+ {file_name} | {start_line} - {end_line}
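A minimal sketch of how the reworked collapsible body renders. It reproduces only the four added lines visible in this hunk (the unchanged template lines fall outside the hunk), and the values passed to `.format()` below are invented sample data, not kaizen output.

```python
# Sketch only: the added template lines, kept verbatim from the hunk
# (including the trailing "\n" escape and the doubled colon as written).
TEMPLATE_SNIPPET = """[{confidence}] -> {comment} | \n
**Potential Solution:**: {solution}

 {file_name} | {start_line} - {end_line}
"""

if __name__ == "__main__":
    # Invented sample values, purely for illustration.
    print(
        TEMPLATE_SNIPPET.format(
            confidence="high",
            comment="Query is built by string concatenation",
            solution="Switch to parameterized queries",
            file_name="kaizen/helpers/output.py",
            start_line=13,
            end_line=22,
        )
    )
```

Since `{file_name}`, `{start_line}`, and `{end_line}` are new placeholders, every caller that formats `PR_COLLAPSIBLE_TEMPLATE` now has to supply those three fields as well.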
diff --git a/kaizen/llms/provider.py b/kaizen/llms/provider.py
index 8da1788b..b5ba3b64 100644
--- a/kaizen/llms/provider.py
+++ b/kaizen/llms/provider.py
@@ -59,6 +59,7 @@ def chat_completion(self, prompt, user: str = None):
         return response["choices"][0]["message"]["content"], response["usage"]
 
     def is_inside_token_limit(self, PROMPT, percentage=0.7):
+        # TODO: Also include system prompt
         messages = [{"user": "role", "content": PROMPT}]
         if (
             litellm.token_counter(model=self.model, messages=messages)
diff --git a/kaizen/reviewer/code_review.py b/kaizen/reviewer/code_review.py
index 9e330bdc..2c9ddb8f 100644
--- a/kaizen/reviewer/code_review.py
+++ b/kaizen/reviewer/code_review.py
@@ -164,14 +164,9 @@ def create_pr_review_text(self, topics):
                     file_name=review.get("file_name", "NA"),
                 )
                 markdown_output += ct + "\n"
+
         if high_ranked_issues > 0:
-            markdown_output = (
-                "❗ This review needs attention. 🚨\n\nHere are some feedback:\n\n"
-                + markdown_output
-            )
+            status_msg = "❗ Attention Required: This PR has potential issues. 🚨\n\n"
         else:
-            markdown_output = (
-                "✅ This is a good review! 👍\n\nHere are some feedback:\n\n"
-                + markdown_output
-            )
-        return markdown_title + markdown_output
+            status_msg = "✅ All Clear: This PR is ready to merge! 👍\n\n"
+        return markdown_title + status_msg + markdown_output
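To illustrate the TODO added in `provider.py`, here is a hypothetical sketch of counting the system prompt alongside the user prompt; the standalone function, its `system_prompt` and `max_tokens` parameters, and the comparison at the end are assumptions, since the hunk only shows the start of the method's check.

```python
import litellm

def is_inside_token_limit(model: str, system_prompt: str, prompt: str,
                          max_tokens: int, percentage: float = 0.7) -> bool:
    # Count both messages, not just the user prompt, as the TODO suggests.
    # (The hunk's context line builds the dict as {"user": "role", ...};
    # litellm's OpenAI-style format expects {"role": "user", ...}.)
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": prompt},
    ]
    used = litellm.token_counter(model=model, messages=messages)
    return used <= max_tokens * percentage
```

And a self-contained sketch of the new status-message logic in `create_pr_review_text`, extracted into a hypothetical helper (`build_review_text` is not a real kaizen function):

```python
def build_review_text(markdown_title: str, markdown_output: str,
                      high_ranked_issues: int) -> str:
    # Mirrors the new branches: pick a status banner once, then prepend it,
    # instead of rebuilding markdown_output inside each branch.
    if high_ranked_issues > 0:
        status_msg = "❗ Attention Required: This PR has potential issues. 🚨\n\n"
    else:
        status_msg = "✅ All Clear: This PR is ready to merge! 👍\n\n"
    return markdown_title + status_msg + markdown_output
```

The per-issue markdown is assembled exactly as before; only the banner wording changes and the old "Here are some feedback:" lead-in is dropped.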