From b21dd041bfe836fc5e5cd409cd7a4f9d4bb42478 Mon Sep 17 00:00:00 2001
From: Saurav Panda
Date: Wed, 19 Jun 2024 14:21:15 -0400
Subject: [PATCH] updated how the total_token is handled

---
 kaizen/generator/ui.py             |  5 ++++-
 kaizen/reviewer/code_review.py     | 12 +++++++++---
 kaizen/reviewer/work_summarizer.py |  5 ++++-
 pyproject.toml                     |  2 +-
 4 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/kaizen/generator/ui.py b/kaizen/generator/ui.py
index 0c252f04..4adf751d 100644
--- a/kaizen/generator/ui.py
+++ b/kaizen/generator/ui.py
@@ -81,7 +81,10 @@ def generate_module_tests(self, web_content: str, test_modules: dict, web_url: s
         This method generates UI testing points for all modules.
         """
         ui_tests = test_modules
-        total_usage = None
+        total_usage = {
+            "prompt_tokens": 0,
+            "completion_tokens": 0
+        }
         for module in ui_tests:
             for test in module["tests"]:
                 test_description = test["test_description"]
diff --git a/kaizen/reviewer/code_review.py b/kaizen/reviewer/code_review.py
index 9700d7d1..f210e902 100644
--- a/kaizen/reviewer/code_review.py
+++ b/kaizen/reviewer/code_review.py
@@ -63,7 +63,10 @@ def review_pull_request(
             PULL_REQUEST_DESC=pull_request_desc,
             CODE_DIFF=diff_text,
         )
-        total_usage = None
+        total_usage = {
+            "prompt_tokens": 0,
+            "completion_tokens": 0
+        }
         if self.provider.is_inside_token_limit(PROMPT=prompt):
             self.logger.debug("Processing Directly from Diff")
             resp, usage = self.provider.chat_completion(prompt, user=user)
@@ -86,7 +89,7 @@
                 PULL_REQUEST_DESC=pull_request_desc,
                 FILE_PATCH=patch_details,
             )
-            if self.provider.is_inside_token_limit(PROMPT=prompt):
+            if self.provider.is_inside_token_limit(PROMPT=prompt, percentage=85):
                 # TODO: Chunk this big files and process them
                 continue
             resp, usage = self.provider.chat_completion(prompt, user=user)
@@ -124,7 +127,10 @@ def generate_pull_request_desc(
             CODE_DIFF=diff_text,
         )
 
-        total_usage = None
+        total_usage = {
+            "prompt_tokens": 0,
+            "completion_tokens": 0
+        }
         if self.provider.is_inside_token_limit(PROMPT=prompt):
             self.logger.debug("Processing Directly from Diff")
             resp, usage = self.provider.chat_completion(prompt, user=user)
diff --git a/kaizen/reviewer/work_summarizer.py b/kaizen/reviewer/work_summarizer.py
index 4b0670f1..7270f161 100644
--- a/kaizen/reviewer/work_summarizer.py
+++ b/kaizen/reviewer/work_summarizer.py
@@ -22,7 +22,10 @@ def generate_work_summaries(
         summaries = []
         # Try to merge the files untill LLM can process the response
         combined_diff_data = ""
-        total_usage = None
+        total_usage = {
+            "prompt_tokens": 0,
+            "completion_tokens": 0
+        }
         for file_dict in diff_file_data:
             temp_prompt = combined_diff_data
             temp_prompt += f"""\n---->\nFile Name: {file_dict["file"]}\nPatch: {file_dict["patch"]}\n Status: {file_dict["status"]}"""
diff --git a/pyproject.toml b/pyproject.toml
index c6fb82d2..650fb430 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "kaizen-cloudcode"
-version = "0.2.5"
+version = "0.2.6"
 description = "An intelligent coding companion that accelerates your development workflow by providing efficient assistance, enabling you to craft high-quality code more rapidly."
 authors = ["Saurav Panda "]
 license = "Apache2.0"