Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Fix for Tokens Limit/Context issue | GPT4FREE | Auto Install Windows | Folders collapse/expand Monaco Editor #539

Open
wants to merge 9 commits into
base: main
Choose a base branch
from
2 changes: 2 additions & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -30,3 +30,5 @@ duckduckgo-search
orjson
gevent
gevent-websocket
g4f[all]
nodriver
5 changes: 3 additions & 2 deletions sample.config.toml
Original file line number Diff line number Diff line change
Expand Up @@ -27,5 +27,6 @@ OPENAI = "https://api.openai.com/v1"
LOG_REST_API = "true"
LOG_PROMPTS = "false"

[TIMEOUT]
INFERENCE = 60
[CUSTOM]
BLACKLIST_FOLDER = "node_modules, libs, package-lock.json, bun.lockb"
TIMEOUT_INFERENCE = 60
25 changes: 14 additions & 11 deletions src/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,14 +99,17 @@ def get_logs_dir(self):
def get_repos_dir(self):
    """Return the directory where cloned repositories are stored."""
    storage = self.config["STORAGE"]
    return storage["REPOS_DIR"]

def get_blacklist_dir(self):
    """Return the raw comma-separated folder blacklist from [CUSTOM]."""
    custom_section = self.config["CUSTOM"]
    return custom_section["BLACKLIST_FOLDER"]

def get_timeout_inference(self):
    """Return the inference timeout (seconds) from the [CUSTOM] section."""
    custom_section = self.config["CUSTOM"]
    return custom_section["TIMEOUT_INFERENCE"]

def get_logging_rest_api(self):
    """Return True when REST API logging is enabled (string "true" in config)."""
    flag = self.config["LOGGING"]["LOG_REST_API"]
    return flag == "true"

def get_logging_prompts(self):
    """Return True when prompt logging is enabled (string "true" in config)."""
    flag = self.config["LOGGING"]["LOG_PROMPTS"]
    return flag == "true"

def get_timeout_inference(self):
return self.config["TIMEOUT"]["INFERENCE"]

def set_bing_api_key(self, key):
self.config["API_KEYS"]["BING"] = key
Expand Down Expand Up @@ -168,8 +171,12 @@ def set_logging_prompts(self, value):
self.config["LOGGING"]["LOG_PROMPTS"] = "true" if value else "false"
self.save_config()

def set_blacklist_folder(self, value):
    """Store a new comma-separated folder blacklist and persist the config."""
    custom_section = self.config["CUSTOM"]
    custom_section["BLACKLIST_FOLDER"] = value
    self.save_config()

def set_timeout_inference(self, value):
    """Store a new inference timeout (seconds) and persist the config."""
    custom_section = self.config["CUSTOM"]
    custom_section["TIMEOUT_INFERENCE"] = value
    self.save_config()

def save_config(self):
Expand All @@ -179,10 +186,6 @@ def save_config(self):
def update_config(self, data):
    """Merge *data* (mapping of section name -> {key: value}) into the
    in-memory config and persist it once at the end.

    Only sections that already exist in the config are updated, so callers
    cannot create arbitrary new sections.  The previous implementation
    re-opened and re-dumped config.toml on every matching section inside
    the loop; all persistence now goes through save_config() exactly once.
    """
    for section, values in data.items():
        if section in self.config:
            for sub_key, sub_value in values.items():
                self.config[section][sub_key] = sub_value
    self.save_config()
6 changes: 6 additions & 0 deletions src/filesystem/read_code.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,14 +10,20 @@ class ReadCode:
def __init__(self, project_name: str):
    """Resolve the project directory and parse the folder blacklist.

    project_name: human-readable project name; mapped to its on-disk
    directory by lowercasing and replacing spaces with hyphens.
    """
    config = Config()
    project_path = config.get_projects_dir()
    blacklist_dir = config.get_blacklist_dir()
    self.directory_path = os.path.join(project_path, project_name.lower().replace(" ", "-"))
    # Split on bare commas (not ", ") so "a,b" is also two entries, and drop
    # empty entries: with the old split(', ') an empty BLACKLIST_FOLDER
    # produced [''] and the substring test then blacklisted EVERY file path.
    self.blacklist_dirs = [entry.strip() for entry in blacklist_dir.split(",") if entry.strip()]

def read_directory(self):
files_list = []

for root, _dirs, files in os.walk(self.directory_path):
for file in files:
try:
file_path = os.path.join(root, file)
if any(blacklist_dir in file_path for blacklist_dir in self.blacklist_dirs):
print(f"SKIPPED FILE: {file_path}")
continue
with open(file_path, 'r') as file_content:
files_list.append({"filename": file_path, "code": file_content.read()})
except:
Expand Down
24 changes: 24 additions & 0 deletions src/llm/g4f_client.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
from g4f.client import Client as g4f
import asyncio

from src.config import Config

class GPT4FREE:
    """Thin wrapper around the g4f client exposing the shared inference API.

    Models are registered in llm.py under ids prefixed with "g4f-" so they
    cannot collide with identically named models from other providers; the
    prefix is stripped again here before the request is issued.
    """

    def __init__(self):
        # Previous version built a Config() instance here but never used it.
        self.client = g4f()

    def inference(self, model_id: str, prompt: str) -> str:
        """Run a single-turn chat completion and return the reply text.

        model_id: provider-prefixed id, e.g. "g4f-gpt-4".
        prompt:   user prompt; surrounding whitespace is stripped.
        """
        model_id = model_id.replace("g4f-", "")
        chat_completion = self.client.chat.completions.create(
            model=model_id,
            messages=[
                {
                    "role": "user",
                    "content": prompt.strip(),
                }
            ],
            temperature=0,  # deterministic output for the agent pipeline
        )
        return chat_completion.choices[0].message.content
31 changes: 31 additions & 0 deletions src/llm/llm.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,14 @@
import sys

import tiktoken
import asyncio
from asyncio import WindowsSelectorEventLoopPolicy
from typing import List, Tuple

from src.socket_instance import emit_agent
from .ollama_client import Ollama
from .claude_client import Claude
from .g4f_client import GPT4FREE
from .openai_client import OpenAi
from .gemini_client import Gemini
from .mistral_client import MistralAi
Expand All @@ -19,17 +22,35 @@
TIKTOKEN_ENC = tiktoken.get_encoding("cl100k_base")

ollama = Ollama()
gpt4f = GPT4FREE()
logger = Logger()
agentState = AgentState()
config = Config()

# asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())

class LLM:
def __init__(self, model_id: str = None):
self.model_id = model_id
self.log_prompts = config.get_logging_prompts()
self.timeout_inference = config.get_timeout_inference()
self.models = {
"GPT4FREE": [
("Free GPT-4 Turbo", "g4f-gpt-4-turbo"),
("Free GPT-4", "g4f-gpt-4"),
("Free GPT-3.5 Turbo", "g4f-gpt-3.5-turbo-16k"),
("Free GPT-3.5", "g4f-gpt-3.5-long"),
("Free Llama3 70b", "g4f-llama3-70b"),
("Free Llama3 8b", "g4f-llama3-8b"),
("Free Llama3 70b Instruct", "g4f-llama3-70b-instruct"),
("Free Llama3 8b Instruct", "g4f-llama3-8b-instruct"),
("Free Mixtral 8x7B", "g4f-mixtral-8x7b"),
("Free Gemini", "g4f-gemini"),
("Free Gemini Pro", "g4f-gemini-pro"),
("Free Claude 3 Sonnet", "g4f-claude-3-sonnet"),
("Free Claude 3 Opus", "g4f-claude-3-opus"),
("Free Openchat 3.5", "g4f-openchat_3.5"),
],
"CLAUDE": [
("Claude 3 Opus", "claude-3-opus-20240229"),
("Claude 3 Sonnet", "claude-3-sonnet-20240229"),
Expand Down Expand Up @@ -83,6 +104,14 @@ def update_global_token_usage(string: str, project_name: str):

def inference(self, prompt: str, project_name: str) -> str:
self.update_global_token_usage(prompt, project_name)
if sys.platform == 'win32':
try:
asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())
except ImportError:
asyncio.set_event_loop_policy(asyncio.DefaultEventLoopPolicy())
print("WindowsSelectorEventLoopPolicy not available, using default event loop policy.")
else:
asyncio.set_event_loop_policy(asyncio.DefaultEventLoopPolicy())

model_enum, model_name = self.model_enum(self.model_id)

Expand All @@ -92,6 +121,7 @@ def inference(self, prompt: str, project_name: str) -> str:

model_mapping = {
"OLLAMA": ollama,
"GPT4FREE": gpt4f,
"CLAUDE": Claude(),
"OPENAI": OpenAi(),
"GOOGLE": Gemini(),
Expand Down Expand Up @@ -143,5 +173,6 @@ def inference(self, prompt: str, project_name: str) -> str:
logger.debug(f"Response ({model}): --> {response}")

self.update_global_token_usage(response, project_name)
asyncio.set_event_loop_policy(asyncio.DefaultEventLoopPolicy())

return response
58 changes: 58 additions & 0 deletions start.cmd
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
@echo off
rem Bootstrap script for Windows: installs Python/Bun/uv when missing,
rem creates the venv, launches the backend in a new window, then installs
rem and runs the UI in the current window.

rem Check if python is installed
rem (the findstr regex accepts any "Python 3.x" version line with x >= 1)
python --version | findstr /R "Python 3\.[1-9][0-9]*\." >nul
if %errorlevel% neq 0 (
echo Python is not installed, downloading Python 3.10.11...
PowerShell.exe -Command "irm https://www.python.org/ftp/python/3.10.11/python-3.10.11-amd64.exe -OutFile python-3.10.11-amd64.exe"
echo Download of Python 3.10.11 completed.

echo Installing Python 3.10.11...
rem Silent all-users install; PrependPath=1 updates PATH for future shells.
rem NOTE(review): the current shell's PATH is not refreshed by the installer;
rem confirm the later "uv venv" step still finds this fresh install.
python-3.10.11-amd64.exe /quiet InstallAllUsers=1 InstallLauncherAllUsers=1 PrependPath=1 Include_test=0
echo Python 3.10.11 has been installed successfully.
) else (
echo Python already installed.
)

rem Install Bun (JS runtime used for the UI) if it is not on PATH.
where bun >nul 2>nul
if %errorlevel% neq 0 (
echo Installing Bun. Accept Administrator request
PowerShell.exe -Command "Start-Process PowerShell -Verb RunAs -ArgumentList '-Command', 'irm bun.sh/install.ps1 | iex' -Wait"
echo Bun is installed.
) else (
echo Bun is already installed.
)

rem Install uv (Python package/venv manager) if it is not on PATH.
where uv >nul 2>nul
if %errorlevel% neq 0 (
echo Installing Uv. Accept Administrator request
PowerShell.exe -Command "Start-Process PowerShell -Verb RunAs -ArgumentList '-Command', 'irm https://astral.sh/uv/install.ps1 | iex' -Wait"
echo Uv is installed.
) else (
echo Uv is already installed.
)

rem Check if the virtual environment exists
if not exist .venv (
echo Creating virtual environment...
uv venv
)

rem Activate the virtual environment
rem The backend (deps install + devika.py) runs in a SEPARATE cmd window so
rem this script can continue with the UI steps below in parallel.
echo Activating virtual environment...
start cmd /k ".venv\Scripts\activate & echo Installing Python dependencies... & uv pip install -r requirements.txt & playwright install & echo Starting AI server... & python devika.py"

rem Navigate to the UI directory
cd ui/

rem Install frontend dependencies
echo Installing frontend dependencies...
bun install

rem Launch the UI
rem "bun run start" blocks until the UI process exits.
echo Launching UI...
bun run start

rem Deactivate the virtual environment
rem NOTE(review): the venv was only activated in the spawned window above, so
rem this "deactivate" in the current shell is likely a no-op - confirm.
echo Deactivating virtual environment...
deactivate
11 changes: 11 additions & 0 deletions ui/src/app.pcss
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,17 @@
body {
@apply bg-background text-foreground;
}

/* Horizontal flex container whose children are pushed to opposite edges. */
.align-container {
display: flex;
justify-content: space-between;
}

/* End state of a 0.2s show/reveal transition: fully opaque and with no
   vertical offset. NOTE(review): presumably paired with a hidden state
   (opacity 0 / translated) elsewhere - confirm against the components. */
.smooth-anim {
transition-duration: 0.2s;
opacity: 1;
transform: translateY(0px);
}

/* Styling for scrollbar */

Expand Down