diff --git a/agents/__init__.py b/agents/__init__.py
deleted file mode 100644
index 4eb3949..0000000
--- a/agents/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from main import *
-
-
diff --git a/agents/main/__init__.py b/agents/main/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/agents/main/control_center/__init__.py b/agents/main/control_center/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/agents/main/control_center/browser/__init__.py b/agents/main/control_center/browser/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/agents/main/control_center/browser/browser.py b/agents/main/control_center/browser/browser.py
deleted file mode 100644
index 4a5f4b8..0000000
--- a/agents/main/control_center/browser/browser.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import requests
-
-class Browser:
-    def __init__(self, computer):
-        self.computer = computer
-
-    def search(self, query):
-        """
-        Searches the web for the specified query and returns the results
-        """
-        response = requests.get(
-            f'{self.computer.api_base.strip("/")}/browser/search',
-            params={"query": query},
-        )
-        return response.json()["result"]
-
-    class Search:
-        def __init__(self, browser, query):
-            self.browser = browser
-            self.query = query
-            self.results = self.browser.search(query)
-
-        def filter_by_keyword(self, keyword):
-            """
-            Filters the search results to only include items containing the specified keyword.
-            """
-            filtered_results = []
-            for result in self.results:
-                if keyword.lower() in result['title'].lower():
-                    filtered_results.append(result)
-            return filtered_results
-
-# Example usage
-if __name__ == "__main__":
-    # Assuming `computer` is an object with an `api_base` attribute
-    computer = type('Computer', (object,), {'api_base': 'https://example.com'})()
-    browser = Browser(computer)
-    search = Browser.Search(browser, "Python programming")
-    filtered_results = search.filter_by_keyword("Python")
-    print(filtered_results)
diff --git a/agents/main/control_center/control_board.py b/agents/main/control_center/control_board.py
deleted file mode 100644
index dfe8fcb..0000000
--- a/agents/main/control_center/control_board.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# # controlboard.py
-
-from llms import LLM_Switcher, ModelInterface
-
-# Example models
-model1 = ModelInterface("Model1", "https://api.model1.com")
-model2 = ModelInterface("Model2", "https://api.model2.com")
-model3 = ModelInterface("Model3", "https://api.model3.com")
-
-# List of models to switch between
-models = [model1, model2, model3]
-
-def control_board(query):
-    """
-    Orchestrates the process of handling user queries by using the LLM switcher.
-    """
-    switcher = LLM_Switcher(models)
-    response = switcher.switch_and_request(query)
-    if response:
-        # Process the response as needed
-        # For demonstration, we'll just return the response as is
-        return response
-    else:
-        return {"error": "No model could handle the request."}
-
-if __name__ == "__main__":
-    # Example usage
-    query = "What is the meaning of life?"
-    result = control_board(query)
-    print(result)
diff --git a/agents/main/llms/__init__.py b/agents/main/llms/__init__.py
deleted file mode 100644
index e69de29..0000000
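For reference, a minimal sketch (not part of the diff) of how the removed Browser and Browser.Search classes above could be exercised without a live backend, stubbing requests.get with the standard-library mock. The api_base value and the {"result": [...]} payload shape are assumptions modeled on the deleted browser.py; the import path is the one that existed before this deletion.

# Sketch only: stubs requests.get so the removed Browser.search logic can be checked offline.
from types import SimpleNamespace
from unittest.mock import MagicMock, patch

from agents.main.control_center.browser.browser import Browser  # path prior to this deletion

# Assumed payload shape, mirroring response.json()["result"] in the deleted code.
fake_payload = {"result": [{"title": "Python tutorial"}, {"title": "Cooking tips"}]}

with patch("requests.get") as mock_get:
    # Browser.search calls requests.get(...).json()["result"], so stub both steps.
    mock_get.return_value = MagicMock(json=lambda: fake_payload)
    computer = SimpleNamespace(api_base="https://example.com/")  # placeholder base URL
    search = Browser.Search(Browser(computer), "Python programming")
    assert search.filter_by_keyword("python") == [{"title": "Python tutorial"}]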
diff --git a/agents/main/llms/main.py b/agents/main/llms/main.py
deleted file mode 100644
index 5745c34..0000000
--- a/agents/main/llms/main.py
+++ /dev/null
@@ -1,51 +0,0 @@
-## Here you will attach or call your own model.. you can bring your own model or pay for SLAM-1 which was designed to work with Stable Agents
-
-import requests
-import time
-
-class ModelInterface:
-    def __init__(self, name, endpoint):
-        self.name = name
-        self.endpoint = endpoint
-
-    def request(self, query, timeout=None, max_tokens=None):
-        # Placeholder for the actual request logic
-        # This example simulates a request with a delay and a response
-        time.sleep(timeout) # Simulate a delay
-        response = {"tokens": 100, "result": "This is a placeholder response."}
-        if max_tokens and response["tokens"] > max_tokens:
-            raise Exception("Token count exceeded.")
-        return response
-
-# Example models
-model1 = ModelInterface("Model1", "https://api.model1.com")
-model2 = ModelInterface("Model2", "https://api.model2.com")
-model3 = ModelInterface("Model3", "https://api.model3.com")
-
-# List of models to switch between
-models = [model1, model2, model3]
-
-class LLM_Switcher:
-    def __init__(self, models):
-        self.models = models
-
-    def switch_and_request(self, query, timeout=5, max_tokens=100):
-        for model in self.models:
-            try:
-                response = model.request(query, timeout=timeout, max_tokens=max_tokens)
-                if response:
-                    return response
-            except TimeoutError:
-                print(f"{model.name} timed out. Switching to the next model.")
-            except Exception as e:
-                print(f"Error with {model.name}: {e}. Switching to the next model.")
-        return None
-
-if __name__ == "__main__":
-    switcher = LLM_Switcher(models)
-    query = "What is the meaning of life?"
-    response = switcher.switch_and_request(query)
-    if response:
-        print("Response received:", response)
-    else:
-        print("No model could handle the request.")
diff --git a/agents/main/utils/__init__.py b/agents/main/utils/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/agents/server.py b/agents/server.py
deleted file mode 100644
index 2624dac..0000000
--- a/agents/server.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import uvicorn
-import json
-from typing import Generator, Union
-
-from fastapi import Body, FastAPI, Request, Response, Websocket
-
-
-def server(agents, host="0.0.0.0", port=8000):
-    app = FastAPI()
-    router = APIRouter()
-
-    @app.post("/test/agents")
-    async def action():
-        return {"message": "test"}
-
-    @app.
-
-
-# uvicorn.run(app, host=host, port=port)
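The removed agents/server.py would not have imported as written: Websocket should be WebSocket, APIRouter is used without being imported, the json, Body, Request, Response, Generator, and Union imports are unused, and the trailing "@app." decorator is left unfinished. A minimal sketch of what a runnable version might have looked like, assuming the single /test/agents route was the intent; everything beyond that route and the host/port defaults is an assumption.

# Sketch only: a runnable approximation of the deleted agents/server.py.
# The /test/agents route and the host/port defaults come from the removed file;
# the dangling "@app." decorator is dropped rather than guessed at.
import uvicorn
from fastapi import FastAPI


def server(agents, host="0.0.0.0", port=8000):
    app = FastAPI()

    @app.post("/test/agents")
    async def action():
        return {"message": "test"}

    uvicorn.run(app, host=host, port=port)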
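Similarly, the deleted llms/main.py left ModelInterface.request as a placeholder: it only sleeps and returns canned text, and time.sleep(None) would raise a TypeError if request were called without a timeout. A minimal sketch of how the method could instead hit the configured endpoint over HTTP; the JSON request and response fields are assumptions, since the removed code never defined a wire format.

# Sketch only: an HTTP-backed stand-in for the placeholder ModelInterface.request.
# The {"query": ..., "max_tokens": ...} payload and the JSON response are assumed,
# not taken from any real API.
import requests

class HTTPModelInterface:
    def __init__(self, name, endpoint):
        self.name = name
        self.endpoint = endpoint

    def request(self, query, timeout=None, max_tokens=None):
        resp = requests.post(
            self.endpoint,
            json={"query": query, "max_tokens": max_tokens},
            timeout=timeout,
        )
        resp.raise_for_status()  # let LLM_Switcher's except clause trigger a fallback
        return resp.json()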