
Commit

tools for flow and general cleanup
kyegomez committed Nov 11, 2023
1 parent a610ff7 commit 0e335b1
Showing 23 changed files with 185 additions and 78 deletions.
8 changes: 5 additions & 3 deletions docs/swarms/models/biogpt.md
@@ -66,7 +66,7 @@ Let's explore how to use the `BioGPT` class with different scenarios and applications
#### Example 1: Generating Biomedical Text

```python
from biogpt import BioGPT
from swarms.models import BioGPT

# Initialize the BioGPT model
biogpt = BioGPT()
@@ -81,7 +81,8 @@ print(generated_text)
#### Example 2: Extracting Features

```python
from biogpt import BioGPT
from swarms.models import BioGPT


# Initialize the BioGPT model
biogpt = BioGPT()
@@ -96,7 +97,8 @@ print(features)
#### Example 3: Using Beam Search Decoding

```python
from biogpt import BioGPT
from swarms.models import BioGPT


# Initialize the BioGPT model
biogpt = BioGPT()
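As a quick sanity check of the corrected import path, here is a minimal sketch of the usage these docs describe. Calling the model instance directly with a prompt string follows the examples in biogpt.md; default constructor arguments are an assumption.

```python
from swarms.models import BioGPT

# Minimal sketch of the usage documented in biogpt.md; default
# constructor arguments are assumed here.
biogpt = BioGPT()

# Example 1 in the docs generates biomedical text from a prompt.
generated_text = biogpt("The role of insulin in type 2 diabetes is")
print(generated_text)
```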
49 changes: 0 additions & 49 deletions groupchat.py

This file was deleted.

Binary file removed images/10f498c2-e22a-4f7f-9e50-56bf1ef92629.png
Binary file removed images/1c990ee0-ed68-4375-9731-9c9c25a72fac.png
Binary file removed images/2570cc4b-fafe-4f41-8193-ea9b563156e4.png
Binary file removed images/35661b4a-f230-47a1-91bf-f876935151ed.png
Binary file removed images/4b2161eb-bc44-4ee9-b106-208408b81d42.png
Binary file removed images/4e4ea9d1-e1e3-4609-a200-8d83b5912a44.png
Binary file removed images/5081867f-bb73-4ece-b746-df2247e55da5.png
Binary file removed images/a3fd26f3-0ee7-49b1-9e05-60b1dde1a1a8.png
Binary file removed images/af8e6856-9d24-46d5-81fc-c9b2010d5d77.png
Binary file removed images/f0f1d0e8-1672-4b9c-af1f-e6979f8a407c.png
Binary file removed images/f4992864-b211-4510-9e4a-1148470dd5ec.png
Binary file removed images/ffd2e03f-4238-4b6d-b29e-a3b41624ceae.png
5 changes: 0 additions & 5 deletions openai_example.py

This file was deleted.

7 changes: 7 additions & 0 deletions playground/models/openai_example.py
@@ -0,0 +1,7 @@
from swarms.models.openai_chat import OpenAIChat

model = OpenAIChat()

out = model("Hello, how are you?")

print(out)
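The openai_chat.py docstring shown later in this commit says the key is inferred from the `OPENAI_API_KEY` environment variable when it is not passed explicitly, so one way to run this playground script is sketched below; the exact constructor behaviour is an assumption based on that docstring.

```python
import os

from swarms.models.openai_chat import OpenAIChat

# Assumes OpenAIChat picks up OPENAI_API_KEY from the environment,
# as described in swarms/models/openai_chat.py.
os.environ["OPENAI_API_KEY"] = "<your-api-key>"

model = OpenAIChat()
print(model("Hello, how are you?"))
```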
62 changes: 62 additions & 0 deletions playground/structs/flow_tools.py
@@ -0,0 +1,62 @@
from swarms.models import Anthropic
from swarms.structs import Flow
from swarms.tools.tool import tool

import asyncio


llm = Anthropic(
anthropic_api_key="",
)


async def async_load_playwright(url: str) -> str:
"""Load the specified URLs using Playwright and parse using BeautifulSoup."""
from bs4 import BeautifulSoup
from playwright.async_api import async_playwright

results = ""
async with async_playwright() as p:
browser = await p.chromium.launch(headless=True)
try:
page = await browser.new_page()
await page.goto(url)

page_source = await page.content()
soup = BeautifulSoup(page_source, "html.parser")

for script in soup(["script", "style"]):
script.extract()

text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
results = "\n".join(chunk for chunk in chunks if chunk)
except Exception as e:
results = f"Error: {e}"
await browser.close()
return results


def run_async(coro):
event_loop = asyncio.get_event_loop()
return event_loop.run_until_complete(coro)


@tool
def browse_web_page(url: str) -> str:
"""Verbose way to scrape a whole webpage. Likely to cause issues parsing."""
return run_async(async_load_playwright(url))


## Initialize the workflow
flow = Flow(
llm=llm,
max_loops=5,
tools=[browse_web_page],
dashboard=True,
)

out = flow.run(
"Generate a 10,000 word blog on mental clarity and the benefits of meditation."
)
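For a quick standalone check of the new tool, a sketch of a direct call is shown below. It assumes the `@tool` decorator wraps the function in a LangChain-style tool object exposing `.run()`; the playground file itself only ever hands the tool to `Flow`.

```python
# Hypothetical direct invocation of the decorated tool; the .run()
# interface is assumed from LangChain-style tools and is not
# exercised anywhere in this commit.
page_text = browse_web_page.run("https://example.com")
print(page_text[:500])
```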
1 change: 1 addition & 0 deletions pyproject.toml
@@ -53,6 +53,7 @@ pydantic = "*"
tenacity = "*"
Pillow = "*"
chromadb = "*"
opencv-python-headless = "*"
tabulate = "*"
termcolor = "*"
black = "*"
1 change: 1 addition & 0 deletions requirements.txt
@@ -44,6 +44,7 @@ controlnet-aux
diffusers
einops
imageio
opencv-python-headless
imageio-ffmpeg
invisible-watermark
kornia
3 changes: 2 additions & 1 deletion swarms/models/openai_chat.py
@@ -213,7 +213,8 @@ def is_lc_serializable(cls) -> bool:
# When updating this to use a SecretStr
# Check for classes that derive from this class (as some of them
# may assume openai_api_key is a str)
openai_api_key: Optional[str] = Field(default=None, alias="api_key")
# openai_api_key: Optional[str] = Field(default=None, alias="api_key")
openai_api_key = "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"
"""Automatically inferred from env var `OPENAI_API_KEY` if not provided."""
openai_api_base: Optional[str] = Field(default=None, alias="base_url")
"""Base URL path for API requests, leave blank if not using a proxy or service
103 changes: 96 additions & 7 deletions swarms/structs/flow.py
@@ -11,13 +11,15 @@
- Add batched inputs
"""
import asyncio
import re
import json
import logging
import time
from typing import Any, Callable, Dict, List, Optional, Tuple
from termcolor import colored
import inspect
import random
from swarms.tools.tool import BaseTool

# Prompts
DYNAMIC_STOP_PROMPT = """
@@ -32,13 +34,25 @@
generate long-form content like blogs, screenplays, or SOPs,
and accomplish tasks. You can have internal dialogues with yourself or can interact with the user
to aid in these complex tasks. Your responses should be coherent, contextually relevant, and tailored to the task at hand.
{DYNAMIC_STOP_PROMPT}
"""

# Utility functions
# Make it able to handle multi input tools
DYNAMICAL_TOOL_USAGE = """
You have access to the following tools:
Output a JSON object with the following structure to use the tools
commands: {{
    "tool": "tool_name",
    "params": {{
        "param1": "value1",
        "param2": "value2"
    }}
}}
{tools}
"""


# Custom stopping condition
@@ -137,7 +151,7 @@ def __init__(
# The max_loops will be set dynamically if the dynamic_loop
if self.dynamic_loops:
self.max_loops = "auto"
# self.tools = tools
# self.tools = tools or []
self.system_prompt = system_prompt
self.agent_name = agent_name
self.saved_state_path = saved_state_path
@@ -193,6 +207,73 @@ def get_llm_init_params(self) -> str:

return "\n".join(params_str_list)

def parse_tool_command(self, text: str):
# Parse the text for tool usage
pass

def get_tool_description(self):
"""Get the tool description"""
tool_descriptions = []
for tool in self.tools:
description = f"{tool.name}: {tool.description}"
tool_descriptions.append(description)
return "\n".join(tool_descriptions)

def find_tool_by_name(self, name: str):
"""Find a tool by name"""
for tool in self.tools:
if tool.name == name:
return tool
return None

def construct_dynamic_prompt(self):
"""Construct the dynamic prompt"""
tools_description = self.get_tool_description()
return DYNAMICAL_TOOL_USAGE.format(tools=tools_description)

def extract_tool_commands(self, text: str):
"""
Extract the tool commands from the text
Example:
```json
{
"tool": "tool_name",
"params": {
"tool1": "inputs",
"param2": "value2"
}
}
```
"""
# Regex to find JSON like strings
pattern = r"```json(.+?)```"
matches = re.findall(pattern, text, re.DOTALL)
json_commands = []
for match in matches:
try:
                command = json.loads(match)
                json_commands.append(command)
            except Exception as error:
                print(f"Error parsing JSON command: {error}")
        return json_commands

def parse_and_execute_tools(self, response):
"""Parse and execute the tools"""
json_commands = self.extract_tool_commands(response)
for command in json_commands:
tool_name = command.get("tool")
            params = command.get("params", {})
            self.execute_tools(tool_name, params)

def execute_tools(self, tool_name, params):
"""Execute the tool with the provided params"""
        tool = self.find_tool_by_name(tool_name)
if tool:
# Execute the tool with the provided parameters
tool_result = tool.run(**params)
print(tool_result)

def truncate_history(self):
"""
Take the history and truncate it to fit into the model context length
@@ -287,10 +368,13 @@ def run(self, task: str, **kwargs):
5. Repeat until stopping condition is met or max_loops is reached
"""
dynamic_prompt = self.construct_dynamic_prompt()
combined_prompt = f"{dynamic_prompt}\n{task}"

# Activate Autonomous agent message
self.activate_autonomous_agent()

response = task
response = combined_prompt # or task
history = [f"{self.user_name}: {task}"]

# If dashboard = True then print the dashboard
@@ -318,8 +402,13 @@ def run(self, task: str, **kwargs):
while attempt < self.retry_attempts:
try:
response = self.llm(
task**kwargs,
task,
**kwargs,
)
# If there are any tools then parse and execute them
# if self.tools:
# self.parse_and_execute_tools(response)

if self.interactive:
print(f"AI: {response}")
history.append(f"AI: {response}")
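`extract_tool_commands` above scans the model's response for fenced JSON blocks, so tool use depends on the LLM emitting a block that matches the `tool`/`params` schema from the method's docstring. A rough sketch of that round trip is shown below; the tool and parameter names are illustrative, `flow` stands for a `Flow` instance such as the one created in playground/structs/flow_tools.py, and note that the dispatch call inside `run()` is still commented out in this commit, so it would have to be invoked manually.

```python
# Illustrative model output containing a tool command in the fenced
# JSON form that extract_tool_commands() looks for; the tool and
# parameter names are examples only.
model_response = (
    "I will fetch the page first.\n"
    "```json\n"
    "{\n"
    '    "tool": "browse_web_page",\n'
    '    "params": {"url": "https://example.com"}\n'
    "}\n"
    "```"
)

# `flow` is assumed to be a Flow instance configured with tools, e.g.
# the one in playground/structs/flow_tools.py. parse_and_execute_tools()
# extracts the JSON commands and dispatches each one to the named tool.
flow.parse_and_execute_tools(model_response)
```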
