
Commit

[CLEANUP]
Kye Gomez authored and Kye Gomez committed May 21, 2024
1 parent 8889664 commit 07083d9
Showing 9 changed files with 22 additions and 168 deletions.
151 changes: 5 additions & 146 deletions README.md
@@ -587,50 +587,6 @@ workflow.run()



### `ModelParallelizer`
The `ModelParallelizer` runs multiple models concurrently on the same task and collects their outputs side by side, so you can compare the performance and results of different models and make an informed choice about which one fits your use case.

Plug-and-Play Integration: The structure integrates seamlessly with a variety of models, including OpenAIChat, Anthropic, Mixtral, and Gemini. You can plug in any of these models and start using them without extensive modification or setup.


```python
import os

from dotenv import load_dotenv

from swarms import Anthropic, Gemini, Mixtral, ModelParallelizer, OpenAIChat

load_dotenv()

# API Keys
anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
openai_api_key = os.getenv("OPENAI_API_KEY")
gemini_api_key = os.getenv("GEMINI_API_KEY")

# Initialize the models
llm = OpenAIChat(openai_api_key=openai_api_key)
anthropic = Anthropic(anthropic_api_key=anthropic_api_key)
mixtral = Mixtral()
gemini = Gemini(gemini_api_key=gemini_api_key)

# Initialize the parallelizer
llms = [llm, anthropic, mixtral, gemini]
parallelizer = ModelParallelizer(llms)

# Set the task
task = "Generate a 10,000 word blog on health and wellness."

# Run the task
out = parallelizer.run(task)

# Print the responses one by one
for i, response in enumerate(out):
    print(f"Response from LLM {i}: {response}")
```




### `SwarmNetwork`
`SwarmNetwork` provides the infrastructure for building extremely dense and complex multi-agent applications that span various types of agents.

@@ -757,106 +713,6 @@ print(f"Task result: {task.result}")
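Below is a minimal usage sketch (not part of this commit) of what the collapsed example covers; it assumes `SwarmNetwork` accepts a list of `Agent` instances and exposes `add_agent`, `run_single_agent(agent_id, task)`, and `run_many_agents(task)` as in the swarms docs, so verify the exact signatures against your installed version.

```python
import os

from dotenv import load_dotenv

from swarms import Agent, OpenAIChat, SwarmNetwork

load_dotenv()

# One model shared by a few simple agents
llm = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY"))
manager = Agent(llm=llm, max_loops=1, agent_name="Social Media Manager")
writer = Agent(llm=llm, max_loops=1, agent_name="Content Writer")

# Build the network, then register one more agent dynamically
swarmnet = SwarmNetwork(agents=[manager, writer])
swarmnet.add_agent(Agent(llm=llm, max_loops=1, agent_name="Editor"))

# Run a task on a single agent, then broadcast a task to every agent
single = swarmnet.run_single_agent(writer.id, "Draft a tweet about multi-agent swarms.")
many = swarmnet.run_many_agents("Summarize today's AI news in one paragraph.")
print(single, many)
```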



### `BlocksList`
- Modularity and Flexibility: BlocksList allows users to create custom swarms by adding or removing different classes or functions as blocks. This means users can easily tailor the functionality of their swarm to suit their specific needs.

- Ease of Management: With methods to add, remove, update, and retrieve blocks, BlocksList provides a straightforward way to manage the components of a swarm. This makes it easier to maintain and update the swarm over time.

- Enhanced Searchability: BlocksList offers methods to get blocks by various attributes such as name, type, ID, and parent-related properties. This makes it easier for users to find and work with specific blocks in a large and complex swarm.

```python
import os

from dotenv import load_dotenv
from transformers import AutoModelForCausalLM, AutoTokenizer
from pydantic import BaseModel, Field
from swarms import BlocksList, Gemini, GPT4VisionAPI, Mixtral, OpenAI, ToolAgent
from swarms.utils.json_utils import base_model_to_json  # import path assumed; adjust if your swarms version exposes it elsewhere

# Load the environment variables
load_dotenv()

# Get the environment variables
openai_api_key = os.getenv("OPENAI_API_KEY")
gemini_api_key = os.getenv("GEMINI_API_KEY")

# Tool Agent
model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-12b")
tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")

# Initialize the schema for the person's information
class Schema(BaseModel):
    name: str = Field(..., title="Name of the person")
    age: int = Field(..., title="Age of the person")
    is_student: bool = Field(..., title="Whether the person is a student")
    courses: list[str] = Field(..., title="List of courses the person is taking")

# Convert the schema to a JSON string
json_schema = base_model_to_json(Schema)


toolagent = ToolAgent(model=model, tokenizer=tokenizer, json_schema=json_schema)

# Blocks List which enables you to build custom swarms by adding classes or functions
swarm = BlocksList(
"SocialMediaSwarm",
"A swarm of social media agents",
[
OpenAI(openai_api_key=openai_api_key),
Mixtral(),
GPT4VisionAPI(openai_api_key=openai_api_key),
Gemini(gemini_api_key=gemini_api_key),
],
)


# Add the new block to the swarm
swarm.add(toolagent)

# Remove a block from the swarm
swarm.remove(toolagent)

# Update a block in the swarm
swarm.update(toolagent)

# Get a block at a specific index
block_at_index = swarm.get(0)

# Get all blocks in the swarm
all_blocks = swarm.get_all()

# Get blocks by name
openai_blocks = swarm.get_by_name("OpenAI")

# Get blocks by type
gpt4_blocks = swarm.get_by_type("GPT4VisionAPI")

# Get blocks by ID
block_by_id = swarm.get_by_id(toolagent.id)

# Get blocks by parent
blocks_by_parent = swarm.get_by_parent(swarm)

# Get blocks by parent ID
blocks_by_parent_id = swarm.get_by_parent_id(swarm.id)

# Get blocks by parent name
blocks_by_parent_name = swarm.get_by_parent_name(swarm.name)

# Get blocks by parent type
blocks_by_parent_type = swarm.get_by_parent_type(type(swarm).__name__)

# Get blocks by parent description
blocks_by_parent_description = swarm.get_by_parent_description(swarm.description)

# Run the block in the swarm
inference = swarm.run_block(toolagent, "Hello World")
print(inference)
```


## Majority Voting
Multiple agents evaluate an idea using a parsing or evaluation function, and the majority response wins, as explored in papers like "[More Agents Is All You Need](https://arxiv.org/pdf/2402.05120.pdf)".
@@ -1249,6 +1105,9 @@ print(output)

```
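For orientation, here is a minimal sketch of the pattern; the `MajorityVoting(agents=...)` constructor and `run(task)` method are assumed from the docs, so check `swarms/structs/majority_voting.py` for the exact signature before relying on it.

```python
import os

from swarms import Agent, MajorityVoting, OpenAIChat  # MajorityVoting lives under swarms.structs

llm = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY"))

# Three independent evaluators of the same question
agents = [
    Agent(llm=llm, max_loops=1, agent_name=f"Evaluator-{i}") for i in range(3)
]

# Assumed constructor: a list of agents whose answers are merged by majority vote
mv = MajorityVoting(agents=agents)

# Each agent answers, and the most frequent response is returned
result = mv.run("Is 2 + 2 equal to 4? Answer strictly yes or no.")
print(result)
```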

## `HierarchicalSwarm`
Coming soon...


---

@@ -1265,6 +1124,7 @@ The swarms package has been meticulously crafted for extreme usability and understanding
├── agents
├── artifacts
├── memory
├── schemas
├── models
├── prompts
├── structs
@@ -1313,13 +1173,12 @@ Accelerate Bugs, Features, and Demos to implement by supporting us here:


## Docker Instructions
- [Learn More Here About Deployments In Docker]()
- [Learn More Here About Deployments In Docker](https://swarms.apac.ai/en/latest/docker_setup/)


## Swarm Newsletter 🤖 🤖 🤖 📧
Sign up for the Swarm newsletter to receive updates on the latest autonomous agent research papers, step-by-step guides on creating multi-agent apps, and much more Swarmie goodness 😊


[CLICK HERE TO SIGNUP](https://docs.google.com/forms/d/e/1FAIpQLSfqxI2ktPR9jkcIwzvHL0VY6tEIuVPd-P2fOWKnd6skT9j1EQ/viewform?usp=sf_link)

# License
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"

[tool.poetry]
name = "swarms"
version = "5.0.4"
version = "5.0.5"
description = "Swarms - Pytorch"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]
7 changes: 4 additions & 3 deletions scripts/code_quality.sh
@@ -5,15 +5,16 @@

# Run autopep8 with high aggressiveness (--aggressive --aggressive) and in-place modification
# on all Python files (*.py) under the 'swarms' directory.
autopep8 --in-place --aggressive --aggressive --recursive --experimental --list-fixes zeta/
autopep8 --in-place --aggressive --aggressive --recursive --experimental --list-fixes swarms/

# Run black with default settings, since black does not have an aggressiveness level.
# Black will format all Python files it finds in the repository.
black --experimental-string-processing zeta/
black .

# Run ruff across the repository.
# Add any additional flags if needed according to your version of ruff.
ruff zeta/ --fix
ruff . --fix
ruff clean

# YAPF
yapf --recursive --in-place --verbose --style=google --parallel tests
5 changes: 1 addition & 4 deletions swarms/models/__init__.py
@@ -8,8 +8,7 @@
from swarms.models.kosmos_two import Kosmos # noqa: E402
from swarms.models.layoutlm_document_qa import LayoutLMDocumentQA
from swarms.models.llava import LavaMultiModal # noqa: E402
from swarms.models.mistral import Mistral # noqa: E402
from swarms.models.mixtral import Mixtral # noqa: E402

from swarms.models.nougat import Nougat # noqa: E402
from swarms.models.palm import GooglePalm as Palm # noqa: E402
from swarms.models.openai_tts import OpenAITTS # noqa: E402
@@ -53,8 +52,6 @@
"Kosmos",
"LayoutLMDocumentQA",
"LavaMultiModal",
"Mistral",
"Mixtral",
"Nougat",
"Palm",
"OpenAITTS",
5 changes: 3 additions & 2 deletions swarms/models/llama3_hosted.py
@@ -32,6 +32,7 @@ def __init__(
temperature: float = 0.8,
max_tokens: int = 4000,
system_prompt: str = "You are a helpful assistant.",
base_url: str = "http://34.204.8.31:30001/v1/chat/completions",
*args,
**kwargs,
):
@@ -40,6 +41,7 @@
self.temperature = temperature
self.max_tokens = max_tokens
self.system_prompt = system_prompt
self.base_url = base_url

def run(self, task: str, *args, **kwargs) -> str:
"""
@@ -52,7 +54,6 @@ def run(self, task: str, *args, **kwargs) -> str:
str: The generated response from the Llama3 model.
"""
url = "http://34.204.8.31:30001/v1/chat/completions"

payload = json.dumps(
{
@@ -70,7 +71,7 @@ def run(self, task: str, *args, **kwargs) -> str:
headers = {"Content-Type": "application/json"}

response = requests.request(
"POST", url, headers=headers, data=payload
"POST", self.base_url, headers=headers, data=payload
)

response_json = response.json()
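For context, a hypothetical usage sketch of the now-configurable endpoint; the class name `llama3Hosted` is assumed from the file shown above (the import path matches the diff), and the local URL is purely illustrative.

```python
from swarms.models.llama3_hosted import llama3Hosted  # class name assumed

# Point the hosted Llama3 wrapper at a custom endpoint instead of the hard-coded URL
llm = llama3Hosted(
    temperature=0.5,
    max_tokens=1000,
    system_prompt="You are a helpful assistant.",
    base_url="http://localhost:30001/v1/chat/completions",  # hypothetical local endpoint
)

print(llm.run("Explain what a swarm of agents is in two sentences."))
```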
2 changes: 1 addition & 1 deletion swarms/schemas/plan.py
@@ -1,6 +1,6 @@
from typing import List
from pydantic import BaseModel
from swarms.structs.step import Step
from swarms.schemas.step import Step


class Plan(BaseModel):
10 changes: 3 additions & 7 deletions swarms/structs/__init__.py
@@ -8,7 +8,6 @@
from swarms.structs.base_structure import BaseStructure
from swarms.structs.base_swarm import BaseSwarm
from swarms.structs.base_workflow import BaseWorkflow
from swarms.structs.block_wrapper import block
from swarms.structs.concurrent_workflow import ConcurrentWorkflow
from swarms.structs.conversation import Conversation
from swarms.structs.groupchat import GroupChat, GroupChatManager
@@ -21,19 +20,18 @@
)
from swarms.structs.message import Message
from swarms.structs.message_pool import MessagePool
from swarms.structs.model_parallizer import ModelParallelizer
from swarms.structs.multi_agent_collab import MultiAgentCollaboration
from swarms.structs.multi_process_workflow import (
MultiProcessWorkflow,
)
from swarms.structs.multi_threaded_workflow import (
MultiThreadedWorkflow,
)
from swarms.structs.plan import Plan
from swarms.schemas.plan import Plan
from swarms.structs.rearrange import AgentRearrange, rearrange
from swarms.structs.recursive_workflow import RecursiveWorkflow
from swarms.structs.round_robin import RoundRobinSwarm
from swarms.structs.schemas import (
from swarms.schemas.schemas import (
Artifact,
ArtifactUpload,
StepInput,
@@ -43,7 +41,7 @@
TaskRequestBody,
)
from swarms.structs.sequential_workflow import SequentialWorkflow
from swarms.structs.step import Step
from swarms.schemas.step import Step
from swarms.structs.swarm_net import SwarmNetwork
from swarms.structs.swarming_architectures import (
broadcast,
@@ -96,7 +94,6 @@
"BaseStructure",
"BaseSwarm",
"BaseWorkflow",
"block",
"ConcurrentWorkflow",
"Conversation",
"GroupChat",
Expand All @@ -106,7 +103,6 @@
"most_frequent",
"parse_code_completion",
"Message",
"ModelParallelizer",
"MultiAgentCollaboration",
"MultiProcessWorkflow",
"MultiThreadedWorkflow",
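The net effect of this file's changes is that the schema objects now live under `swarms.schemas`; a quick sketch of the import paths taken directly from the diff above:

```python
# After this commit the schema objects resolve from swarms.schemas
from swarms.schemas.plan import Plan
from swarms.schemas.schemas import Artifact, ArtifactUpload, StepInput, TaskRequestBody
from swarms.schemas.step import Step

# swarms/structs/__init__.py still imports these names, so code that did
# `from swarms.structs import Plan` keeps resolving after the move.
from swarms.structs import Plan as StructsPlan

print(Plan is StructsPlan)  # True: same class, new home
```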
1 change: 1 addition & 0 deletions swarms/structs/agent.py
@@ -8,6 +8,7 @@
import time
import uuid
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import yaml
from loguru import logger
from pydantic import BaseModel
7 changes: 3 additions & 4 deletions swarms/structs/message.py
@@ -1,7 +1,6 @@
import datetime
from typing import Dict, Optional

from pydantic import BaseModel
from datetime import datetime
from pydantic import BaseModel, Field


class Message(BaseModel):
@@ -18,7 +17,7 @@ class Message(BaseModel):
print(mes)
"""

timestamp: datetime = datetime.now()
timestamp: datetime = Field(default_factory=datetime.now)
sender: str
content: str
metadata: Optional[Dict[str, str]] = {}
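Why the last change matters: a class-level default like `datetime.now()` is evaluated once at import time, so every `Message` would share the same timestamp, whereas `Field(default_factory=datetime.now)` evaluates at instantiation. A small illustrative check, assuming `Message` remains importable from `swarms.structs.message`:

```python
import time

from swarms.structs.message import Message

m1 = Message(sender="Kye", content="Hello")
time.sleep(0.01)
m2 = Message(sender="Kye", content="Hello again")

# With default_factory, each instance gets its own creation time.
assert m1.timestamp != m2.timestamp
print(m1.timestamp, m2.timestamp)
```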
