playground + flow docs fix
kyegomez committed Nov 6, 2023
1 parent a70a2b0 commit 336bffe
Showing 8 changed files with 120 additions and 140 deletions.
11 changes: 8 additions & 3 deletions docs/swarms/structs/flow.md
@@ -108,8 +108,13 @@ Here are three usage examples:

```python
from swarms.structs import Flow
# Select any language model from the models folder
from swarms.models import Mistral, OpenAIChat

flow = Flow(llm=my_language_model, max_loops=5)
llm = Mistral()
# llm = OpenAIChat()

flow = Flow(llm=llm, max_loops=5)

# Define a starting task or message
initial_task = "Generate an long form analysis on the transformer model architecture."
@@ -126,15 +131,15 @@ from swarms.structs import Flow
def stop_when_repeats(response: str) -> bool:
    # Halt once the model's response contains "stop" (case-insensitive)
    return "stop" in response.lower()

flow = Flow(llm=my_language_model, max_loops=5, stopping_condition=stop_when_repeats)
flow = Flow(llm=llm, max_loops=5, stopping_condition=stop_when_repeats)
```

### Example 3: Interactive Conversation

```python
from swarms.structs import Flow

flow = Flow(llm=my_language_model, max_loops=5, interactive=True)
flow = Flow(llm=llm, max_loops=5, interactive=True)

# Provide initial task
initial_task = "Rank and prioritize the following financial documents and cut out 30% of our expenses"
File renamed without changes.
56 changes: 0 additions & 56 deletions playground/models/multitemp.py

This file was deleted.

4 changes: 2 additions & 2 deletions playground/models/openai_model.py
@@ -1,6 +1,6 @@
from swarms.models.openai_models import OpenAIChat

openai = OpenAIChat(openai_api_key="", verbose=False)
openai = OpenAIChat(openai_api_key="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", verbose=False)

chat = openai("Are quantum fields everywhere?")
chat = openai("What are quantum fields?")
print(chat)
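
A hardcoded key like the one added above tends to leak through version control. A minimal alternative sketch, assuming the key is exported in the environment (the `OPENAI_API_KEY` variable name is illustrative, not part of this commit):

```python
import os

from swarms.models.openai_models import OpenAIChat

# Read the key from the environment instead of committing it to source control.
openai = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY", ""), verbose=False)

chat = openai("What are quantum fields?")
print(chat)
```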
35 changes: 35 additions & 0 deletions playground/structs/flow.py
@@ -0,0 +1,35 @@
from swarms.models import OpenAIChat
from swarms.structs import Flow

api_key = ""

# Initialize the language model; it can be swapped out for Anthropic, Hugging Face models such as Mistral, etc.
llm = OpenAIChat(
    # model_name="gpt-4",
    openai_api_key=api_key,
    temperature=0.5,
    # max_tokens=100,
)

## Initialize the workflow
flow = Flow(
    llm=llm,
    max_loops=2,
    dashboard=True,
    # stopping_condition=None, # You can define a stopping condition as needed.
    # loop_interval=1,
    # retry_attempts=3,
    # retry_interval=1,
    # interactive=False, # Set to 'True' for interactive mode.
    # dynamic_temperature=False, # Set to 'True' for dynamic temperature handling.
)

# out = flow.load_state("flow_state.json")
# temp = flow.dynamic_temperature()
# filter = flow.add_response_filter("Trump")
out = flow.run("Generate a 10,000 word blog on health and wellness.")
# out = flow.validate_response(out)
# out = flow.analyze_feedback(out)
# out = flow.print_history_and_memory()
# # out = flow.save_state("flow_state.json")
# print(out)
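
The commented-out `save_state` / `load_state` calls above suggest the flow's state can be persisted; a short sketch of how that might be exercised as a continuation of the script above (their exact behavior is assumed, not confirmed by this commit):

```python
# Continuation of the script above: persist the finished run, then restore it
# into a fresh Flow. save_state/load_state behavior is assumed from the
# commented-out calls earlier in this file.
flow.save_state("flow_state.json")

restored = Flow(llm=llm, max_loops=2, dashboard=False)
restored.load_state("flow_state.json")
print(restored.run("Summarize the previous conversation in one paragraph."))
```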
31 changes: 31 additions & 0 deletions playground/structs/sequential_workflow.py
@@ -0,0 +1,31 @@
from swarms.models import OpenAIChat
from swarms.structs import Flow
from swarms.structs.sequential_workflow import SequentialWorkflow

# Example usage
llm = OpenAIChat(
    temperature=0.5,
    max_tokens=3000,
)

# Initialize the first Flow with the language model
flow1 = Flow(llm=llm, max_loops=1, dashboard=False)

# Create another Flow for a different task
flow2 = Flow(llm=llm, max_loops=1, dashboard=False)

# Create the workflow
workflow = SequentialWorkflow(max_loops=1)

# Add tasks to the workflow
workflow.add("Generate a 10,000 word blog on health and wellness.", flow1)

# Suppose the next task takes the output of the first task as input
workflow.add("Summarize the generated blog", flow2)

# Run the workflow
workflow.run()

# Output the results
for task in workflow.tasks:
    print(f"Task: {task.description}, Result: {task.result}")
39 changes: 8 additions & 31 deletions playground/swarms/godmode.py
@@ -1,39 +1,16 @@
from swarms.swarms import GodMode
from swarms.models import OpenAIChat

from swarms.swarms import GodMode
from swarms.workers.worker import Worker
api_key = ""

llm = OpenAIChat(openai_api_key=api_key)

llm = OpenAIChat(model_name="gpt-4", openai_api_key="api-key", temperature=0.5)

worker1 = Worker(
llm=llm,
ai_name="Bumble Bee",
ai_role="Worker in a swarm",
external_tools=None,
human_in_the_loop=False,
temperature=0.5,
)
worker2 = Worker(
llm=llm,
ai_name="Optimus Prime",
ai_role="Worker in a swarm",
external_tools=None,
human_in_the_loop=False,
temperature=0.5,
)
worker3 = Worker(
llm=llm,
ai_name="Megatron",
ai_role="Worker in a swarm",
external_tools=None,
human_in_the_loop=False,
temperature=0.5,
)
# Usage
agents = [worker1, worker2, worker3]
llms = [llm, llm, llm]

god_mode = GodMode(agents)
god_mode = GodMode(llms)

task = "What are the biggest risks facing humanity?"
task = "Generate a 10,000 word blog on health and wellness."

out = god_mode.run(task)
god_mode.print_responses(task)
84 changes: 36 additions & 48 deletions playground/swarms/groupchat.py
@@ -1,61 +1,49 @@
from swarms.models import OpenAIChat
from swarms.swarms import GroupChat, GroupChatManager
from swarms.workers import Worker
from swarms import OpenAI, Flow
from swarms.swarms.groupchat import GroupChatManager, GroupChat

llm = OpenAIChat(model_name="gpt-4", openai_api_key="api-key", temperature=0.5)

node = Worker(
llm=llm,
ai_name="Optimus Prime",
ai_role="Worker in a swarm",
external_tools=None,
human_in_the_loop=False,
api_key = ""

llm = OpenAI(
openai_api_key=api_key,
temperature=0.5,
max_tokens=3000,
)

node2 = Worker(
# Initialize the flow
flow1 = Flow(
llm=llm,
ai_name="Optimus Prime",
ai_role="Worker in a swarm",
external_tools=None,
human_in_the_loop=False,
temperature=0.5,
max_loops=1,
system_message="YOU ARE SILLY, YOU OFFER NOTHING OF VALUE",
name="silly",
dashboard=True,
)

node3 = Worker(
flow2 = Flow(
llm=llm,
ai_name="Optimus Prime",
ai_role="Worker in a swarm",
external_tools=None,
human_in_the_loop=False,
temperature=0.5,
max_loops=1,
system_message="YOU ARE VERY SMART AND ANSWER RIDDLES",
name="detective",
dashboard=True,
)

nodes = [node, node2, node3]

messages = [
{
"role": "system",
"context": "Create an a small feedforward in pytorch",
}
]

group = GroupChat(
workers=nodes,
messages=messages,
max_rounds=3,
flow3 = Flow(
llm=llm,
max_loops=1,
system_message="YOU MAKE RIDDLES",
name="riddler",
dashboard=True,
)


manager = GroupChatManager(
groupchat=group,
max_consecutive_auto_reply=3,
manager = Flow(
llm=llm,
max_loops=1,
system_message="YOU ARE A GROUP CHAT MANAGER",
name="manager",
dashboard=True,
)

output = group.run(
messages,
sender=node,
config=group,
)

print(output)
# Example usage:
agents = [flow1, flow2, flow3]

group_chat = GroupChat(agents=agents, messages=[], max_round=10)
chat_manager = GroupChatManager(groupchat=group_chat, selector=manager)
chat_history = chat_manager("Write me a riddle")
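
The history returned by the manager call above is never inspected; a minimal follow-up, assuming the returned object prints cleanly and that the manager can be called again with another task string (as it is on the line above):

```python
# Print the conversation produced by the group chat run above.
print(chat_history)

# The manager is callable with a plain task string, so a second round can
# reuse the same GroupChat (assumed, based on the call above).
print(chat_manager("Now answer the riddle you just wrote"))
```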
