Skip to content

Commit 2454d69

Browse files
committed
Adds the architecture diagram
1 parent 7413a60 commit 2454d69

File tree

3 files changed

+30
-22
lines changed

3 files changed

+30
-22
lines changed

README.md

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -18,6 +18,9 @@ This project implements a sophisticated multi-agent AI system designed to conduc
1818
- [Langchain](https://github.com/hwchase17/langchain): For building applications with large language models.
1919
- [Gemini API](https://ai.google.dev/docs): Google's advanced language model for natural language processing tasks.
2020

21+
22+
![Multi Agent AI System Architecture](/image.jpg)
23+
2124
## Installation
2225

2326
1. Clone the repository:

agents.py

Lines changed: 27 additions & 22 deletions
Original file line number | Diff line number | Diff line change
@@ -1,30 +1,35 @@
1+
import os
12
from crewai import Crew, Task, Agent, Process
23
from crewai_tools import SerperDevTool
34
from langchain_google_genai import ChatGoogleGenerativeAI
4-
import os
55

6-
os.environ["GOOGLE_API_KEY"] = "AIzaSyAdvi5FDIVPLUokRD4KBjve4UdfZOSmbVo"
7-
os.environ["SERPER_API_KEY"] = "29e9856df645a3ac5c5bcb6bdad3e582be0322fa"
86

7+
from langchain_huggingface import HuggingFaceEndpoint
8+
from langchain_community.chat_models.huggingface import ChatHuggingFace
99

10+
# Get the Hugging Face token
11+
huggingface_token = "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"
1012

11-
# Create the first LLM
13+
# Initialize the Hugging Face model
14+
llm = HuggingFaceEndpoint(
15+
repo_id="meta-llama/Meta-Llama-3.1-8B-Instruct", # Replace with the model you want to use
16+
huggingfacehub_api_token=huggingface_token
17+
)
1218

13-
messages = [
14-
(
15-
"system",
16-
"You are a helpful assistant that translates English to French. Translate the user sentence.",
17-
),
18-
("human", "I love programming."),
19-
]
2019

21-
llm = ChatGoogleGenerativeAI(
22-
model="gemini-1.5-pro",
23-
temperature=0,
24-
max_tokens=None,
25-
timeout=None,
26-
max_retries=2
27-
)
20+
# os.environ["GOOGLE_API_KEY"] = "AIzaSyAdvi5FDIVPLUokRD4KBjve4UdfZOSmbVo"
21+
os.environ["SERPER_API_KEY"] = "29e9856df645a3ac5c5bcb6bdad3e582be0322fa"
22+
# os.environ["HUGGINGFACE_TOKEN"] = "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"
23+
24+
25+
# # Create the first LLM
26+
# llm = ChatGoogleGenerativeAI(
27+
# model="gemini-1.5-pro",
28+
# temperature=0,
29+
# max_tokens=300,
30+
# timeout=None,
31+
# max_retries=2
32+
# )
2833

2934
search_tool = SerperDevTool()
3035

@@ -45,7 +50,7 @@
4550
role='Content Synthesizer and Analyst',
4651
goal='Analyze and synthesize research findings into coherent, insightful, and well-structured reports',
4752
backstory="""You are a skilled content creator with a background in data analysis and journalism. Your forte is taking complex, multi-faceted information and distilling it into clear, engaging, and informative content. You have a talent for identifying key themes, drawing connections between diverse pieces of information, and presenting findings in a way that is both accessible and comprehensive.""",
48-
verbose=True,
53+
verbose=0,
4954
llm=llm,
5055
allow_delegation=False
5156
)
@@ -59,16 +64,16 @@
5964
)
6065

6166
task2 = Task(
62-
description="""Using the research findings provided, create a comprehensive blog post that synthesizes the information gathered from various subreddits. Structure the post to include an introduction that outlines the main themes discovered, followed by sections that delve into specific insights, trends, or debates found across different communities. Include relevant statistics on post engagement to support your analysis. Conclude with a summary of the overall Reddit sentiment on the topic and any implications or future trends this might suggest.""",
63-
expected_output="Full blog post of at least 2 paragraphs",
67+
description=f"""User query: {user_query}. Using the research findings provided, and user query create a well composed answer. Make sure it answers my query properly.""",
68+
expected_output="Text in simple english max 200 words.",
6469
agent=writer
6570
)
6671

6772
# Instantiate your crew with a sequential process
6873
crew = Crew(
6974
agents=[researcher, writer],
7075
tasks=[task1, task2],
71-
verbose=True,
76+
verbose=0,
7277
process = Process.sequential
7378
)
7479

image.jpg

93.2 KB
Loading

0 commit comments

Comments (0)