+import os
 from crewai import Crew, Task, Agent, Process
 from crewai_tools import SerperDevTool
 from langchain_google_genai import ChatGoogleGenerativeAI
-import os
 
-os.environ["GOOGLE_API_KEY"] = "<REDACTED>"
-os.environ["SERPER_API_KEY"] = "<REDACTED>"
 
+from langchain_huggingface import HuggingFaceEndpoint
+from langchain_community.chat_models.huggingface import ChatHuggingFace
 
+# Get the Hugging Face token (use a placeholder; never commit real tokens)
+huggingface_token = "<YOUR_HUGGINGFACE_TOKEN>"
 
-# Create the first LLM
+# Initialize the Hugging Face model
+llm = HuggingFaceEndpoint(
+    repo_id="meta-llama/Meta-Llama-3.1-8B-Instruct",  # Replace with the model you want to use
+    huggingfacehub_api_token=huggingface_token
+)
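+# Note (untested sketch): CrewAI agents typically expect a chat-style model;
+# if the raw endpoint misbehaves, it can be wrapped with the import above:
+# llm = ChatHuggingFace(llm=llm)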
 
-messages = [
-    (
-        "system",
-        "You are a helpful assistant that translates English to French. Translate the user sentence.",
-    ),
-    ("human", "I love programming."),
-]
 
-llm = ChatGoogleGenerativeAI(
-    model="gemini-1.5-pro",
-    temperature=0,
-    max_tokens=None,
-    timeout=None,
-    max_retries=2
-)
+# os.environ["GOOGLE_API_KEY"] = "<YOUR_GOOGLE_API_KEY>"
+os.environ["SERPER_API_KEY"] = "<YOUR_SERPER_API_KEY>"
+# os.environ["HUGGINGFACE_TOKEN"] = "<YOUR_HUGGINGFACE_TOKEN>"
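+# Safer pattern (sketch): read secrets from the environment instead of
+# hardcoding them, e.g. serper_key = os.getenv("SERPER_API_KEY")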
+
+
+# # Create the first LLM
+# llm = ChatGoogleGenerativeAI(
+#     model="gemini-1.5-pro",
+#     temperature=0,
+#     max_tokens=300,
+#     timeout=None,
+#     max_retries=2
+# )
 
 search_tool = SerperDevTool()
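+# SerperDevTool reads SERPER_API_KEY from the environment variable set above.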
 
@@ ... @@
     role='Content Synthesizer and Analyst',
     goal='Analyze and synthesize research findings into coherent, insightful, and well-structured reports',
     backstory="""You are a skilled content creator with a background in data analysis and journalism. Your forte is taking complex, multi-faceted information and distilling it into clear, engaging, and informative content. You have a talent for identifying key themes, drawing connections between diverse pieces of information, and presenting findings in a way that is both accessible and comprehensive.""",
-    verbose=True,
+    verbose=False,
     llm=llm,
     allow_delegation=False
 )
@@ ... @@
 )
 
 task2 = Task(
-    description="""Using the research findings provided, create a comprehensive blog post that synthesizes the information gathered from various subreddits. Structure the post to include an introduction that outlines the main themes discovered, followed by sections that delve into specific insights, trends, or debates found across different communities. Include relevant statistics on post engagement to support your analysis. Conclude with a summary of the overall Reddit sentiment on the topic and any implications or future trends this might suggest.""",
-    expected_output="Full blog post of at least 2 paragraphs",
+    description=f"""User query: {user_query}. Using the research findings provided and the user query, compose a well-structured answer that fully addresses the query.""",
+    expected_output="Plain English text, at most 200 words.",
     agent=writer
 )
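+# With Process.sequential, CrewAI passes each task's output as context to the
+# next, so the writer builds on the researcher's findings from task1.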
 
 # Instantiate your crew with a sequential process
 crew = Crew(
     agents=[researcher, writer],
     tasks=[task1, task2],
-    verbose=True,
+    verbose=False,
     process=Process.sequential
 )
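+# Minimal usage sketch (assumes user_query is defined earlier in the script):
+# result = crew.kickoff()
+# print(result)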