From 8c3d654629c193f7a2b26408b0c2b7afdb73611b Mon Sep 17 00:00:00 2001
From: luigisaetta
Date: Fri, 31 Oct 2025 14:55:52 +0100
Subject: [PATCH] added crewai integration, rel 1

---
 .../crewai-oci-integration/LICENSE            |  21 ++
 .../crewai-oci-integration/README.md          | 107 +++++++
 .../config_template.yml                       |  36 +++
 .../crew_agent_mcp01.py                       |  58 ++++
 .../crew_agent_mcp02.py                       |  79 +++++
 .../multi_agent_report.py                     | 270 ++++++++++++++++++
 .../simple_test_crewai_agent.py               |  53 ++++
 .../crewai-oci-integration/start_gateway.sh   |   2 +
 8 files changed, 626 insertions(+)
 create mode 100644 ai/gen-ai-agents/crewai-oci-integration/LICENSE
 create mode 100644 ai/gen-ai-agents/crewai-oci-integration/README.md
 create mode 100644 ai/gen-ai-agents/crewai-oci-integration/config_template.yml
 create mode 100644 ai/gen-ai-agents/crewai-oci-integration/crew_agent_mcp01.py
 create mode 100644 ai/gen-ai-agents/crewai-oci-integration/crew_agent_mcp02.py
 create mode 100644 ai/gen-ai-agents/crewai-oci-integration/multi_agent_report.py
 create mode 100644 ai/gen-ai-agents/crewai-oci-integration/simple_test_crewai_agent.py
 create mode 100755 ai/gen-ai-agents/crewai-oci-integration/start_gateway.sh

diff --git a/ai/gen-ai-agents/crewai-oci-integration/LICENSE b/ai/gen-ai-agents/crewai-oci-integration/LICENSE
new file mode 100644
index 000000000..fb2e1fcb6
--- /dev/null
+++ b/ai/gen-ai-agents/crewai-oci-integration/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 Luigi Saetta
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/ai/gen-ai-agents/crewai-oci-integration/README.md b/ai/gen-ai-agents/crewai-oci-integration/README.md
new file mode 100644
index 000000000..7978b2299
--- /dev/null
+++ b/ai/gen-ai-agents/crewai-oci-integration/README.md
@@ -0,0 +1,107 @@
+# CrewAI ↔ OCI Generative AI Integration
+
+This repository provides examples and configuration guidelines for integrating **[CrewAI](https://github.com/joaomdmoura/crewAI)** with **Oracle Cloud Infrastructure (OCI) Generative AI** services.
+The goal is to demonstrate how CrewAI agents can seamlessly leverage OCI-hosted models through the **LiteLLM gateway**.
+
+Reviewed: 31.10.2025
+
+---
+
+## 🔐 Security Configuration
+
+Before running the demos, you must configure access credentials for OCI.
+
+In these examples, we use a **locally stored key pair** for authentication.
+Ensure your local OCI configuration (`~/.oci/config` and private key) is correctly set up and accessible to the Python SDK.
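+
+If you have never created this file, a minimal `~/.oci/config` looks like the sketch below (all values are placeholders: replace them with your own OCIDs, fingerprint, and key path):
+
+```ini
+[DEFAULT]
+user=ocid1.user.oc1..<your-user-ocid>
+fingerprint=<your-api-key-fingerprint>
+tenancy=ocid1.tenancy.oc1..<your-tenancy-ocid>
+region=us-chicago-1
+key_file=/path/to/your/oci_api_key.pem
+```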
+
+To start the **LiteLLM gateway**, you need to create and correctly configure a **config.yml** file. Use the [template](./config_template.yml) as a starting point.
+
+In addition, you must be **enabled** to use the OCI Generative AI service in your tenancy. If you haven't used OCI GenAI yet, ask your tenancy administrator to set up the **required policies**.
+
+---
+
+## 🧩 Demos Included
+
+- [Simple CrewAI Agent](./simple_test_crewai_agent.py) — basic CrewAI agent interacting with an LLM through OCI
+- [OCI Consumption Report](./crew_agent_mcp02.py) — agent that analyzes tenancy consumption through an MCP server
+- *(More demos to be added soon)*
+
+---
+
+## 📦 Dependencies
+
+The project relies on the following main packages:
+
+| Dependency | Purpose |
+|-------------|----------|
+| **CrewAI** | Framework for creating multi-agent workflows |
+| **OCI Python SDK** | Access OCI services programmatically |
+| **LiteLLM (Gateway)** | OpenAI-compatible proxy for accessing OCI Generative AI models |
+
+To connect CrewAI to OCI models, we use a **LiteLLM gateway**, which exposes OCI GenAI via an **OpenAI-compatible** REST API.
+
+---
+
+## ⚙️ Environment Setup
+
+1. **Create a Conda environment**
+```bash
+conda create -n crewai python=3.11
+```
+
+2. **Activate** the environment
+```bash
+conda activate crewai
+```
+
+3. **Install** the required **packages**
+```bash
+pip install -U oci litellm "litellm[proxy]" crewai
+```
+
+4. **Run** the LiteLLM **gateway**
+
+Start the LiteLLM gateway using your configuration file (config.yml):
+```bash
+./start_gateway.sh
+```
+
+Make sure the gateway starts successfully and is listening on the configured port (e.g., http://localhost:4000/v1).
+
+---
+
+## 🧠 Test the Integration
+
+Run the sample CrewAI agent to verify that CrewAI can connect to OCI through LiteLLM:
+
+```bash
+python simple_test_crewai_agent.py
+```
+
+If the setup is correct, you should see the agent's output, generated by an OCI model.
+
+---
+
+## 🔌 Integrate Agents with MCP Servers
+
+Install this additional package:
+
+```bash
+pip install 'crewai-tools[mcp]'
+```
+
+You can test the **MCP** integration using the [OCI Consumption Report](./crew_agent_mcp02.py) demo, which generates a report
+of the consumption in your tenancy (top 5 compartments, over 4 weeks).
+
+To get this demo up and running:
+* download the code for the MCP server from [here](https://github.com/oracle-devrel/technology-engineering/blob/main/ai/gen-ai-agents/mcp-oci-integration/mcp_consumption.py)
+* start the MCP server on a free port (for example, 9500)
+* register its URL in the [source](./crew_agent_mcp02.py), in this section:
+```python
+server_params = {
+    "url": "http://localhost:9500/mcp",
+    "transport": "streamable-http"
+}
+```
+
+If you don't want to secure the communication with the MCP server (with JWT), put
+```python
+ENABLE_JWT_TOKEN = False
+```
+in the MCP server's config.py file.
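+
+As a quick smoke test of the gateway itself, you can also query it directly with `curl` (the endpoint is OpenAI-compatible; here we assume the gateway runs on port 4000 and accepts any key, as in the demos):
+
+```bash
+curl http://localhost:4000/v1/models \
+  -H "Authorization: Bearer sk-local-any"
+```
+
+The response should be a JSON list containing the model aliases defined in your config.yml (e.g., grok4-oci, grok4-fast-oci).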
+
diff --git a/ai/gen-ai-agents/crewai-oci-integration/config_template.yml b/ai/gen-ai-agents/crewai-oci-integration/config_template.yml
new file mode 100644
index 000000000..c44fc9eb3
--- /dev/null
+++ b/ai/gen-ai-agents/crewai-oci-integration/config_template.yml
@@ -0,0 +1,36 @@
+# config.yml template for LiteLLM with OCI Grok models
+litellm_settings:
+  drop_params: true
+  # drop unsupported params instead of returning 500 errors
+  additional_drop_params: ["max_retries"]
+
+# Common OCI connection parameters
+common_oci: &common_oci
+  provider: oci
+  oci_region: us-chicago-1
+  oci_serving_mode: ON_DEMAND
+  supports_tool_calls: true
+  oci_user: your-oci-user-ocid
+  oci_fingerprint: your-oci-api-key-fingerprint
+  oci_tenancy: your-oci-tenancy-ocid
+  oci_compartment_id: your-oci-compartment-ocid
+  oci_key_file: /path/to/your/oci_api_key.pem
+  api_key: key4321
+
+
+# List of models
+model_list:
+  - model_name: grok4-oci
+    litellm_params:
+      <<: *common_oci  # merge common OCI params
+      model: oci/xai.grok-4
+
+  - model_name: grok4-fast-oci
+    litellm_params:
+      <<: *common_oci
+      model: oci/xai.grok-4-fast-reasoning
+
+general_settings:
+  telemetry: false
+  proxy_logging: false
+  allow_model_alias: true
diff --git a/ai/gen-ai-agents/crewai-oci-integration/crew_agent_mcp01.py b/ai/gen-ai-agents/crewai-oci-integration/crew_agent_mcp01.py
new file mode 100644
index 000000000..e717315ef
--- /dev/null
+++ b/ai/gen-ai-agents/crewai-oci-integration/crew_agent_mcp01.py
@@ -0,0 +1,58 @@
+"""
+CrewAI agent with MCP
+
+This one does deep research, using internet search tools exposed by an MCP server.
+
+see:
+    https://docs.crewai.com/en/mcp/overview
+    https://docs.crewai.com/en/mcp/multiple-servers
+"""
+import os
+from crewai import Agent, Task, Crew, LLM
+from crewai_tools import MCPServerAdapter
+
+# Disable telemetry, tracing, and logging
+os.environ["CREWAI_LOGGING_ENABLED"] = "false"
+os.environ["CREWAI_TELEMETRY_ENABLED"] = "false"
+os.environ["CREWAI_TRACING_ENABLED"] = "false"
+
+llm = LLM(
+    model="grok4-fast-oci",
+    # LiteLLM proxy endpoint
+    base_url="http://localhost:4000/v1",
+    api_key="sk-local-any",
+    temperature=0.2,
+    max_tokens=4000,
+)
+
+server_params = {
+    "url": "http://localhost:8500/mcp",
+    "transport": "streamable-http"
+}
+
+# Create agent with MCP tools
+with MCPServerAdapter(server_params, connect_timeout=60) as mcp_tools:
+    print(f"Available tools: {[tool.name for tool in mcp_tools]}")
+
+    research_agent = Agent(
+        role="Research Analyst",
+        goal="Find and analyze information using advanced search tools",
+        backstory="Expert researcher with access to multiple data sources",
+        llm=llm,
+        tools=mcp_tools,
+        verbose=True
+    )
+
+    # Create task
+    research_task = Task(
+        description="Research the latest developments in AI agent frameworks",
+        expected_output="Comprehensive research report with citations",
+        agent=research_agent
+    )
+
+    # Create and run crew
+    crew = Crew(agents=[research_agent], tasks=[research_task])
+
+    result = crew.kickoff()
+
+    print(result)
\ No newline at end of file
diff --git a/ai/gen-ai-agents/crewai-oci-integration/crew_agent_mcp02.py b/ai/gen-ai-agents/crewai-oci-integration/crew_agent_mcp02.py
new file mode 100644
index 000000000..675405604
--- /dev/null
+++ b/ai/gen-ai-agents/crewai-oci-integration/crew_agent_mcp02.py
@@ -0,0 +1,79 @@
+"""
+CrewAI agent with MCP
+
+This one analyzes tenant consumption via an MCP server.
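+Requires the OCI consumption MCP server described in the README, listening on port 9500.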
+
+see:
+    https://docs.crewai.com/en/mcp/overview
+    https://docs.crewai.com/en/mcp/multiple-servers
+"""
+import os
+from datetime import datetime
+from crewai import Agent, Task, Crew, LLM
+from crewai_tools import MCPServerAdapter
+
+# Disable telemetry, tracing, and logging
+os.environ["CREWAI_LOGGING_ENABLED"] = "false"
+os.environ["CREWAI_TELEMETRY_ENABLED"] = "false"
+os.environ["CREWAI_TRACING_ENABLED"] = "false"
+
+llm = LLM(
+    model="grok4-oci",
+    # LiteLLM proxy endpoint
+    base_url="http://localhost:4000/v1",
+    api_key="sk-local-any",
+    temperature=0.0,
+    max_tokens=4000,
+)
+
+# OCI consumption MCP server
+server_params = {
+    "url": "http://localhost:9500/mcp",
+    "transport": "streamable-http"
+}
+
+# Create agent with MCP tools
+with MCPServerAdapter(server_params, connect_timeout=60) as mcp_tools:
+    print(f"Available tools: {[tool.name for tool in mcp_tools]}")
+
+    research_agent = Agent(
+        role="OCI Consumption Analyst",
+        goal="Find and analyze information about OCI tenant consumption.",
+        backstory="Expert analyst with access to multiple data sources",
+        llm=llm,
+        tools=mcp_tools,
+        max_iter=30,
+        max_retry_limit=5,
+        verbose=True
+    )
+
+    # Create task
+    research_task = Task(
+        description="Identify the top 5 compartments by consumption (amount) for the OCI tenant "
+        "in the weeks of September 2025, then analyze the trends and provide insights on usage patterns. "
+        "Fully analyze the top 5 compartments. Use only the amount, not the quantity.",
+        expected_output="Comprehensive report with data-backed insights.",
+        agent=research_agent
+    )
+
+    # Create and run crew
+    crew = Crew(agents=[research_agent], tasks=[research_task])
+
+    result = crew.kickoff()
+
+    print(result)
+
+    # --- Save the result to a Markdown file ---
+    # Create an output directory if it doesn't exist
+    output_dir = "reports"
+    os.makedirs(output_dir, exist_ok=True)
+
+    # Use a timestamped filename for clarity
+    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+    output_path = os.path.join(output_dir, f"oci_consumption_report_{timestamp}.md")
+
+    # Write the result
+    with open(output_path, "w", encoding="utf-8") as f:
+        f.write(str(result))
+
+    print(f"\n✅ Report saved successfully to: {output_path}")
\ No newline at end of file
diff --git a/ai/gen-ai-agents/crewai-oci-integration/multi_agent_report.py b/ai/gen-ai-agents/crewai-oci-integration/multi_agent_report.py
new file mode 100644
index 000000000..ca25d72e1
--- /dev/null
+++ b/ai/gen-ai-agents/crewai-oci-integration/multi_agent_report.py
@@ -0,0 +1,270 @@
+"""
+Multi-agent report builder with CrewAI + LiteLLM (OCI)
+- Planner -> generates outline
+- Multiple Section Writers -> draft each section
+- Synthesizer -> compiles final report
+
+Run:
+    python multi_agent_report.py "Subject to analyze"
+"""
+
+import os
+import sys
+from typing import List
+
+from pydantic import BaseModel, Field
+from crewai import Agent, Task, Crew, LLM
+
+# --- Disable CrewAI phone-home/logs in locked-down environments ---
+os.environ["CREWAI_LOGGING_ENABLED"] = "false"
+os.environ["CREWAI_TELEMETRY_ENABLED"] = "false"
+os.environ["CREWAI_TRACING_ENABLED"] = "false"
+
+# Make the Instructor/OpenAI client use your LiteLLM proxy
+os.environ.setdefault("OPENAI_API_KEY", "sk-local-any")
+os.environ.setdefault("OPENAI_BASE_URL", "http://localhost:4000/v1")
+
+
+# =========================
+# LLM CONFIG (LiteLLM proxy)
+# =========================
+def make_llm():
+    print("\n=== CONFIGURING LLM ===")
+    return LLM(
+        model="grok4-fast-oci",  # your LiteLLM model alias
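+        # NOTE: the alias must match a `model_name` entry in the config.yml
+        # used to start the LiteLLM gateway (see config_template.yml)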
base_url="http://localhost:4000/v1", # LiteLLM proxy endpoint + api_key="sk-local-any", + temperature=0.2, + max_tokens=4000, + ) + + +# ========================= +# STRUCTURED OUTPUT MODELS +# ========================= +class Outline(BaseModel): + subject: str + title: str + sections: List[str] = Field(..., description="Ordered list of section titles") + + +class SectionDraft(BaseModel): + section_title: str + key_points: List[str] + content: str + + +class FinalReport(BaseModel): + subject: str + outline: Outline + executive_summary: str + sections: List[SectionDraft] + + +# ========================= +# AGENTS +# ========================= +def make_planner(llm: LLM) -> Agent: + print("=== DEFINING PLANNER AGENT ===") + return Agent( + role="Planner", + goal="Create a clear, logically ordered outline for a technical report.", + backstory=( + "A senior analyst with strong information architecture skills. " + "Produces pragmatic outlines tailored to enterprise readers." + ), + llm=llm, + allow_delegation=False, + ) + + +def make_section_writer(llm: LLM) -> Agent: + print("=== DEFINING SECTION WRITER AGENT ===") + return Agent( + role="Section Writer", + goal="Write concise, well-structured sections from an outline.", + backstory=( + "A staff technical writer focused on clarity, correctness, and actionable insights. " + "Avoids fluff and repetition; uses bullet points where helpful." + ), + llm=llm, + allow_delegation=False, + ) + + +def make_synthesizer(llm: LLM) -> Agent: + print("=== DEFINING SYNTHESIZER AGENT ===") + return Agent( + role="Report Synthesizer", + goal="Assemble a coherent, polished report from multiple section drafts.", + backstory=( + "An editor who specializes in executive summaries and narrative cohesion. " + "Ensures consistency of tone, terminology, and depth across sections." + ), + llm=llm, + allow_delegation=False, + ) + + +# ========================= +# SINGLE-TASK CREWS HELPERS +# (We run 3 stages: plan -> write -> synthesize) +# ========================= +def run_planner(subject: str, llm: LLM) -> Outline: + planner = make_planner(llm) + + print("=== DEFINING PLANNER TASK ===") + plan_task = Task( + description=( + "Create a structured outline for a technical report on the subject:\n" + f"SUBJECT: {subject}\n\n" + "Constraints:\n" + "- Audience: enterprise architects / AI platform owners.\n" + "- Depth: practical and decision-oriented.\n" + "- Include 5–8 sections, ordered logically.\n" + "- Title should be short and informative.\n" + "Return ONLY a valid JSON object matching the schema.\n" + ), + expected_output=( + "A JSON object with: 'subject', 'title', and 'sections' (array of section titles)." + ), + agent=planner, + output_pydantic=Outline, + ) + + print("=== RUNNING PLANNER CREW ===") + crew = Crew(agents=[planner], tasks=[plan_task]) + _ = crew.kickoff() + + outline = plan_task.output.pydantic # type: ignore + if not outline or not outline.sections: + raise RuntimeError("Planner produced no sections. 
+    outline = plan_task.output.pydantic  # type: ignore
+    if not outline or not outline.sections:
+        raise RuntimeError("Planner produced no sections. Check LLM config or prompts.")
+    return outline
+
+
+def run_section_writers(outline: Outline, llm: LLM) -> List[SectionDraft]:
+    writer = make_section_writer(llm)
+
+    section_tasks: List[Task] = []
+    print("=== DEFINING SECTION TASKS ===")
+    for idx, section in enumerate(outline.sections, start=1):
+        t = Task(
+            description=(
+                f"Write the section #{idx} titled '{section}' for a report titled '{outline.title}' "
+                f"on the subject '{outline.subject}'.\n\n"
+                "Deliverables:\n"
+                "- 4–7 key bullet points (actionable and non-redundant).\n"
+                "- A concise section narrative (120–250 words), no marketing fluff.\n"
+                "- Avoid repeating content from other sections.\n"
+                "Return ONLY a valid JSON object matching the schema.\n"
+            ),
+            expected_output=(
+                "A JSON object with 'section_title', 'key_points' (array of strings), and 'content' (string)."
+            ),
+            agent=writer,
+            output_pydantic=SectionDraft,
+        )
+        section_tasks.append(t)
+
+    print("=== RUNNING SECTION WRITERS CREW ===")
+    crew = Crew(agents=[writer], tasks=section_tasks)
+    _ = crew.kickoff()
+
+    drafts: List[SectionDraft] = []
+    for t in section_tasks:
+        p = getattr(t.output, "pydantic", None)
+        if not p:
+            raise RuntimeError(
+                f"Section task for '{t.description[:60]}...' produced no structured output."
+            )
+        drafts.append(p)
+    return drafts
+
+
+def run_synthesizer(
+    outline: Outline, drafts: List[SectionDraft], llm: LLM
+) -> FinalReport:
+    synthesizer = make_synthesizer(llm)
+
+    print("=== DEFINING SYNTHESIS TASK ===")
+    # Prepare a compact representation of the drafts for the synthesizer's context
+    drafts_context = "\n\n".join(
+        [
+            f"[{i+1}] {d.section_title}\n- "
+            + "\n- ".join(d.key_points)
+            + f"\n\n{d.content}"
+            for i, d in enumerate(drafts)
+        ]
+    )
+
+    synth_task = Task(
+        description=(
+            f"Assemble the final report for SUBJECT: {outline.subject}\n"
+            f"TITLE: {outline.title}\n\n"
+            "You are given the drafted sections below. Your job:\n"
+            "1) Produce a crisp executive summary (120–180 words)\n"
+            "2) Preserve the order of sections.\n"
+            "3) Normalize terminology and tone across sections.\n"
+            "4) Do not introduce new claims; keep it faithful to the drafts.\n"
+            "Return ONLY a valid JSON object matching the schema.\n\n"
+            f"DRAFTED SECTIONS:\n{drafts_context}\n"
+        ),
+        expected_output=(
+            "A JSON object with: 'subject', 'outline' (with subject/title/sections), "
+            "'executive_summary' (string), and 'sections' (array of {section_title,key_points,content})."
+        ),
+        agent=synthesizer,
+        output_pydantic=FinalReport,
+    )
+
+    print("=== RUNNING SYNTHESIS CREW ===")
+    crew = Crew(agents=[synthesizer], tasks=[synth_task])
+    _ = crew.kickoff()
+
+    final_report = synth_task.output.pydantic  # type: ignore
+    if not final_report:
+        raise RuntimeError("Synthesizer produced no structured report.")
+    return final_report
+
+
+# =========================
+# MAIN
+# =========================
+def main():
+    if len(sys.argv) < 2:
+        print('Usage: python multi_agent_report.py "Your subject here"')
+        sys.exit(1)
+
+    subject = sys.argv[1].strip()
+    print(f"\n=== SUBJECT ===\n{subject}\n")
+
+    llm = make_llm()
+
+    # Stage 1: Plan
+    outline = run_planner(subject, llm)
+    print("\n=== OUTLINE (structured) ===")
+    print(outline.model_dump_json(indent=2))
+
+    # Stage 2: Write sections
+    drafts = run_section_writers(outline, llm)
+    print("\n=== FIRST SECTION DRAFT (preview) ===")
+    print(drafts[0].model_dump_json(indent=2))
+
+    # Stage 3: Synthesize final report
+    final_report = run_synthesizer(outline, drafts, llm)
+    print("\n=== FINAL REPORT (structured) ===")
+    print(final_report.model_dump_json(indent=2))
+
+    # Optional: also print a readable text version
+    print("\n=== FINAL REPORT (readable) ===\n")
+    print(f"# {final_report.outline.title}\n")
+    print("## Executive Summary\n")
+    print(final_report.executive_summary.strip(), "\n")
+    for i, sec in enumerate(final_report.sections, start=1):
+        print(f"## {i}. {sec.section_title}")
+        if sec.key_points:
+            print("\n- " + "\n- ".join(sec.key_points))
+        print("\n" + sec.content.strip() + "\n")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ai/gen-ai-agents/crewai-oci-integration/simple_test_crewai_agent.py b/ai/gen-ai-agents/crewai-oci-integration/simple_test_crewai_agent.py
new file mode 100644
index 000000000..463a50b0b
--- /dev/null
+++ b/ai/gen-ai-agents/crewai-oci-integration/simple_test_crewai_agent.py
@@ -0,0 +1,53 @@
+"""
+Test CrewAI with LiteLLM and OCI Generative AI
+"""
+
+import os
+from crewai import Agent, Task, Crew, LLM
+
+# Disable telemetry, tracing, and logging
+os.environ["CREWAI_LOGGING_ENABLED"] = "false"
+os.environ["CREWAI_TELEMETRY_ENABLED"] = "false"
+os.environ["CREWAI_TRACING_ENABLED"] = "false"
+
+# Configure the LLM (Grok model served via LiteLLM proxy on OCI)
+print("\n=== CONFIGURING LLM ===")
+
+llm = LLM(
+    model="grok4-fast-oci",
+    # LiteLLM proxy endpoint
+    base_url="http://localhost:4000/v1",
+    api_key="sk-local-any",
+    temperature=0.2,
+    max_tokens=4000,
+)
+
+# Define the agent
+print("=== DEFINING AGENT ===")
+researcher = Agent(
+    role="Researcher",
+    goal="Analyze documents and synthesize insights.",
+    backstory="Expert in enterprise Generative AI.",
+    llm=llm,
+)
+
+# Define the task assigned to the agent
+print("=== DEFINING TASK ===")
+task = Task(
+    description="Summarize in 10 bullet points the pros and cons of using LiteLLM with OCI Generative AI.",
+    expected_output="A 10-bullet summary, clear and non-redundant.",
+    agent=researcher,
+)
+
+# Create the crew (collection of agents and tasks)
+print("=== CREATING CREW ===")
+crew = Crew(agents=[researcher], tasks=[task])
+
+# Execute the crew and print the result
+print("")
+print("\n=== EXECUTING CREW ===\n")
+
+result = crew.kickoff()
+
+print("\n=== CREW RESULT ===\n")
+print(result)
diff --git a/ai/gen-ai-agents/crewai-oci-integration/start_gateway.sh b/ai/gen-ai-agents/crewai-oci-integration/start_gateway.sh
new file mode 100755
index 000000000..6c4be304e
--- /dev/null
+++ b/ai/gen-ai-agents/crewai-oci-integration/start_gateway.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+# export LITELLM_LOG=DEBUG
+litellm --config ./config.yml --port 4000
\ No newline at end of file