72 changes: 50 additions & 22 deletions langchain/shopping-agent/.env.example
@@ -1,28 +1,56 @@
# If using OpenAI
# ============================================================
# LLM Configuration (AWS Bedrock is preferred, OpenAI as fallback)
# ============================================================

OPENAI_API_KEY="<openai-api-key>"
OPENAI_BASE_URL="<openai-base-url>" # Optional, if different from the default

# Optional for LangSmith tracing and experiment tracking

LANGSMITH_API_KEY="<langsmith-api-key>"
LANGSMITH_ENDPOINT="<langsmith-endpoint>" #defaults to https://api.smith.langchain.com if using cloud
LANGSMITH_TRACING="true"
# AWS Bedrock (Primary - Recommended for AWS deployments)
AWS_REGION_NAME="us-east-1"
AWS_ACCESS_KEY_ID=""
AWS_SECRET_ACCESS_KEY=""
AWS_MODEL_ARN="us.anthropic.claude-sonnet-4-5-20250929-v1:0"
AWS_MODEL_ID="us.anthropic.claude-sonnet-4-5-20250929-v1:0"

# OpenAI (Fallback if AWS Bedrock is not configured)
OPENAI_API_KEY=""
OPENAI_MODEL="gpt-5-mini" # Default OpenAI model
OPENAI_BASE_URL="" # Optional

# ============================================================
# Optional LangSmith tracing and experiment tracking
# ============================================================
LANGSMITH_API_KEY=""
LANGSMITH_ENDPOINT="https://api.smith.langchain.com"
LANGSMITH_TRACING="false"
LANGSMITH_PROJECT="aws-shopping-agent"

# If using Azure OpenAI
# ============================================================
# Alternative LLM Providers (Optional)
# ============================================================

AZURE_OPENAI_API_KEY="<azure-openai-api-key>"
AZURE_OPENAI_ENDPOINT="<azure-openai-endpoint>"
# Azure OpenAI
AZURE_OPENAI_API_KEY=""
AZURE_OPENAI_ENDPOINT=""
AZURE_OPENAI_API_VERSION=""

# If using Anthropic

ANTHROPIC_API_KEY="<anthropic-api-key>"

# If using AWS

AWS_ACCESS_KEY_ID=""
AWS_SECRET_ACCESS_KEY=""
AWS_REGION_NAME=""
AWS_MODEL_ARN=""
# Anthropic (Direct, not via Bedrock)
ANTHROPIC_API_KEY=""

# OpenSearch Configuration
# For local development with Docker
OPENSEARCH_HOST="localhost"
OPENSEARCH_PORT="9200"
OPENSEARCH_USE_SSL="false"
OPENSEARCH_VERIFY_CERTS="false"
OPENSEARCH_USERNAME="" # Empty for local Docker with security disabled
OPENSEARCH_PASSWORD="" # Empty for local Docker with security disabled

# For Amazon OpenSearch Service (production)
# OPENSEARCH_HOST="your-domain.us-east-1.es.amazonaws.com"
# OPENSEARCH_PORT="443"
# OPENSEARCH_USE_SSL="true"
# OPENSEARCH_VERIFY_CERTS="true"
# OPENSEARCH_USERNAME="admin"
# OPENSEARCH_PASSWORD="your-secure-password"

# OpenSearch Index Configuration
OPENSEARCH_INDEX_PRODUCTS="shopping_products"
OPENSEARCH_MODEL_ID="" # Will be populated after model deployment
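
For reference, a minimal sketch of how an application might implement the Bedrock-first, OpenAI-fallback selection described above, assuming the langchain-aws and langchain-openai packages are installed; the get_llm helper is illustrative and not part of this PR:

import os

def get_llm():
    """Prefer AWS Bedrock when credentials are present; otherwise fall back to OpenAI."""
    if os.getenv("AWS_ACCESS_KEY_ID") and os.getenv("AWS_SECRET_ACCESS_KEY"):
        from langchain_aws import ChatBedrockConverse
        return ChatBedrockConverse(
            model=os.getenv("AWS_MODEL_ID", "us.anthropic.claude-sonnet-4-5-20250929-v1:0"),
            region_name=os.getenv("AWS_REGION_NAME", "us-east-1"),
        )
    from langchain_openai import ChatOpenAI
    return ChatOpenAI(model=os.getenv("OPENAI_MODEL", "gpt-5-mini"))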
116 changes: 89 additions & 27 deletions langchain/shopping-agent/agents/agent.py
@@ -3,17 +3,16 @@
from pydantic import BaseModel, Field

from langchain.messages import SystemMessage, HumanMessage, AIMessage
from langchain.agents import create_agent
from langchain.tools import tool, ToolRuntime

from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import AnyMessage, add_messages
from langgraph.managed.is_last_step import RemainingSteps
from langgraph.store.base import BaseStore
from langgraph.types import interrupt

from agents.subagents import invoice_subagent
from agents.subagents import invoice_subagent, opensearch_subagent
from agents.prompts import (
supervisor_routing_prompt,
supervisor_system_prompt,
extract_customer_info_prompt,
verify_customer_info_prompt,
@@ -35,34 +34,69 @@ class InputState(TypedDict):
class State(InputState):
customer_id: NotRequired[str]
loaded_memory: NotRequired[str]
next_agent: NotRequired[str] # For conditional routing


# ------------------------------------------------------------
# Supervisor Graph
# Supervisor Router - Decides which agent to route to
# ------------------------------------------------------------
@tool(
name_or_callable="invoice_subagent",
description="""An agent that can assist with all invoice-related queries. It can retrieve information about a customer's past purchases or invoices."""
)
def call_invoice_subagent(runtime: ToolRuntime, query: str):
print('made it here')
print(f"invoice subagent input: {query}")
def supervisor_router(state: State) -> dict:
"""
Supervisor that routes to the appropriate subagent using an LLM decision.
Uses conditional routing instead of tool calls to avoid a Bedrock ValidationException.
"""
messages = state["messages"]

# Create routing prompt with conversation context
routing_messages = [
SystemMessage(content=supervisor_routing_prompt),
*messages
]

# Get routing decision from LLM
response = llm.invoke(routing_messages)
next_agent = response.content.strip()

print(f"[Supervisor] Routing decision: {next_agent}")

# Store the routing decision in state
return {"next_agent": next_agent}
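
Since the router returns response.content.strip() verbatim and the conditional edges below match on exact keys, supervisor_routing_prompt (defined in agents/prompts.py, not shown in this diff) must constrain the model to emit a single routing token. A hypothetical shape:

supervisor_routing_prompt = """You are a supervisor routing a customer request.
Reply with exactly one token and nothing else:
- invoice_agent: questions about the customer's past purchases or invoices
- opensearch_agent: product search, recommendations, and catalog questions
- FINISH: the request is complete or needs no subagent
"""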

# ------------------------------------------------------------
# Subagent Nodes - Execute specialized tasks
# ------------------------------------------------------------
def invoice_agent_node(state: State) -> dict:
"""Node that executes the invoice subagent."""
print(f"[Invoice Agent] Processing query")

# Get only user messages (filter out supervisor routing messages)
user_messages = [msg for msg in state["messages"] if msg.type in ["human", "user"]]

# Invoke the invoice subagent with clean message history
result = invoice_subagent.invoke({
"messages": [{"role": "user", "content": query}],
"customer_id": runtime.state.get("customer_id", {})
"messages": user_messages, # Only user messages, no tool_use artifacts
"customer_id": state.get("customer_id", ""),
})
subagent_response = result["messages"][-1].content
return subagent_response

# TODO: Add Opensearch E-commerce Agent as tool
# Return the subagent's response as new messages
return {"messages": result["messages"]}

supervisor = create_agent(
model="openai:gpt-4o",
tools=[call_invoice_subagent], # TODO: Add Opensearch E-commerce Agent as tool
name="supervisor",
system_prompt=supervisor_system_prompt,
state_schema=State,
)
def opensearch_agent_node(state: State) -> dict:
"""Node that executes the opensearch subagent."""
print(f"[OpenSearch Agent] Processing query")

# Get only user messages (filter out supervisor routing messages)
user_messages = [msg for msg in state["messages"] if msg.type in ["human", "user"]]

# Invoke the opensearch subagent with clean message history
result = opensearch_subagent.invoke({
"messages": user_messages, # Only user messages, no tool_use artifacts
"customer_id": state.get("customer_id", ""),
"loaded_memory": state.get("loaded_memory", "")
})

# Return the subagent's response as new messages
return {"messages": result["messages"]}

# ------------------------------------------------------------
# Human Feedback Nodes
@@ -138,15 +172,26 @@ def create_memory(state: State, store: BaseStore):


# ------------------------------------------------------------
# State Graph
# State Graph with Conditional Routing
# ------------------------------------------------------------
workflow_builder = StateGraph(State, input_schema=InputState)
def route_after_supervisor(state: State) -> str:
"""Route to the appropriate agent based on supervisor's decision."""
next_agent = state.get("next_agent", "FINISH")
print(f"[Router] Directing to: {next_agent}")
return next_agent

workflow_builder = StateGraph(State, input_schema=InputState)

# Add all nodes
workflow_builder.add_node("verify_info", verify_info)
workflow_builder.add_node("human_input", human_input)
workflow_builder.add_node("load_memory", load_memory)
workflow_builder.add_node("supervisor", supervisor)
workflow_builder.add_node("supervisor", supervisor_router) # Router, not agent
workflow_builder.add_node("opensearch_agent", opensearch_agent_node) # Subagent node
workflow_builder.add_node("invoice_agent", invoice_agent_node) # Subagent node
workflow_builder.add_node("create_memory", create_memory)

# Build the workflow
workflow_builder.add_edge(START, "verify_info")
workflow_builder.add_conditional_edges(
"verify_info",
@@ -158,7 +203,24 @@
)
workflow_builder.add_edge("human_input", "verify_info")
workflow_builder.add_edge("load_memory", "supervisor")
workflow_builder.add_edge("supervisor", "create_memory")

# Conditional routing from supervisor to agents
workflow_builder.add_conditional_edges(
"supervisor",
route_after_supervisor,
{
"opensearch_agent": "opensearch_agent",
"invoice_agent": "invoice_agent",
"FINISH": "create_memory"
}
)

# Both agents return to create_memory
workflow_builder.add_edge("opensearch_agent", "create_memory")
workflow_builder.add_edge("invoice_agent", "create_memory")
workflow_builder.add_edge("create_memory", END)

# Compile the graph
# LangGraph API (dev or cloud) provides managed persistence automatically.
# Do not use a custom store - the platform handles it.
graph = workflow_builder.compile(name="multi_agent_verify")
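
A local smoke test might look like the following sketch; it assumes an in-memory checkpointer and store (interrupt() requires a checkpointer when running outside the LangGraph API), and the thread_id and query are illustrative:

from langchain.messages import HumanMessage
from langgraph.checkpoint.memory import MemorySaver
from langgraph.store.memory import InMemoryStore

# Outside the LangGraph API, persistence must be supplied explicitly.
local_graph = workflow_builder.compile(
    checkpointer=MemorySaver(),
    store=InMemoryStore(),
    name="multi_agent_verify",
)
config = {"configurable": {"thread_id": "demo-thread"}}
result = local_graph.invoke(
    {"messages": [HumanMessage(content="What did I buy last month?")]},
    config=config,
)
print(result["messages"][-1].content)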