Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/workflows/deploy-changed-samples.yml
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,7 @@ jobs:
TEST_SLACK_CHANNEL_ID: ${{ secrets.TEST_SLACK_CHANNEL_ID }}
TEST_SLACK_TOKEN: ${{ secrets.TEST_SLACK_TOKEN }}
TEST_SHARED_SECRETS: ${{ secrets.TEST_SHARED_SECRETS}}
TEST_TAVILY_API_KEY: ${{ secrets.TEST_TAVILY_API_KEY }}
TEST_ALLOWED_HOSTS: ${{ secrets.TEST_ALLOWED_HOSTS }}
run: |
SAMPLES=$(sed 's|^samples/||' changed_samples.txt | paste -s -d ',' -)
Expand Down
1 change: 1 addition & 0 deletions samples/agentic-langgraph/.devcontainer/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
FROM mcr.microsoft.com/devcontainers/typescript-node:22-bookworm
11 changes: 11 additions & 0 deletions samples/agentic-langgraph/.devcontainer/devcontainer.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
{
"build": {
"dockerfile": "Dockerfile",
"context": ".."
},
"features": {
"ghcr.io/defanglabs/devcontainer-feature/defang-cli:1.0.4": {},
"ghcr.io/devcontainers/features/docker-in-docker:2": {},
"ghcr.io/devcontainers/features/aws-cli:1": {}
}
}
25 changes: 25 additions & 0 deletions samples/agentic-langgraph/.github/workflows/deploy.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
# GitHub Actions workflow: deploy this sample to Defang on every push to main.
name: Deploy

on:
  push:
    branches:
      - main

jobs:
  deploy:
    # Runs in the "playground" environment (provides its secrets / protection rules).
    environment: playground
    runs-on: ubuntu-latest
    permissions:
      contents: read
      # id-token: write is presumably required for OIDC auth by the Defang
      # action — NOTE(review): confirm against the action's docs.
      id-token: write

    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4

      - name: Deploy
        uses: DefangLabs/defang-github-action@v1.1.3
        with:
          # Tells the action which env vars to pass through as Defang config.
          config-env-vars: TAVILY_API_KEY
        env:
          TAVILY_API_KEY: ${{ secrets.TAVILY_API_KEY }}
2 changes: 2 additions & 0 deletions samples/agentic-langgraph/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
.env
node_modules/
57 changes: 57 additions & 0 deletions samples/agentic-langgraph/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
# Agentic LangGraph

[![1-click-deploy](https://raw.githubusercontent.com/DefangLabs/defang-assets/main/Logos/Buttons/SVG/deploy-with-defang.svg)](https://portal.defang.dev/redirect?url=https%3A%2F%2Fgithub.com%2Fnew%3Ftemplate_name%3Dsample-agentic-langgraph-template%26template_owner%3DDefangSamples)

This sample demonstrates a LangGraph Agent application deployed with Defang. You can customize the agent's tools as needed. For example, it includes a Tavily Search tool for performing search queries, which requires a `TAVILY_API_KEY` (see [Configuration](#configuration) for setup details).

## Prerequisites

1. Download [Defang CLI](https://github.com/DefangLabs/defang)
2. (Optional) If you are using [Defang BYOC](https://docs.defang.io/docs/concepts/defang-byoc) authenticate with your cloud provider account
3. (Optional for local development) [Docker CLI](https://docs.docker.com/engine/install/)

## Development

To run the application locally, you can use the following command:

```bash
docker compose -f compose.dev.yaml up --build
```

## Configuration

For this sample, you will need to provide the following [configuration](https://docs.defang.io/docs/concepts/configuration):

> Note that if you are using the 1-click deploy option, you can set these values as secrets in your GitHub repository and the action will automatically supply them to your deployment.

### `TAVILY_API_KEY`
A Tavily API key for accessing [Tavily Search](https://www.tavily.com/).
```bash
defang config set TAVILY_API_KEY
```

## Deployment

> [!NOTE]
> Download [Defang CLI](https://github.com/DefangLabs/defang)

### Defang Playground

Deploy your application to the Defang Playground by opening up your terminal and typing:
```bash
defang compose up
```

### BYOC

If you want to deploy to your own cloud account, you can [use Defang BYOC](https://docs.defang.io/docs/tutorials/deploy-to-your-cloud).

---

Title: Agentic LangGraph

Short Description: A LangGraph Agent application that can use tools, deployed with Defang.

Tags: Agent, LangGraph, LangChain, AI, OpenAI, Tavily

Languages: TypeScript
2 changes: 2 additions & 0 deletions samples/agentic-langgraph/app/.dockerignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
node_modules
npm-debug.log
2 changes: 2 additions & 0 deletions samples/agentic-langgraph/app/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
node_modules
.env
19 changes: 19 additions & 0 deletions samples/agentic-langgraph/app/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@

# Runtime image for the agent app: Node 22 on slim Debian bookworm.
FROM node:22-bookworm-slim

# Install curl, then clean apt caches to keep the image small.
# NOTE(review): curl is presumably used by a container healthcheck — confirm.
RUN apt-get update -qq \
    && apt-get install -y curl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Copy manifests first so the npm install layer is cached across
# source-only changes.
COPY package*.json ./

# Production dependencies only.
RUN npm install --omit=dev

COPY . .

# Declares the port the app serves on.
EXPOSE 3000

CMD ["npm", "start"]
74 changes: 74 additions & 0 deletions samples/agentic-langgraph/app/agent.mts
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@

import { TavilySearch } from "@langchain/tavily";
import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage, AIMessage } from "@langchain/core/messages";
import { ToolNode } from "@langchain/langgraph/prebuilt";
import { StateGraph, MessagesAnnotation } from "@langchain/langgraph";

// Tools the agent may call: Tavily web search, capped at 3 results.
const tools = [new TavilySearch({ maxResults: 3 })];
const toolNode = new ToolNode(tools);

// Resolve LLM endpoint and model name from the environment, falling back
// to OpenAI defaults when unset.
const llmBaseUrl = process.env.LLM_URL || "https://api.openai.com/v1/";
console.log("Using LLM base URL:", llmBaseUrl);
const llmModelName = process.env.LLM_MODEL || "gpt-4o-mini";
console.log("Using LLM model:", llmModelName);

// Chat model bound to the tool set so it can emit tool calls.
const model = new ChatOpenAI({
  model: llmModelName,
  temperature: 0.7,
  configuration: { baseURL: llmBaseUrl },
}).bindTools(tools);

// Routing predicate for the graph: after the agent node runs, inspect the
// newest message and route to "tools" when the model requested tool calls,
// otherwise end the run and reply to the user.
function shouldContinue({ messages }: typeof MessagesAnnotation.State) {
  const latest = messages[messages.length - 1] as AIMessage;
  // A non-empty tool_calls array means the model wants a tool invoked;
  // "__end__" is the special terminal node name.
  return latest.tool_calls?.length ? "tools" : "__end__";
}

// Agent node: send the accumulated conversation to the model and return its
// reply wrapped in a list (MessagesAnnotation appends it to the state's
// existing message list).
async function callModel(state: typeof MessagesAnnotation.State) {
  const reply = await model.invoke(state.messages);
  return { messages: [reply] };
}

// Build the agent graph: start at the agent node, detour through the tools
// node whenever the model asks for a tool, and loop back until it is done.
const workflow = new StateGraph(MessagesAnnotation)
  .addNode("agent", callModel)
  .addNode("tools", toolNode)
  .addEdge("__start__", "agent") // "__start__" is the special entrypoint name
  .addEdge("tools", "agent")
  .addConditionalEdges("agent", shouldContinue);

// Compile the graph into a LangChain Runnable.
const app = workflow.compile();

// Run the compiled agent on a user input, optionally seeded with prior
// conversation history. Resolves to the final answer's content plus the
// full message transcript (useful for continuing the conversation).
const getAgentOutput = async (input: string, previousMessages: (HumanMessage | AIMessage)[] = []) => {
  const finalState = await app.invoke({
    messages: [...previousMessages, new HumanMessage(input)],
  });
  const lastMessage = finalState.messages[finalState.messages.length - 1];
  return {
    content: lastMessage.content,
    messages: finalState.messages,
  };
};

// Convenience wrapper: run the agent and return only the final message
// content (typically a plain readable string for text-only replies).
export const getAgentOutputAsString = async (input: string, previousMessages: (HumanMessage | AIMessage)[] = []) => {
  const result = await getAgentOutput(input, previousMessages);
  return result.content;
};

Loading
Loading