Seng/fix gh action #198
Changes from all commits
@@ -12,60 +12,24 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Get changed and removed files
        id: files
      - name: Collect and validate files
        run: |
          set -euo pipefail
          git fetch origin ${{ github.event.before }}
          ./bin/collect-changed-files.sh "${{ github.event.before }}" "${{ github.sha }}" | \
            ./bin/validate-files.sh > changed-files.txt
          # Get changed files (relative to content directory)
          CHANGED_FILES=$(git diff --name-only ${{ github.event.before }} ${{ github.sha }} -- 'content/**/*.mdx' | sed 's|^content/||')
          REMOVED_FILES=$(git diff --name-only --diff-filter=D ${{ github.event.before }} ${{ github.sha }} -- 'content/**/*.mdx' | sed 's|^content/||')

          echo "Changed files: $CHANGED_FILES"
          echo "Removed files: $REMOVED_FILES"

          # Build JSON payload with file contents
          payload=$(jq -n \
            --arg commit "${{ github.sha }}" \
            --arg repo "${{ github.repository }}" \
            --argjson changed "$(
              if [ -n "$CHANGED_FILES" ]; then
                for f in $CHANGED_FILES; do
                  if [ -f "content/$f" ]; then
                    jq -n \
                      --arg path "$f" \
                      --arg content "$(base64 -w0 < "content/$f")" \
                      '{path: $path, content: $content}'
                  fi
                done | jq -s '.'
              else
                echo '[]'
              fi
            )" \
            --argjson removed "$(
              if [ -n "$REMOVED_FILES" ]; then
                printf '%s\n' $REMOVED_FILES | jq -R -s -c 'split("\n") | map(select(length > 0))'
              else
                echo '[]'
              fi
            )" \
            '{commit: $commit, repo: $repo, changed: $changed, removed: $removed}'
          )

          echo "payload<<EOF" >> $GITHUB_OUTPUT
          echo "$payload" >> $GITHUB_OUTPUT
          echo "EOF" >> $GITHUB_OUTPUT
          echo "Files to sync:"
          cat changed-files.txt

      - name: Trigger Agentuity Sync Agent
      - name: Build and send payload
        env:
          AGENTUITY_TOKEN: ${{ secrets.AGENTUITY_TOKEN }}
        run: |
          echo "Sending payload to agent:"
          echo '${{ steps.files.outputs.payload }}' | jq '.'

          curl https://agentuity.ai/webhook/f61d5ce9d6ed85695cc992c55ccdc2a6 \
            -X POST \
            -H "Authorization: Bearer $AGENTUITY_TOKEN" \
            -H "Content-Type: application/json" \
            -d '${{ steps.files.outputs.payload }}'
          set -euo pipefail
          cat changed-files.txt | \
            ./bin/build-payload.sh "${{ github.repository }}" incremental | \
            ./bin/send-webhook.sh "https://agentuity.ai/webhook/f61d5ce9d6ed85695cc992c55ccdc2a6" "Bearer $AGENTUITY_TOKEN"
Review comment: Secure webhook URL and add newline at EOF.

- ./bin/send-webhook.sh "https://agentuity.ai/webhook/…"
+ ./bin/send-webhook.sh "${{ secrets.SYNC_DOCS_WEBHOOK_URL }}"
+# (ensure newline at end of file)

YAMLlint (1.37.1): [error] 35-35: no new line character at the end of file (new-line-at-end-of-file)
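For reference, the removed inline step built the webhook body with the shape sketched below. Presumably `./bin/build-payload.sh` now emits something equivalent, but the helper scripts are not part of this diff, so treat this TypeScript sketch as an assumption rather than the actual contract:

```typescript
// Assumed shape of the sync payload, inferred from the removed inline jq step.
// Whether ./bin/build-payload.sh emits exactly this structure is not shown in this diff.
interface SyncPayload {
  commit: string;        // ${{ github.sha }}
  repo: string;          // ${{ github.repository }}
  changed: Array<{
    path: string;        // path relative to content/
    content: string;     // base64-encoded file contents
  }>;
  removed: string[];     // paths of deleted content/**/*.mdx files
}
```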
@@ -0,0 +1,122 @@
import type { AgentContext, AgentRequest, AgentResponse } from '@agentuity/sdk';
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

import type { ChunkMetadata } from '../doc-processing/types';
import { VECTOR_STORE_NAME, vectorSearchNumber } from '../../../../config';
import type { RelevantDoc } from './types';

export default async function Agent(
  req: AgentRequest,
  resp: AgentResponse,
  ctx: AgentContext
) {
  const prompt = await req.data.text();
  const relevantDocs = await retrieveRelevantDocs(ctx, prompt);

  const systemPrompt = `
You are a developer documentation assistant. Your job is to answer user questions about the Agentuity platform as effectively and concisely as possible, adapting your style to the user's request. If the user asks for a direct answer, provide it without extra explanation. If they want an explanation, provide a clear and concise one. Use only the provided relevant documents to answer.
You must not make up answers if the provided documents don't contain the information. You can tell the user directly that the documentation doesn't seem to include what they are looking for. Lying to the user is prohibited as it only slows them down. Feel free to suggest follow-up questions if what they're asking for doesn't seem to have an answer in the documents. You can point them to a few related things that the documents do contain that may interest them.
For every answer, return a valid JSON object with:
1. "answer": your answer to the user's question.
2. "documents": an array of strings, representing the paths of the documents you used to answer.
If you use information from a document, include it in the "documents" array. If you do not use any documents, return an empty array for "documents".
User question:
\`\`\`
${prompt}
\`\`\`
Relevant documents:
${JSON.stringify(relevantDocs, null, 2)}
Respond ONLY with a valid JSON object as described above. In your answer, format code blocks properly in Markdown if the user needs the answer as a code block.
`.trim();

  const llmResponse = await streamText({
    model: openai('gpt-4o'),
    system: systemPrompt,
    prompt: prompt,
    maxTokens: 2048,

Review comment (on maxTokens): This model's size is 128,000 tokens - why so low here?
Author reply: Oh yeah I'll extend the size in the next PR. Mainly, I wanted to keep the generation concise and short for now. Will push its limit.

  });

  return resp.stream(llmResponse.textStream);
}

async function retrieveRelevantDocs(ctx: AgentContext, prompt: string): Promise<RelevantDoc[]> {
  const dbQuery = {
    query: prompt,
    limit: vectorSearchNumber
  };
  try {
    const vectors = await ctx.vector.search(VECTOR_STORE_NAME, dbQuery);

    const uniquePaths = new Set<string>();

    vectors.forEach(vec => {
      if (!vec.metadata) {
        ctx.logger.warn('Vector missing metadata');
        return;
      }
      const path = typeof vec.metadata.path === 'string' ? vec.metadata.path : undefined;
      if (!path) {
        ctx.logger.warn('Vector metadata path is not a string');
        return;
      }
      uniquePaths.add(path);
    });

    const docs = await Promise.all(
      Array.from(uniquePaths).map(async path => ({
        path,
        content: await retrieveDocumentBasedOnPath(ctx, path)
      }))
    );

    return docs;
  } catch (err) {
    ctx.logger.error('Error retrieving relevant docs: %o', err);
    return [];
  }
}

async function retrieveDocumentBasedOnPath(ctx: AgentContext, path: string): Promise<string> {
  const dbQuery = {
    query: ' ',
    limit: 10000,
    metadata: {
      path: path
    }
  };
  try {
    const vectors = await ctx.vector.search(VECTOR_STORE_NAME, dbQuery);

    // Sort vectors by chunk index and concatenate text
    const sortedVectors = vectors
      .map(vec => {
        const metadata = vec.metadata as ChunkMetadata;
        return {
          metadata,
          index: metadata.chunkIndex
        };
      })
      .sort((a, b) => a.index - b.index);

    const fullText = sortedVectors
      .map(vec => vec.metadata.text)
      .join('\n\n');

    return fullText;
  } catch (err) {
    ctx.logger.error('Error retrieving document by path %s: %o', path, err);
    return '';
  }
}
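The agent depends on `ChunkMetadata` from `../doc-processing/types`, which is not included in this diff. Judging only from how the code reads `metadata.path`, `metadata.chunkIndex`, and `metadata.text`, the type presumably looks roughly like the sketch below; this is an assumption, not the actual definition:

```typescript
// Assumed shape of ChunkMetadata, inferred from how Agent reads vector metadata.
// The real definition lives in ../doc-processing/types and may contain more fields.
interface ChunkMetadata {
  path: string;       // document path the chunk came from
  chunkIndex: number; // position of the chunk within the original document
  text: string;       // raw chunk text stored alongside the embedding
}
```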
@@ -0,0 +1,5 @@
export interface RelevantDoc {
  path: string;
  content: string;
}
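The system prompt asks the model to reply with a JSON object containing `answer` and `documents`. A minimal sketch of that contract and of how a caller might consume the streamed output is shown below, assuming the client simply buffers the whole stream before parsing; the endpoint URL is a placeholder, not part of this PR:

```typescript
// Contract the system prompt requests from the model.
interface AgentAnswer {
  answer: string;      // the assistant's reply
  documents: string[]; // paths of the documents used to answer
}

// Minimal sketch: buffer the streamed response, then parse it as JSON.
// The URL is a placeholder; the real agent endpoint is not shown in this diff.
async function askDocsAgent(question: string): Promise<AgentAnswer> {
  const res = await fetch('https://example.invalid/agent/docs-qa', {
    method: 'POST',
    body: question,
  });
  const raw = await res.text(); // collect the whole text stream
  return JSON.parse(raw) as AgentAnswer;
}
```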