Official JavaScript/TypeScript SDK for AgentTrust — prompt injection detection, agent identity verification, and audit trails for autonomous AI agents.
```bash
npm install @agenttrust/sdk
# or
yarn add @agenttrust/sdk
# or
pnpm add @agenttrust/sdk
```

Then initialize the client:

```ts
import { AgentTrust } from '@agenttrust/sdk'

// From environment variable (recommended)
const at = AgentTrust.fromEnv() // reads AGENTTRUST_API_KEY
// Or explicitly
const at = new AgentTrust({ apiKey: 'atk_YOUR_API_KEY' })
```

Get your API key at agenttrust.ai. Keys are prefixed with `atk_`.
Scan any text for prompt injection, command execution attempts, and social engineering before your agent acts on it.
```ts
const result = await at.injectionGuard(text, {
  capabilities: ['send_messages', 'open_links'],
  channel: 'email',
  use_llm_review: true,
})

if (result.blocked) {
  // Hard block — do not proceed
  console.log('Triggers:', result.triggers)
  console.log('Mitigations:', result.mitigations)
} else if (result.requiresHuman) {
  // Queue for human review
} else {
  // Safe to execute
}
```

| Option | Type | Required | Description |
|---|---|---|---|
| `capabilities` | `string[]` | ✅ | What your agent can do (e.g. `send_messages`, `open_links`, `data_access`) |
| `channel` | `'email' \| 'chat' \| 'api'` | — | Source channel for better contextual analysis |
| `use_llm_review` | `boolean` | — | Enable additional LLM-based risk assessment |
| `exclusions` | `string[]` | — | Terms that suppress trigger detection |
| `custom_keywords` | `CustomKeyword[]` | — | Your own keyword triggers with severity |
```ts
// Custom keywords example
custom_keywords: [
  { keyword: 'powershell', severity: 'high' },
  { keyword: 'wget', severity: 'medium' },
]
```

Dashboard-configured keywords and exclusions are automatically merged with request values.
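For instance, a full call that passes both request-level exclusions and custom keywords might look like this (the option values are purely illustrative):

```ts
const result = await at.injectionGuard(text, {
  capabilities: ['send_messages'],
  exclusions: ['weekly report'], // suppress detection on this known-safe phrase
  custom_keywords: [
    { keyword: 'powershell', severity: 'high' },
    { keyword: 'wget', severity: 'medium' },
  ],
})
```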
| Field | Type | Description |
|---|---|---|
| `risk_level` | `'low' \| 'medium' \| 'high'` | Overall risk assessment |
| `suggested_mode` | `'allow_execute' \| 'draft_only' \| 'require_human' \| 'block'` | Recommended handling |
| `triggers` | `string[]` | Rules that matched |
| `trigger_excerpts` | `Record<string, string[]>` | Supporting excerpts per trigger |
| `mitigations` | `string[]` | Suggested mitigation actions |
| `ruleset_version` | `string` | Detection ruleset version |
| `llm_review` | `LLMReview \| undefined` | AI review block (when `use_llm_review: true`) |
| `blocked` | `boolean` | Convenience: `true` when `suggested_mode === 'block'` |
| `requiresHuman` | `boolean` | Convenience: `true` when `suggested_mode === 'require_human'` |
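As a sketch, acting on the full response could look like the following (the branch bodies are placeholders for your own handling):

```ts
switch (result.suggested_mode) {
  case 'allow_execute':
    break // safe to proceed normally
  case 'draft_only':
    break // generate output, but don't act on it automatically
  case 'require_human':
    break // hand off to your own review queue
  case 'block':
    console.log('Matched rules:', result.triggers)
    console.log('Excerpts:', result.trigger_excerpts)
    console.log('Mitigations:', result.mitigations)
    break
}
```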
Prove your agent's identity when reaching out to users or other systems.
```ts
const trustCode = await at.issue({
  preset: 'draft_only',
  ttl_seconds: 3600, // 1 hour (default: 86400)
  payload: {
    intent: 'schedule_meeting',
    message: 'Scheduling a meeting on behalf of Acme Corp.',
  },
})

console.log(trustCode.code)       // e.g. "gFs2-jbQE-GddW"
console.log(trustCode.verify_url) // shareable verification link
console.log(trustCode.badge_html) // embeddable HTML trust badge
```

No API key required — anyone can verify.

```ts
const identity = await at.verify('gFs2-jbQE-GddW')
if (identity.verified) {
  console.log('Agent:', identity.issuer_agent_id)
  console.log('Org:', identity.issuer_org_id)
  console.log('Intent:', identity.payload.intent)
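} else {
  // Example handling (assumption): an unknown, expired, or revoked code comes
  // back with verified === false, so treat the sender as untrusted here.
  console.warn('Trust code could not be verified')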
}
```

Screen inbound requests in an Express route before your agent acts on them:

```ts
import express from 'express'
import { AgentTrust } from '@agenttrust/sdk'
const at = AgentTrust.fromEnv()
const app = express()
app.use(express.json())
app.post('/agent/run', async (req, res) => {
  const check = await at.injectionGuard(req.body.message, {
    capabilities: ['data_access'],
    channel: 'api',
  })

  if (check.blocked) {
    return res.status(400).json({
      error: 'Prompt injection detected',
      triggers: check.triggers,
    })
  }

  if (check.requiresHuman) {
    return res.status(202).json({ status: 'queued_for_review' })
  }

  // Continue with agent logic...
})
```

Or wrap individual tool calls before they execute:

```ts
import { AgentTrust } from '@agenttrust/sdk'
const at = AgentTrust.fromEnv()
async function secureToolCall(input: string, capabilities: string[]) {
  const check = await at.injectionGuard(input, {
    capabilities,
    use_llm_review: true,
  })

  if (check.blocked) {
    throw new Error(`Blocked by AgentTrust: ${check.triggers.join(', ')}`)
  }
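  if (check.requiresHuman) {
    // Sketch only: escalate instead of executing; swap this for your own
    // review-queue or approval flow.
    throw new Error('AgentTrust: input requires human review before execution')
  }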
  // Call your tool...
}
```

Constructor options:

```ts
const at = new AgentTrust({
  apiKey: 'atk_YOUR_API_KEY', // required — get from dashboard
  baseUrl: 'https://agenttrust.ai/api', // optional, override for testing
})
```

Set `AGENTTRUST_API_KEY` and use `AgentTrust.fromEnv()` for cleaner setup.
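For local testing you might point the client at a mock server via the `baseUrl` override (the URL and key below are placeholders):

```ts
// Placeholder values; apiKey and baseUrl are the only options shown above.
const testClient = new AgentTrust({
  apiKey: 'atk_test_key',
  baseUrl: 'http://localhost:4010/api',
})
```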
- Node.js 18+ (uses native `fetch`)
- Works with Bun, Deno, and any modern JS/TS runtime