diff --git a/js/plugins/anthropic/.gitignore b/js/plugins/anthropic/.gitignore
new file mode 100644
index 0000000000..d83aca04ae
--- /dev/null
+++ b/js/plugins/anthropic/.gitignore
@@ -0,0 +1,3 @@
+lib/
+node_modules/
+coverage/
diff --git a/js/plugins/anthropic/.npmignore b/js/plugins/anthropic/.npmignore
new file mode 100644
index 0000000000..d265d4ab73
--- /dev/null
+++ b/js/plugins/anthropic/.npmignore
@@ -0,0 +1,17 @@
+# typescript source files
+src/
+tests/
+tsconfig.json
+tsup.common.ts
+tsup.config.ts
+
+# GitHub files
+.github/
+.gitignore
+.npmignore
+CODE_OF_CONDUCT.md
+CONTRIBUTING.md
+
+# Developer related files
+.devcontainer/
+.vscode/
diff --git a/js/plugins/anthropic/NOTICE b/js/plugins/anthropic/NOTICE
new file mode 100644
index 0000000000..dc335bb090
--- /dev/null
+++ b/js/plugins/anthropic/NOTICE
@@ -0,0 +1,8 @@
+This project includes code derived from the Firebase Genkit Anthropic community plugin
+(https://github.com/BloomLabsInc/genkit-plugins/tree/main/plugins/anthropic).
+
+Copyright 2024 Bloom Labs Inc.
+Copyright 2025 Google LLC.
+
+Licensed under the Apache License, Version 2.0.
+See the LICENSE file distributed with this project for the full license text.
diff --git a/js/plugins/anthropic/README.md b/js/plugins/anthropic/README.md
new file mode 100644
index 0000000000..ec9c9115c0
--- /dev/null
+++ b/js/plugins/anthropic/README.md
@@ -0,0 +1,195 @@
+# Firebase Genkit + Anthropic AI
+
+
+Firebase Genkit <> Anthropic AI Plugin
+
+Anthropic AI plugin for Google Firebase Genkit
+
+`@genkit-ai/anthropic` is the official Anthropic plugin for [Firebase Genkit](https://github.com/firebase/genkit). It supersedes the earlier community package `genkitx-anthropic` and is now maintained by Google.
+
+## Supported models
+
+The plugin supports the most recent Anthropic models: **Claude Sonnet 4.5**, **Claude Opus 4.1**, **Claude Haiku 4.5**, **Claude Sonnet 4**, **Claude Opus 4**, **Claude 3.5 Haiku**, and **Claude 3 Haiku**.
+
+## Installation
+
+Install the plugin in your project with your favorite package manager:
+
+- `npm install @genkit-ai/anthropic`
+- `yarn add @genkit-ai/anthropic`
+- `pnpm add @genkit-ai/anthropic`
+
+## Usage
+
+### Initialize
+
+```typescript
+import { genkit } from 'genkit';
+import { anthropic } from '@genkit-ai/anthropic';
+
+const ai = genkit({
+ plugins: [anthropic({ apiKey: process.env.ANTHROPIC_API_KEY })],
+ // specify a default model for generate here if you wish:
+ model: anthropic.model('claude-sonnet-4-5'),
+});
+```
+
+### Basic examples
+
+The simplest way to generate text is by using the `generate` method:
+
+```typescript
+const response = await ai.generate({
+ model: anthropic.model('claude-3-haiku'),
+ prompt: 'Tell me a joke.',
+});
+
+console.log(response.text);
+```
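+
+Streaming also works through Genkit's standard `generateStream` API. A minimal sketch:
+
+```typescript
+const { stream, response } = ai.generateStream({
+  model: anthropic.model('claude-3-haiku'),
+  prompt: 'Tell me a long story about dragons.',
+});
+
+// Print text chunks as they arrive
+for await (const chunk of stream) {
+  process.stdout.write(chunk.text);
+}
+
+// The full response is available once the stream completes
+console.log((await response).text);
+```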
+
+### Multi-modal prompt
+
+```typescript
+// ...initialize Genkit instance (as shown above)...
+
+const response = await ai.generate({
+ prompt: [
+ { text: 'What animal is in the photo?' },
+ { media: { url: imageUrl } },
+ ],
+ config: {
+    // Controls the level of visual detail when processing image embeddings.
+    // A low detail level also decreases token usage.
+ visualDetailLevel: 'low',
+ },
+});
+console.log(response.text);
+```
+
+### Extended thinking
+
+Claude 4 models can expose their internal reasoning. Enable it per-request with the Anthropic thinking config and read the reasoning from the response:
+
+```typescript
+const response = await ai.generate({
+ prompt: 'Walk me through your reasoning for Fermat’s little theorem.',
+ config: {
+ thinking: {
+ enabled: true,
+ budgetTokens: 4096, // Must be >= 1024 and less than max_tokens
+ },
+ },
+});
+
+console.log(response.text); // Final assistant answer
+console.log(response.reasoning); // Summarized thinking steps
+```
+
+When thinking is enabled, request bodies sent through the plugin include the `thinking` payload (`{ type: 'enabled', budget_tokens: … }`) that Anthropic's API expects, and streamed responses deliver `reasoning` parts as they arrive so you can render the chain-of-thought incrementally.
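+
+For example, here is a minimal streaming sketch that renders the reasoning incrementally, reusing the `thinking` config from above:
+
+```typescript
+const { stream, response } = ai.generateStream({
+  prompt: 'Walk me through your reasoning for Fermat’s little theorem.',
+  config: {
+    thinking: { enabled: true, budgetTokens: 4096 },
+  },
+});
+
+for await (const chunk of stream) {
+  for (const part of chunk.content) {
+    // Reasoning arrives in separate parts from the final answer text
+    if (part.reasoning) process.stdout.write(`[thinking] ${part.reasoning}`);
+    if (part.text) process.stdout.write(part.text);
+  }
+}
+
+console.log((await response).text);
+```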
+
+### Beta API Limitations
+
+The beta API surface provides access to experimental features, but some server-managed tool blocks are not yet supported by this plugin. The following beta API features will cause an error if encountered:
+
+- `web_fetch_tool_result`
+- `code_execution_tool_result`
+- `bash_code_execution_tool_result`
+- `text_editor_code_execution_tool_result`
+- `mcp_tool_result`
+- `mcp_tool_use`
+- `container_upload`
+
+Note that `server_tool_use` and `web_search_tool_result` ARE supported and work with both stable and beta APIs.
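+
+Because these blocks surface as thrown errors, a simple defensive sketch is to wrap beta-enabled calls in a `try`/`catch` when your prompts may trigger server-managed tools:
+
+```typescript
+try {
+  const response = await ai.generate({
+    prompt: 'Summarize the latest TypeScript release notes.',
+  });
+  console.log(response.text);
+} catch (err) {
+  // Unsupported server-managed tool blocks are reported as errors by the plugin
+  console.error('Beta feature not yet supported:', err);
+}
+```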
+
+### Within a flow
+
+```typescript
+import { z } from 'genkit';
+
+// ...initialize Genkit instance (as shown above)...
+
+export const jokeFlow = ai.defineFlow(
+ {
+ name: 'jokeFlow',
+ inputSchema: z.string(),
+ outputSchema: z.string(),
+ },
+ async (subject) => {
+ const llmResponse = await ai.generate({
+ prompt: `tell me a joke about ${subject}`,
+ });
+ return llmResponse.text;
+ }
+);
+```
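+
+Flows defined this way are directly callable (and can also be run from the Genkit Developer UI):
+
+```typescript
+const joke = await jokeFlow('bananas');
+console.log(joke);
+```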
+
+### Direct model usage (without Genkit instance)
+
+The plugin supports Genkit Plugin API v2, which allows you to use models directly without initializing the full Genkit framework:
+
+```typescript
+import { anthropic } from '@genkit-ai/anthropic';
+
+// Create a model reference directly
+const claude = anthropic.model('claude-sonnet-4-5');
+
+// Use the model directly
+const response = await claude({
+ messages: [
+ {
+ role: 'user',
+ content: [{ text: 'Tell me a joke.' }],
+ },
+ ],
+});
+
+console.log(response);
+```
+
+You can also create model references using the plugin's `model()` method:
+
+```typescript
+import { anthropic } from '@genkit-ai/anthropic';
+
+// Create model references
+const claudeSonnet45 = anthropic.model('claude-sonnet-4-5');
+const claudeOpus41 = anthropic.model('claude-opus-4-1');
+const claude35Haiku = anthropic.model('claude-3-5-haiku');
+
+// Use the model reference directly
+const response = await claudeSonnet45({
+ messages: [
+ {
+ role: 'user',
+ content: [{ text: 'Hello!' }],
+ },
+ ],
+});
+```
+
+This approach is useful for:
+
+- Framework developers who need raw model access
+- Testing models in isolation
+- Using Genkit models in non-Genkit applications
+
+## Acknowledgements
+
+This plugin builds on the community work published as [`genkitx-anthropic`](https://github.com/BloomLabsInc/genkit-plugins/blob/main/plugins/anthropic/README.md) by Bloom Labs Inc. Their Apache 2.0–licensed implementation provided the foundation for this maintained package.
+
+## Contributing
+
+Want to contribute to the project? That's awesome! Head over to our [Contribution Guidelines](CONTRIBUTING.md).
+
+## Need support?
+
+> [!NOTE]
+> This repository depends on Google's Firebase Genkit. For issues and questions related to Genkit, please refer to instructions available in [Genkit's repository](https://github.com/firebase/genkit).
+
+
+## Credits
+
+This plugin is maintained by Google with acknowledgement of the community contributions from [Bloom Labs Inc](https://github.com/BloomLabsInc).
+
+## License
+
+This project is licensed under the [Apache 2.0 License](https://github.com/BloomLabsInc/genkit-plugins/blob/main/LICENSE).
diff --git a/js/plugins/anthropic/package.json b/js/plugins/anthropic/package.json
new file mode 100644
index 0000000000..32b1c4ba87
--- /dev/null
+++ b/js/plugins/anthropic/package.json
@@ -0,0 +1,70 @@
+{
+ "name": "@genkit-ai/anthropic",
+ "description": "Genkit AI framework plugin for Anthropic APIs.",
+ "keywords": [
+ "genkit",
+ "genkit-plugin",
+ "genkit-model",
+ "anthropic",
+ "anthropic-ai",
+ "claude-4",
+ "haiku-4",
+ "opus",
+ "haiku",
+ "sonnet",
+ "ai",
+ "genai",
+ "generative-ai"
+ ],
+ "version": "1.23.0",
+ "type": "commonjs",
+ "repository": {
+ "type": "git",
+ "url": "https://github.com/firebase/genkit.git",
+ "directory": "js/plugins/anthropic"
+ },
+ "author": "genkit",
+ "license": "Apache-2.0",
+ "peerDependencies": {
+ "genkit": "workspace:^"
+ },
+ "dependencies": {
+ "@anthropic-ai/sdk": "^0.68.0"
+ },
+ "devDependencies": {
+ "@types/node": "^20.11.16",
+ "check-node-version": "^4.2.1",
+ "genkit": "workspace:*",
+ "npm-run-all": "^4.1.5",
+ "rimraf": "^6.0.1",
+ "tsup": "^8.3.5",
+ "tsx": "^4.19.2",
+ "typescript": "^4.9.0"
+ },
+ "types": "./lib/index.d.ts",
+ "exports": {
+ ".": {
+ "types": "./lib/index.d.ts",
+ "require": "./lib/index.js",
+ "import": "./lib/index.mjs",
+ "default": "./lib/index.js"
+ }
+ },
+ "files": [
+ "lib"
+ ],
+ "publishConfig": {
+ "provenance": true,
+ "access": "public"
+ },
+ "scripts": {
+ "check": "tsc",
+ "compile": "tsup-node",
+ "build:clean": "rimraf ./lib",
+ "build": "npm-run-all build:clean check compile",
+ "build:watch": "tsup-node --watch",
+ "test": "tsx --test tests/*_test.ts",
+ "test:file": "tsx --test",
+ "test:coverage": "check-node-version --node '>=22' && tsx --test --experimental-test-coverage --test-coverage-include='src/**/*.ts' ./tests/**/*_test.ts"
+ }
+}
diff --git a/js/plugins/anthropic/src/index.ts b/js/plugins/anthropic/src/index.ts
new file mode 100644
index 0000000000..d5a0fef9cb
--- /dev/null
+++ b/js/plugins/anthropic/src/index.ts
@@ -0,0 +1,155 @@
+/**
+ * Copyright 2024 Bloom Labs Inc
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Anthropic from '@anthropic-ai/sdk';
+import { genkitPluginV2, type GenkitPluginV2 } from 'genkit/plugin';
+
+import { ActionMetadata, ModelReference, z } from 'genkit';
+import { ModelAction } from 'genkit/model';
+import { ActionType } from 'genkit/registry';
+import { listActions } from './list.js';
+import {
+ AnthropicConfigSchemaType,
+ ClaudeConfig,
+ ClaudeModelName,
+ KNOWN_CLAUDE_MODELS,
+ KnownClaudeModels,
+ claudeModel,
+ claudeModelReference,
+} from './models.js';
+import { InternalPluginOptions, PluginOptions, __testClient } from './types.js';
+
+const PROMPT_CACHING_BETA_HEADER_VALUE = 'prompt-caching-2024-07-31';
+
+/**
+ * Gets or creates an Anthropic client instance.
+ * Supports test client injection for internal testing.
+ */
+function getAnthropicClient(options?: PluginOptions): Anthropic {
+ // Check for test client injection first (internal use only)
+ const internalOptions = options as InternalPluginOptions | undefined;
+ if (internalOptions?.[__testClient]) {
+ return internalOptions[__testClient];
+ }
+
+ // Production path: create real client
+ const apiKey = options?.apiKey || process.env.ANTHROPIC_API_KEY;
+ if (!apiKey) {
+ throw new Error(
+ 'Please pass in the API key or set the ANTHROPIC_API_KEY environment variable'
+ );
+ }
+  const defaultHeaders: Record<string, string> = {};
+ if (options?.cacheSystemPrompt) {
+ defaultHeaders['anthropic-beta'] = PROMPT_CACHING_BETA_HEADER_VALUE;
+ }
+ return new Anthropic({ apiKey, defaultHeaders });
+}
+
+/**
+ * This module provides an interface to the Anthropic AI models through the Genkit plugin system.
+ * It allows users to interact with various Claude models by providing an API key and optional configuration.
+ *
+ * The main export is the `anthropic` plugin, which can be configured with an API key either directly or through
+ * environment variables. It initializes the Anthropic client and makes available the Claude models for use.
+ *
+ * Exports:
+ * - anthropic: The main plugin function to interact with the Anthropic AI.
+ *
+ * Usage:
+ * To use the Claude models, initialize the anthropic plugin inside `genkit()` and pass the configuration options. If no API key is provided in the options, the environment variable `ANTHROPIC_API_KEY` must be set. If you want to cache the system prompt, set `cacheSystemPrompt` to `true`. **Note:** Prompt caching is in beta and may change. To learn more, see https://docs.anthropic.com/en/docs/prompt-caching.
+ *
+ * Example:
+ * ```
+ * import { anthropic } from '@genkit-ai/anthropic';
+ * import { genkit } from 'genkit';
+ *
+ * const ai = genkit({
+ * plugins: [
+ * anthropic({ apiKey: 'your-api-key', cacheSystemPrompt: false })
+ * ... // other plugins
+ * ]
+ * });
+ *
+ * // Access models via the plugin's model() method:
+ * const model = anthropic.model('claude-sonnet-4');
+ * ```
+ */
+function anthropicPlugin(options?: PluginOptions): GenkitPluginV2 {
+ const client = getAnthropicClient(options);
+ const defaultApiVersion = options?.apiVersion;
+
+ let listActionsCache: ActionMetadata[] | null = null;
+
+ return genkitPluginV2({
+ name: 'anthropic',
+ init: async () => {
+ const actions: ModelAction[] = [];
+ for (const name of Object.keys(KNOWN_CLAUDE_MODELS)) {
+ const action = claudeModel({
+ name,
+ client,
+ cacheSystemPrompt: options?.cacheSystemPrompt,
+ defaultApiVersion,
+ });
+ actions.push(action);
+ }
+ return actions;
+ },
+ resolve: (actionType: ActionType, name: string) => {
+ if (actionType === 'model') {
+ // Strip the 'anthropic/' namespace prefix if present
+ const modelName = name.startsWith('anthropic/') ? name.slice(10) : name;
+ return claudeModel({
+ name: modelName,
+ client,
+ cacheSystemPrompt: options?.cacheSystemPrompt,
+ defaultApiVersion,
+ });
+ }
+ return undefined;
+ },
+ list: async () => {
+ if (listActionsCache) return listActionsCache;
+ listActionsCache = await listActions(client);
+ return listActionsCache;
+ },
+ });
+}
+
+export type AnthropicPlugin = {
+ (pluginOptions?: PluginOptions): GenkitPluginV2;
+ model(
+ name: KnownClaudeModels | (ClaudeModelName & {}),
+ config?: ClaudeConfig
+  ): ModelReference<AnthropicConfigSchemaType>;
+  model(name: string, config?: any): ModelReference<z.ZodTypeAny>;
+};
+
+/**
+ * Anthropic AI plugin for Genkit.
+ * Includes Claude models (3, 3.5, and 4 series).
+ */
+export const anthropic = anthropicPlugin as AnthropicPlugin;
+(anthropic as any).model = (
+ name: string,
+ config?: any
+): ModelReference<z.ZodTypeAny> => {
+ return claudeModelReference(name, config);
+};
+
+export default anthropic;
diff --git a/js/plugins/anthropic/src/list.ts b/js/plugins/anthropic/src/list.ts
new file mode 100644
index 0000000000..6124fd4392
--- /dev/null
+++ b/js/plugins/anthropic/src/list.ts
@@ -0,0 +1,66 @@
+/**
+ * Copyright 2024 Bloom Labs Inc
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Anthropic from '@anthropic-ai/sdk';
+import { modelActionMetadata } from 'genkit/plugin';
+
+import { ActionMetadata } from 'genkit';
+import { claudeModelReference } from './models.js';
+
+/**
+ * Retrieves available Anthropic models from the API and converts them into Genkit action metadata.
+ *
+ * This function queries the Anthropic API for the list of available models and generates metadata
+ * for all discovered models.
+ *
+ * @param client - The Anthropic API client instance
+ * @returns A promise that resolves to an array of action metadata for all discovered models
+ */
+export async function listActions(
+ client: Anthropic
+): Promise<ActionMetadata[]> {
+ const clientModels = (await client.models.list()).data;
+  const seenNames = new Set<string>();
+
+ return clientModels
+ .filter((modelInfo) => {
+ const modelId = modelInfo.id;
+ if (!modelId) {
+ return false;
+ }
+
+ const ref = claudeModelReference(modelId);
+ const name = ref.name;
+
+ // Deduplicate by name
+ if (seenNames.has(name)) {
+ return false;
+ }
+ seenNames.add(name);
+ return true;
+ })
+ .map((modelInfo) => {
+ const modelId = modelInfo.id!;
+ const ref = claudeModelReference(modelId);
+
+ return modelActionMetadata({
+ name: ref.name,
+ info: ref.info,
+ configSchema: ref.configSchema,
+ });
+ });
+}
diff --git a/js/plugins/anthropic/src/models.ts b/js/plugins/anthropic/src/models.ts
new file mode 100644
index 0000000000..2ee33d933c
--- /dev/null
+++ b/js/plugins/anthropic/src/models.ts
@@ -0,0 +1,241 @@
+/**
+ * Copyright 2024 Bloom Labs Inc
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import type {
+ GenerateRequest,
+ GenerateResponseData,
+ ModelReference,
+ StreamingCallback,
+} from 'genkit';
+import { z } from 'genkit';
+import type { GenerateResponseChunkData, ModelAction } from 'genkit/model';
+import { modelRef } from 'genkit/model';
+import { model } from 'genkit/plugin';
+
+import type { ModelInfo } from 'genkit/model';
+import { BetaRunner, Runner } from './runner/index.js';
+import {
+ AnthropicBaseConfigSchema,
+ AnthropicBaseConfigSchemaType,
+ AnthropicConfigSchema,
+ AnthropicThinkingConfigSchema,
+ resolveBetaEnabled,
+ type ClaudeModelParams,
+ type ClaudeRunnerParams,
+} from './types.js';
+
+// This contains all the Anthropic config schema types
+type ConfigSchemaType =
+ | AnthropicBaseConfigSchemaType
+ | AnthropicThinkingConfigSchemaType;
+
+/**
+ * Creates a model reference for a Claude model.
+ */
+function commonRef(
+ name: string,
+ configSchema: ConfigSchemaType = AnthropicConfigSchema,
+ info?: ModelInfo
+): ModelReference<ConfigSchemaType> {
+ return modelRef({
+ name: `anthropic/${name}`,
+ configSchema,
+ info: info ?? {
+ supports: {
+ multiturn: true,
+ tools: true,
+ media: true,
+ systemRole: true,
+ output: ['text'],
+ },
+ },
+ });
+}
+
+export const KNOWN_CLAUDE_MODELS: Record<
+ string,
+ ModelReference<
+ AnthropicBaseConfigSchemaType | AnthropicThinkingConfigSchemaType
+ >
+> = {
+ 'claude-3-haiku': commonRef('claude-3-haiku', AnthropicBaseConfigSchema),
+ 'claude-3-5-haiku': commonRef('claude-3-5-haiku', AnthropicBaseConfigSchema),
+ 'claude-sonnet-4': commonRef(
+ 'claude-sonnet-4',
+ AnthropicThinkingConfigSchema
+ ),
+ 'claude-opus-4': commonRef('claude-opus-4', AnthropicThinkingConfigSchema),
+ 'claude-sonnet-4-5': commonRef(
+ 'claude-sonnet-4-5',
+ AnthropicThinkingConfigSchema
+ ),
+ 'claude-haiku-4-5': commonRef(
+ 'claude-haiku-4-5',
+ AnthropicThinkingConfigSchema
+ ),
+ 'claude-opus-4-1': commonRef(
+ 'claude-opus-4-1',
+ AnthropicThinkingConfigSchema
+ ),
+};
+
+/**
+ * Gets the un-prefixed model name from a modelReference.
+ */
+export function extractVersion(
+  model: ModelReference<ConfigSchemaType> | undefined,
+ modelName: string
+): string {
+ // Extract from model name (remove 'anthropic/' prefix if present)
+ return modelName.replace(/^anthropic\//, '');
+}
+
+/**
+ * Generic Claude model info for unknown/unsupported models.
+ * Used when a model name is not in KNOWN_CLAUDE_MODELS.
+ */
+export const GENERIC_CLAUDE_MODEL_INFO = {
+ supports: {
+ multiturn: true,
+ tools: true,
+ media: true,
+ systemRole: true,
+ output: ['text'],
+ },
+};
+
+export type KnownClaudeModels = keyof typeof KNOWN_CLAUDE_MODELS;
+export type ClaudeModelName = string;
+export type AnthropicConfigSchemaType = typeof AnthropicConfigSchema;
+export type AnthropicThinkingConfigSchemaType =
+ typeof AnthropicThinkingConfigSchema;
+export type ClaudeConfig = z.infer<AnthropicConfigSchemaType>;
+
+/**
+ * Creates the runner used by Genkit to interact with the Claude model.
+ * @param params Configuration for the Claude runner.
+ * @param configSchema The config schema for this model (used for type inference).
+ * @returns The runner that Genkit will call when the model is invoked.
+ */
+export function claudeRunner<TConfigSchema extends ConfigSchemaType>(
+ params: ClaudeRunnerParams,
+ configSchema: TConfigSchema
+) {
+ const { defaultApiVersion, ...runnerParams } = params;
+
+ if (!runnerParams.client) {
+ throw new Error('Anthropic client is required to create a runner');
+ }
+
+ let stableRunner: Runner | null = null;
+ let betaRunner: BetaRunner | null = null;
+
+ return async (
+    request: GenerateRequest<TConfigSchema>,
+ {
+ streamingRequested,
+ sendChunk,
+ abortSignal,
+ }: {
+ streamingRequested: boolean;
+      sendChunk: StreamingCallback<GenerateResponseChunkData>;
+ abortSignal: AbortSignal;
+ }
+  ): Promise<GenerateResponseData> => {
+ // Cast to AnthropicConfigSchema for internal runner which expects the full schema
+ const normalizedRequest = request as unknown as GenerateRequest<
+ typeof AnthropicConfigSchema
+ >;
+ const isBeta = resolveBetaEnabled(
+ normalizedRequest.config,
+ defaultApiVersion
+ );
+ const runner = isBeta
+ ? (betaRunner ??= new BetaRunner(runnerParams))
+ : (stableRunner ??= new Runner(runnerParams));
+ return runner.run(normalizedRequest, {
+ streamingRequested,
+ sendChunk,
+ abortSignal,
+ });
+ };
+}
+
+/**
+ * Strips the 'anthropic/' namespace prefix if present.
+ */
+function checkModelName(name: string): string {
+ return name.startsWith('anthropic/') ? name.slice(10) : name;
+}
+
+/**
+ * Creates a model reference for a Claude model.
+ * This allows referencing models without initializing the plugin.
+ */
+export function claudeModelReference(
+ name: string,
+  config: z.infer<AnthropicConfigSchemaType> = {}
+): ModelReference<AnthropicConfigSchemaType> {
+ const modelName = checkModelName(name);
+ return modelRef({
+ name: `anthropic/${modelName}`,
+ config: config,
+ configSchema: AnthropicConfigSchema,
+ info: {
+ ...GENERIC_CLAUDE_MODEL_INFO,
+ },
+ });
+}
+
+/**
+ * Defines a Claude model with the given name and Anthropic client.
+ * Accepts any model name and lets the API validate it. If the model is in KNOWN_CLAUDE_MODELS, uses that modelRef
+ * for better defaults; otherwise creates a generic model reference.
+ */
+export function claudeModel(
+ params: ClaudeModelParams
+): ModelAction {
+ const {
+ name,
+ client: runnerClient,
+ cacheSystemPrompt: cachePrompt,
+ defaultApiVersion: apiVersion,
+ } = params;
+ // Use supported model ref if available, otherwise create generic model ref
+ const modelRef = KNOWN_CLAUDE_MODELS[name];
+ const modelInfo = modelRef ? modelRef.info : GENERIC_CLAUDE_MODEL_INFO;
+ const configSchema = modelRef?.configSchema ?? AnthropicConfigSchema;
+
+ return model<
+ AnthropicBaseConfigSchemaType | AnthropicThinkingConfigSchemaType
+ >(
+ {
+ name: `anthropic/${name}`,
+ ...modelInfo,
+ configSchema: configSchema,
+ },
+ claudeRunner(
+ {
+ name,
+ client: runnerClient,
+ cacheSystemPrompt: cachePrompt,
+ defaultApiVersion: apiVersion,
+ },
+ configSchema
+ )
+ );
+}
diff --git a/js/plugins/anthropic/src/runner/base.ts b/js/plugins/anthropic/src/runner/base.ts
new file mode 100644
index 0000000000..e6b7132e28
--- /dev/null
+++ b/js/plugins/anthropic/src/runner/base.ts
@@ -0,0 +1,550 @@
+/**
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { Anthropic } from '@anthropic-ai/sdk';
+import type { DocumentBlockParam } from '@anthropic-ai/sdk/resources/messages';
+import type {
+ GenerateRequest,
+ GenerateResponseChunkData,
+ GenerateResponseData,
+ MessageData,
+ Part,
+ Role,
+} from 'genkit';
+import { Message as GenkitMessage } from 'genkit';
+import type { ToolDefinition } from 'genkit/model';
+
+import {
+ AnthropicConfigSchema,
+ Media,
+ MediaSchema,
+ MediaType,
+ MediaTypeSchema,
+ type ClaudeRunnerParams,
+ type ThinkingConfig,
+} from '../types.js';
+
+import {
+ RunnerContentBlockParam,
+ RunnerMessage,
+ RunnerMessageParam,
+ RunnerRequestBody,
+ RunnerStream,
+ RunnerStreamEvent,
+ RunnerStreamingRequestBody,
+ RunnerTool,
+ RunnerToolResponseContent,
+ RunnerTypes,
+} from './types.js';
+
+const ANTHROPIC_THINKING_CUSTOM_KEY = 'anthropicThinking';
+
+/**
+ * Shared runner logic for Anthropic SDK integrations.
+ *
+ * Concrete subclasses pass in their SDK-specific type bundle via `RunnerTypes`,
+ * letting this base class handle message/tool translation once for both the
+ * stable and beta APIs that share the same conceptual surface.
+ */
+export abstract class BaseRunner<T extends RunnerTypes> {
+ protected name: string;
+ protected client: Anthropic;
+ protected cacheSystemPrompt?: boolean;
+
+ /**
+ * Default maximum output tokens for Claude models when not specified in the request.
+ */
+ protected readonly DEFAULT_MAX_OUTPUT_TOKENS = 4096;
+
+ constructor(params: ClaudeRunnerParams) {
+ this.name = params.name;
+ this.client = params.client;
+ this.cacheSystemPrompt = params.cacheSystemPrompt;
+ }
+
+ /**
+ * Converts a Genkit role to the corresponding Anthropic role.
+ */
+ protected toAnthropicRole(
+ role: Role,
+ toolMessageType?: 'tool_use' | 'tool_result'
+ ): 'user' | 'assistant' {
+ if (role === 'user') {
+ return 'user';
+ }
+ if (role === 'model') {
+ return 'assistant';
+ }
+ if (role === 'tool') {
+ return toolMessageType === 'tool_use' ? 'assistant' : 'user';
+ }
+ throw new Error(`Unsupported genkit role: ${role}`);
+ }
+
+ protected isMediaType(value: string): value is MediaType {
+ return MediaTypeSchema.safeParse(value).success;
+ }
+
+ protected isMediaObject(obj: unknown): obj is Media {
+ return MediaSchema.safeParse(obj).success;
+ }
+
+ /**
+ * Checks if a URL is a data URL (starts with 'data:').
+ */
+ protected isDataUrl(url: string): boolean {
+ return url.startsWith('data:');
+ }
+
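+  /**
+   * Parses a base64 data URL and returns its payload and content type, or null when
+   * the URL does not match the expected `data:<contentType>;base64,<data>` shape.
+   */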
+ protected extractDataFromBase64Url(
+ url: string
+ ): { data: string; contentType: string } | null {
+ const match = url.match(/^data:([^;]+);base64,(.+)$/);
+ return (
+ match && {
+ contentType: match[1],
+ data: match[2],
+ }
+ );
+ }
+
+ /**
+ * Both the stable and beta Anthropic SDKs accept the same JSON shape for PDF
+ * document sources (either `type: 'base64'` with a base64 payload or `type: 'url'`
+ * with a public URL). Even though the return type references the stable SDK
+ * union, TypeScript’s structural typing lets the beta runner reuse this helper.
+ */
+ protected toPdfDocumentSource(media: Media): DocumentBlockParam['source'] {
+ if (media.contentType !== 'application/pdf') {
+ throw new Error(
+ `PDF contentType mismatch: expected application/pdf, got ${media.contentType}`
+ );
+ }
+ const url = media.url;
+ if (this.isDataUrl(url)) {
+ const extracted = this.extractDataFromBase64Url(url);
+ if (!extracted) {
+ throw new Error(
+ `Invalid PDF data URL format: ${url.substring(0, 50)}...`
+ );
+ }
+ const { data, contentType } = extracted;
+ if (contentType !== 'application/pdf') {
+ throw new Error(
+ `PDF contentType mismatch: expected application/pdf, got ${contentType}`
+ );
+ }
+ return {
+ type: 'base64',
+ media_type: 'application/pdf',
+ data,
+ };
+ }
+ return {
+ type: 'url',
+ url,
+ };
+ }
+
+ /**
+ * Normalizes Genkit `Media` into either a base64 payload or a remote URL
+ * accepted by the Anthropic SDK. Anthropic supports both `data:` URLs (which
+ * we forward as base64) and remote `https` URLs without additional handling.
+ */
+ protected toImageSource(
+ media: Media
+ ):
+ | { kind: 'base64'; data: string; mediaType: MediaType }
+ | { kind: 'url'; url: string } {
+ if (this.isDataUrl(media.url)) {
+ const extracted = this.extractDataFromBase64Url(media.url);
+ const { data, contentType } = extracted ?? {};
+ if (!data || !contentType) {
+ throw new Error(
+ `Invalid genkit part media provided to toAnthropicMessageContent: ${JSON.stringify(
+ media
+ )}.`
+ );
+ }
+
+ const resolvedMediaType = contentType;
+ if (!resolvedMediaType) {
+ throw new Error('Media type is required but was not provided');
+ }
+ if (!this.isMediaType(resolvedMediaType)) {
+ // Provide helpful error message for text files
+ if (resolvedMediaType === 'text/plain') {
+ throw new Error(
+ `Unsupported media type: ${resolvedMediaType}. Text files should be sent as text content in the message, not as media. For example, use { text: '...' } instead of { media: { url: '...', contentType: 'text/plain' } }`
+ );
+ }
+ throw new Error(`Unsupported media type: ${resolvedMediaType}`);
+ }
+ return {
+ kind: 'base64',
+ data,
+ mediaType: resolvedMediaType,
+ };
+ }
+
+ if (!media.url) {
+ throw new Error('Media url is required but was not provided');
+ }
+
+ // For non-data URLs, use the provided contentType or default to a generic type
+ // Note: Anthropic will validate the actual content when fetching from URL
+ if (media.contentType) {
+ if (!this.isMediaType(media.contentType)) {
+ // Provide helpful error message for text files
+ if (media.contentType === 'text/plain') {
+ throw new Error(
+ `Unsupported media type: ${media.contentType}. Text files should be sent as text content in the message, not as media. For example, use { text: '...' } instead of { media: { url: '...', contentType: 'text/plain' } }`
+ );
+ }
+ throw new Error(`Unsupported media type: ${media.contentType}`);
+ }
+ }
+
+ return {
+ kind: 'url',
+ url: media.url,
+ };
+ }
+
+ /**
+ * Converts tool response output to the appropriate Anthropic content format.
+ * Handles Media objects, data URLs, strings, and other outputs.
+ */
+  protected toAnthropicToolResponseContent(
+    part: Part
+  ): RunnerToolResponseContent<T> {
+ const output = part.toolResponse?.output ?? {};
+
+ // Handle Media objects (images returned by tools)
+ if (this.isMediaObject(output)) {
+ const { data, contentType } =
+ this.extractDataFromBase64Url(output.url) ?? {};
+ if (data && contentType) {
+ if (!this.isMediaType(contentType)) {
+ // Provide helpful error message for text files
+ if (contentType === 'text/plain') {
+ throw new Error(
+ `Unsupported media type: ${contentType}. Text files should be sent as text content, not as media.`
+ );
+ }
+ throw new Error(`Unsupported media type: ${contentType}`);
+ }
+ return {
+ type: 'image',
+ source: {
+ type: 'base64',
+ data,
+ media_type: contentType,
+ },
+ };
+ }
+ }
+
+ // Handle string outputs - check if it's a data URL
+ if (typeof output === 'string') {
+ // Check if string is a data URL (e.g., "data:image/gif;base64,...")
+ if (this.isDataUrl(output)) {
+ const { data, contentType } =
+ this.extractDataFromBase64Url(output) ?? {};
+ if (data && contentType) {
+ if (!this.isMediaType(contentType)) {
+ // Provide helpful error message for text files
+ if (contentType === 'text/plain') {
+ throw new Error(
+ `Unsupported media type: ${contentType}. Text files should be sent as text content, not as media.`
+ );
+ }
+ throw new Error(`Unsupported media type: ${contentType}`);
+ }
+ return {
+ type: 'image',
+ source: {
+ type: 'base64',
+ data,
+ media_type: contentType,
+ },
+ };
+ }
+ }
+ // Regular string output
+ return {
+ type: 'text',
+ text: output,
+ };
+ }
+
+ // Handle other outputs by stringifying
+ return {
+ type: 'text',
+ text: JSON.stringify(output),
+ };
+ }
+
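+  /**
+   * Builds a Genkit reasoning Part from an Anthropic thinking block, preserving the
+   * block signature (when provided) under `custom.anthropicThinking` so it can be
+   * echoed back to the API on subsequent turns.
+   */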
+ protected createThinkingPart(thinking: string, signature?: string): Part {
+ const custom =
+ signature !== undefined
+ ? {
+ [ANTHROPIC_THINKING_CUSTOM_KEY]: { signature },
+ }
+ : undefined;
+ return custom
+ ? {
+ reasoning: thinking,
+ custom,
+ }
+ : {
+ reasoning: thinking,
+ };
+ }
+
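+  /**
+   * Reads the thinking signature previously stored under `custom.anthropicThinking`,
+   * if present and well-formed.
+   */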
+ protected getThinkingSignature(part: Part): string | undefined {
+    const custom = part.custom as Record<string, unknown> | undefined;
+ const thinkingValue = custom?.[ANTHROPIC_THINKING_CUSTOM_KEY];
+ if (
+ typeof thinkingValue === 'object' &&
+ thinkingValue !== null &&
+ 'signature' in thinkingValue &&
+ typeof (thinkingValue as { signature: unknown }).signature === 'string'
+ ) {
+ return (thinkingValue as { signature: string }).signature;
+ }
+ return undefined;
+ }
+
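+  /**
+   * Returns the redacted thinking payload stored under `custom.redactedThinking`, if any.
+   */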
+ protected getRedactedThinkingData(part: Part): string | undefined {
+    const custom = part.custom as Record<string, unknown> | undefined;
+ const redacted = custom?.redactedThinking;
+ return typeof redacted === 'string' ? redacted : undefined;
+ }
+
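+  /**
+   * Maps the plugin's thinking config to the shape expected by the Anthropic API:
+   * `{ type: 'enabled', budget_tokens }` when a budget is provided (and thinking is not
+   * explicitly disabled), `{ type: 'disabled' }` when explicitly disabled, and undefined
+   * otherwise (including when thinking is enabled without a budget).
+   */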
+ protected toAnthropicThinkingConfig(
+ config: ThinkingConfig | undefined
+ ):
+ | { type: 'enabled'; budget_tokens: number }
+ | { type: 'disabled' }
+ | undefined {
+ if (!config) return undefined;
+
+ const { enabled, budgetTokens } = config;
+
+ if (enabled === true) {
+ if (budgetTokens === undefined) {
+ return undefined;
+ }
+ return { type: 'enabled', budget_tokens: budgetTokens };
+ }
+
+ if (enabled === false) {
+ return { type: 'disabled' };
+ }
+
+ if (budgetTokens !== undefined) {
+ return { type: 'enabled', budget_tokens: budgetTokens };
+ }
+
+ return undefined;
+ }
+
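+  /**
+   * Wraps an Anthropic web search tool result in a Genkit Part, keeping the raw payload
+   * under `custom.anthropicServerToolResult` alongside a readable text summary.
+   */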
+ protected toWebSearchToolResultPart(params: {
+ toolUseId: string;
+ content: unknown;
+ type: string;
+ }): Part {
+ const { toolUseId, content, type } = params;
+ return {
+ text: `[Anthropic server tool result ${toolUseId}] ${JSON.stringify(content)}`,
+ custom: {
+ anthropicServerToolResult: {
+ type,
+ toolUseId,
+ content,
+ },
+ },
+ };
+ }
+
+ /**
+ * Converts a Genkit Part to the corresponding Anthropic content block.
+ * Each runner implements this to return its specific API type.
+ */
+  protected abstract toAnthropicMessageContent(
+    part: Part
+  ): RunnerContentBlockParam<T>;
+
+ /**
+ * Converts Genkit messages to Anthropic format.
+ * Extracts system message and converts remaining messages using the runner's
+ * toAnthropicMessageContent implementation.
+ */
+ protected toAnthropicMessages(messages: MessageData[]): {
+ system?: string;
+    messages: RunnerMessageParam<T>[];
+ } {
+ let system: string | undefined;
+
+ if (messages[0]?.role === 'system') {
+ const systemMessage = messages[0];
+ const textParts: string[] = [];
+
+ for (const part of systemMessage.content ?? []) {
+ if (part.text) {
+ textParts.push(part.text);
+ } else if (part.media || part.toolRequest || part.toolResponse) {
+ throw new Error(
+ 'System messages can only contain text content. Media, tool requests, and tool responses are not supported in system messages.'
+ );
+ }
+ }
+
+ // Concatenate multiple text parts into a single string.
+    // Note: The Anthropic SDK supports system as string | Array<TextBlockParam>,
+ // so we could alternatively preserve the multi-part structure as:
+ // system = textParts.map(text => ({ type: 'text', text }))
+ // However, concatenation is simpler and maintains semantic equivalence while
+ // keeping the cache control logic straightforward in the concrete runners.
+ system = textParts.length > 0 ? textParts.join('\n\n') : undefined;
+ }
+
+ const messagesToIterate =
+ system !== undefined ? messages.slice(1) : messages;
+    const anthropicMsgs: RunnerMessageParam<T>[] = [];
+
+ for (const message of messagesToIterate) {
+ const msg = new GenkitMessage(message);
+
+ // Detect tool message kind from Genkit Parts (no SDK typing needed)
+ const hadToolUse = msg.content.some((p) => !!p.toolRequest);
+ const hadToolResult = msg.content.some((p) => !!p.toolResponse);
+
+ const toolMessageType = hadToolUse
+ ? ('tool_use' as const)
+ : hadToolResult
+ ? ('tool_result' as const)
+ : undefined;
+
+ const role = this.toAnthropicRole(message.role, toolMessageType);
+
+ const content = msg.content.map((part) =>
+ this.toAnthropicMessageContent(part)
+ );
+
+ anthropicMsgs.push({ role, content });
+ }
+
+ return { system, messages: anthropicMsgs };
+ }
+
+ /**
+ * Converts a Genkit ToolDefinition to an Anthropic Tool object.
+ */
+  protected toAnthropicTool(tool: ToolDefinition): RunnerTool<T> {
+ return {
+ name: tool.name,
+ description: tool.description,
+ input_schema: tool.inputSchema,
+    } as RunnerTool<T>;
+ }
+
+ /**
+ * Converts an Anthropic request to a non-streaming Anthropic API request body.
+ * @param modelName The name of the Anthropic model to use.
+ * @param request The Genkit GenerateRequest to convert.
+ * @param cacheSystemPrompt Whether to cache the system prompt.
+ * @returns The converted Anthropic API non-streaming request body.
+ * @throws An error if an unsupported output format is requested.
+ */
+  protected abstract toAnthropicRequestBody(
+    modelName: string,
+    request: GenerateRequest<typeof AnthropicConfigSchema>,
+    cacheSystemPrompt?: boolean
+  ): RunnerRequestBody<T>;
+
+ /**
+ * Converts an Anthropic request to a streaming Anthropic API request body.
+ * @param modelName The name of the Anthropic model to use.
+ * @param request The Genkit GenerateRequest to convert.
+ * @param cacheSystemPrompt Whether to cache the system prompt.
+ * @returns The converted Anthropic API streaming request body.
+ * @throws An error if an unsupported output format is requested.
+ */
+  protected abstract toAnthropicStreamingRequestBody(
+    modelName: string,
+    request: GenerateRequest<typeof AnthropicConfigSchema>,
+    cacheSystemPrompt?: boolean
+  ): RunnerStreamingRequestBody<T>;
+
+  protected abstract createMessage(
+    body: RunnerRequestBody<T>,
+    abortSignal: AbortSignal
+  ): Promise<RunnerMessage<T>>;
+
+  protected abstract streamMessages(
+    body: RunnerStreamingRequestBody<T>,
+    abortSignal: AbortSignal
+  ): RunnerStream<T>;
+
+  protected abstract toGenkitResponse(
+    message: RunnerMessage<T>
+  ): GenerateResponseData;
+
+  protected abstract toGenkitPart(
+    event: RunnerStreamEvent<T>
+  ): Part | undefined;
+
+ public async run(
+    request: GenerateRequest<typeof AnthropicConfigSchema>,
+ options: {
+ streamingRequested: boolean;
+ sendChunk: (chunk: GenerateResponseChunkData) => void;
+ abortSignal: AbortSignal;
+ }
+ ): Promise {
+ const { streamingRequested, sendChunk, abortSignal } = options;
+
+ if (streamingRequested) {
+ const body = this.toAnthropicStreamingRequestBody(
+ this.name,
+ request,
+ this.cacheSystemPrompt
+ );
+ const stream = this.streamMessages(body, abortSignal);
+ for await (const event of stream) {
+ const part = this.toGenkitPart(event);
+ if (part) {
+ sendChunk({
+ index: 0,
+ content: [part],
+ });
+ }
+ }
+ const finalMessage = await stream.finalMessage();
+ return this.toGenkitResponse(finalMessage);
+ }
+
+ const body = this.toAnthropicRequestBody(
+ this.name,
+ request,
+ this.cacheSystemPrompt
+ );
+ const response = await this.createMessage(body, abortSignal);
+ return this.toGenkitResponse(response);
+ }
+}
diff --git a/js/plugins/anthropic/src/runner/beta.ts b/js/plugins/anthropic/src/runner/beta.ts
new file mode 100644
index 0000000000..6a71fa71d5
--- /dev/null
+++ b/js/plugins/anthropic/src/runner/beta.ts
@@ -0,0 +1,494 @@
+/**
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { BetaMessageStream } from '@anthropic-ai/sdk/lib/BetaMessageStream.js';
+import type {
+ BetaContentBlock,
+ BetaImageBlockParam,
+ BetaMessage,
+ MessageCreateParams as BetaMessageCreateParams,
+ MessageCreateParamsNonStreaming as BetaMessageCreateParamsNonStreaming,
+ MessageCreateParamsStreaming as BetaMessageCreateParamsStreaming,
+ BetaMessageParam,
+ BetaRawMessageStreamEvent,
+ BetaRedactedThinkingBlockParam,
+ BetaRequestDocumentBlock,
+ BetaStopReason,
+ BetaTextBlockParam,
+ BetaThinkingBlockParam,
+ BetaTool,
+ BetaToolResultBlockParam,
+ BetaToolUseBlockParam,
+} from '@anthropic-ai/sdk/resources/beta/messages';
+
+import type {
+ GenerateRequest,
+ GenerateResponseData,
+ ModelResponseData,
+ Part,
+} from 'genkit';
+import { logger } from 'genkit/logging';
+
+import { KNOWN_CLAUDE_MODELS, extractVersion } from '../models.js';
+import { AnthropicConfigSchema, type ClaudeRunnerParams } from '../types.js';
+import { BaseRunner } from './base.js';
+import { RunnerTypes } from './types.js';
+
+/**
+ * Server-managed tool blocks emitted by the beta API that Genkit cannot yet
+ * interpret. We fail fast on these so callers do not accidentally treat them as
+ * locally executable tool invocations.
+ */
+/**
+ * Server tool types that exist in beta but are not yet supported.
+ * Note: server_tool_use and web_search_tool_result ARE supported (same as stable API).
+ */
+const BETA_UNSUPPORTED_SERVER_TOOL_BLOCK_TYPES = new Set([
+ 'web_fetch_tool_result',
+ 'code_execution_tool_result',
+ 'bash_code_execution_tool_result',
+ 'text_editor_code_execution_tool_result',
+ 'mcp_tool_result',
+ 'mcp_tool_use',
+ 'container_upload',
+]);
+
+const unsupportedServerToolError = (blockType: string): string =>
+ `Anthropic beta runner does not yet support server-managed tool block '${blockType}'. Please retry against the stable API or wait for dedicated support.`;
+
+interface BetaRunnerTypes extends RunnerTypes {
+ Message: BetaMessage;
+ Stream: BetaMessageStream;
+ StreamEvent: BetaRawMessageStreamEvent;
+ RequestBody: BetaMessageCreateParamsNonStreaming;
+ StreamingRequestBody: BetaMessageCreateParamsStreaming;
+ Tool: BetaTool;
+ MessageParam: BetaMessageParam;
+ ToolResponseContent: BetaTextBlockParam | BetaImageBlockParam;
+ ContentBlockParam:
+ | BetaTextBlockParam
+ | BetaImageBlockParam
+ | BetaRequestDocumentBlock
+ | BetaToolUseBlockParam
+ | BetaToolResultBlockParam
+ | BetaThinkingBlockParam
+ | BetaRedactedThinkingBlockParam;
+}
+
+/**
+ * Runner for the Anthropic Beta API.
+ */
+export class BetaRunner extends BaseRunner<BetaRunnerTypes> {
+ constructor(params: ClaudeRunnerParams) {
+ super(params);
+ }
+
+ /**
+ * Map a Genkit Part -> Anthropic beta content block param.
+ * Supports: text, images (base64 data URLs), PDFs (document source),
+ * tool_use (client tool request), tool_result (client tool response).
+ */
+ protected toAnthropicMessageContent(
+ part: Part
+ ):
+ | BetaTextBlockParam
+ | BetaImageBlockParam
+ | BetaRequestDocumentBlock
+ | BetaToolUseBlockParam
+ | BetaToolResultBlockParam
+ | BetaThinkingBlockParam
+ | BetaRedactedThinkingBlockParam {
+ if (part.reasoning) {
+ const signature = this.getThinkingSignature(part);
+ if (!signature) {
+ throw new Error(
+ 'Anthropic thinking parts require a signature when sending back to the API. Preserve the `custom.anthropicThinking.signature` value from the original response.'
+ );
+ }
+ return {
+ type: 'thinking',
+ thinking: part.reasoning,
+ signature,
+ };
+ }
+
+ const redactedThinking = this.getRedactedThinkingData(part);
+ if (redactedThinking !== undefined) {
+ return {
+ type: 'redacted_thinking',
+ data: redactedThinking,
+ };
+ }
+
+ // Text
+ if (part.text) {
+ return { type: 'text', text: part.text };
+ }
+
+ // Media
+ if (part.media) {
+ if (part.media.contentType === 'application/pdf') {
+ return {
+ type: 'document',
+ source: this.toPdfDocumentSource(part.media),
+ };
+ }
+
+ const source = this.toImageSource(part.media);
+ if (source.kind === 'base64') {
+ return {
+ type: 'image',
+ source: {
+ type: 'base64',
+ data: source.data,
+ media_type: source.mediaType,
+ },
+ };
+ }
+ return {
+ type: 'image',
+ source: {
+ type: 'url',
+ url: source.url,
+ },
+ };
+ }
+
+ // Tool request (client tool use)
+ if (part.toolRequest) {
+ if (!part.toolRequest.ref) {
+ throw new Error(
+ `Tool request ref is required for Anthropic API. Part: ${JSON.stringify(
+ part.toolRequest
+ )}`
+ );
+ }
+ return {
+ type: 'tool_use',
+ id: part.toolRequest.ref,
+ name: part.toolRequest.name,
+ input: part.toolRequest.input,
+ };
+ }
+
+ // Tool response (client tool result)
+ if (part.toolResponse) {
+ if (!part.toolResponse.ref) {
+ throw new Error(
+ `Tool response ref is required for Anthropic API. Part: ${JSON.stringify(
+ part.toolResponse
+ )}`
+ );
+ }
+ const betaResult: BetaToolResultBlockParam = {
+ type: 'tool_result',
+ tool_use_id: part.toolResponse.ref,
+ content: [this.toAnthropicToolResponseContent(part)],
+ };
+ return betaResult;
+ }
+
+ throw new Error(
+ `Unsupported genkit part fields encountered for current message role: ${JSON.stringify(
+ part
+ )}.`
+ );
+ }
+
+ protected createMessage(
+ body: BetaMessageCreateParamsNonStreaming,
+ abortSignal: AbortSignal
+ ): Promise {
+ return this.client.beta.messages.create(body, { signal: abortSignal });
+ }
+
+ protected streamMessages(
+ body: BetaMessageCreateParamsStreaming,
+ abortSignal: AbortSignal
+ ): BetaMessageStream {
+ return this.client.beta.messages.stream(body, { signal: abortSignal });
+ }
+
+ /**
+ * Build non-streaming request body.
+ */
+ protected toAnthropicRequestBody(
+ modelName: string,
+    request: GenerateRequest<typeof AnthropicConfigSchema>,
+ cacheSystemPrompt?: boolean
+ ): BetaMessageCreateParamsNonStreaming {
+ const model = KNOWN_CLAUDE_MODELS[modelName];
+ const { system, messages } = this.toAnthropicMessages(request.messages);
+ const mappedModelName =
+ request.config?.version ?? extractVersion(model, modelName);
+
+ let betaSystem: BetaMessageCreateParamsNonStreaming['system'];
+
+ if (system !== undefined) {
+ betaSystem = cacheSystemPrompt
+ ? [
+ {
+ type: 'text' as const,
+ text: system,
+ cache_control: { type: 'ephemeral' as const },
+ },
+ ]
+ : system;
+ }
+
+ const body: BetaMessageCreateParamsNonStreaming = {
+ model: mappedModelName,
+ max_tokens:
+ request.config?.maxOutputTokens ?? this.DEFAULT_MAX_OUTPUT_TOKENS,
+ messages,
+ };
+
+ if (betaSystem !== undefined) body.system = betaSystem;
+ if (request.config?.stopSequences !== undefined)
+ body.stop_sequences = request.config.stopSequences;
+ if (request.config?.temperature !== undefined)
+ body.temperature = request.config.temperature;
+ if (request.config?.topK !== undefined) body.top_k = request.config.topK;
+ if (request.config?.topP !== undefined) body.top_p = request.config.topP;
+ if (request.config?.tool_choice !== undefined) {
+ body.tool_choice = request.config
+ .tool_choice as BetaMessageCreateParams['tool_choice'];
+ }
+ if (request.config?.metadata !== undefined) {
+ body.metadata = request.config
+ .metadata as BetaMessageCreateParams['metadata'];
+ }
+ if (request.tools) {
+ body.tools = request.tools.map((tool) => this.toAnthropicTool(tool));
+ }
+ const thinkingConfig = this.toAnthropicThinkingConfig(
+ request.config?.thinking
+ );
+ if (thinkingConfig) {
+ body.thinking = thinkingConfig as BetaMessageCreateParams['thinking'];
+ }
+
+ if (request.output?.format && request.output.format !== 'text') {
+ throw new Error(
+ `Only text output format is supported for Claude models currently`
+ );
+ }
+
+ return body;
+ }
+
+ /**
+ * Build streaming request body.
+ */
+ protected toAnthropicStreamingRequestBody(
+ modelName: string,
+    request: GenerateRequest<typeof AnthropicConfigSchema>,
+ cacheSystemPrompt?: boolean
+ ): BetaMessageCreateParamsStreaming {
+ const model = KNOWN_CLAUDE_MODELS[modelName];
+ const { system, messages } = this.toAnthropicMessages(request.messages);
+ const mappedModelName =
+ request.config?.version ?? extractVersion(model, modelName);
+
+ const betaSystem =
+ system === undefined
+ ? undefined
+ : cacheSystemPrompt
+ ? [
+ {
+ type: 'text' as const,
+ text: system,
+ cache_control: { type: 'ephemeral' as const },
+ },
+ ]
+ : system;
+
+ const body: BetaMessageCreateParamsStreaming = {
+ model: mappedModelName,
+ max_tokens:
+ request.config?.maxOutputTokens ?? this.DEFAULT_MAX_OUTPUT_TOKENS,
+ messages,
+ stream: true,
+ };
+
+ if (betaSystem !== undefined) body.system = betaSystem;
+ if (request.config?.stopSequences !== undefined)
+ body.stop_sequences = request.config.stopSequences;
+ if (request.config?.temperature !== undefined)
+ body.temperature = request.config.temperature;
+ if (request.config?.topK !== undefined) body.top_k = request.config.topK;
+ if (request.config?.topP !== undefined) body.top_p = request.config.topP;
+ if (request.config?.tool_choice !== undefined) {
+ body.tool_choice = request.config
+ .tool_choice as BetaMessageCreateParams['tool_choice'];
+ }
+ if (request.config?.metadata !== undefined) {
+ body.metadata = request.config
+ .metadata as BetaMessageCreateParams['metadata'];
+ }
+ if (request.tools) {
+ body.tools = request.tools.map((tool) => this.toAnthropicTool(tool));
+ }
+ const thinkingConfig = this.toAnthropicThinkingConfig(
+ request.config?.thinking
+ );
+ if (thinkingConfig) {
+ body.thinking = thinkingConfig as BetaMessageCreateParams['thinking'];
+ }
+
+ if (request.output?.format && request.output.format !== 'text') {
+ throw new Error(
+ `Only text output format is supported for Claude models currently`
+ );
+ }
+
+ return body;
+ }
+
+ protected toGenkitResponse(message: BetaMessage): GenerateResponseData {
+ return {
+ candidates: [
+ {
+ index: 0,
+ finishReason: this.fromBetaStopReason(message.stop_reason),
+ message: {
+ role: 'model',
+ content: message.content.map((block) =>
+ this.fromBetaContentBlock(block)
+ ),
+ },
+ },
+ ],
+ usage: {
+ inputTokens: message.usage.input_tokens,
+ outputTokens: message.usage.output_tokens,
+ },
+ custom: message,
+ };
+ }
+
+ protected toGenkitPart(event: BetaRawMessageStreamEvent): Part | undefined {
+ if (event.type === 'content_block_start') {
+ const blockType = (event.content_block as { type?: string }).type;
+ if (
+ blockType &&
+ BETA_UNSUPPORTED_SERVER_TOOL_BLOCK_TYPES.has(blockType)
+ ) {
+ throw new Error(unsupportedServerToolError(blockType));
+ }
+ return this.fromBetaContentBlock(event.content_block);
+ }
+ if (event.type === 'content_block_delta') {
+ if (event.delta.type === 'text_delta') {
+ return { text: event.delta.text };
+ }
+ if (event.delta.type === 'thinking_delta') {
+ return { reasoning: event.delta.thinking };
+ }
+ // server/client tool input_json_delta not supported yet
+ return undefined;
+ }
+ return undefined;
+ }
+
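+  /**
+   * Converts a single Anthropic beta content block into a Genkit Part, failing fast on
+   * server-managed tool blocks that the plugin does not yet support.
+   */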
+ private fromBetaContentBlock(contentBlock: BetaContentBlock): Part {
+ switch (contentBlock.type) {
+ case 'tool_use': {
+ return {
+ toolRequest: {
+ ref: contentBlock.id,
+ name: contentBlock.name ?? 'unknown_tool',
+ input: contentBlock.input,
+ },
+ };
+ }
+
+ case 'mcp_tool_use':
+ throw new Error(unsupportedServerToolError(contentBlock.type));
+
+ case 'server_tool_use': {
+ const baseName = contentBlock.name ?? 'unknown_tool';
+ const serverToolName =
+ 'server_name' in contentBlock && contentBlock.server_name
+ ? `${contentBlock.server_name}/${baseName}`
+ : baseName;
+ return {
+ text: `[Anthropic server tool ${serverToolName}] input: ${JSON.stringify(contentBlock.input)}`,
+ custom: {
+ anthropicServerToolUse: {
+ id: contentBlock.id,
+ name: serverToolName,
+ input: contentBlock.input,
+ },
+ },
+ };
+ }
+
+ case 'web_search_tool_result':
+ return this.toWebSearchToolResultPart({
+ type: contentBlock.type,
+ toolUseId: contentBlock.tool_use_id,
+ content: contentBlock.content,
+ });
+
+ case 'text':
+ return { text: contentBlock.text };
+
+ case 'thinking':
+ return this.createThinkingPart(
+ contentBlock.thinking,
+ contentBlock.signature
+ );
+
+ case 'redacted_thinking':
+ return { custom: { redactedThinking: contentBlock.data } };
+
+ default: {
+ if (BETA_UNSUPPORTED_SERVER_TOOL_BLOCK_TYPES.has(contentBlock.type)) {
+ throw new Error(unsupportedServerToolError(contentBlock.type));
+ }
+ const unknownType = (contentBlock as { type: string }).type;
+ logger.warn(
+ `Unexpected Anthropic beta content block type: ${unknownType}. Returning empty text. Content block: ${JSON.stringify(
+ contentBlock
+ )}`
+ );
+ return { text: '' };
+ }
+ }
+ }
+
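+  /**
+   * Maps an Anthropic beta stop reason onto Genkit's finishReason vocabulary.
+   */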
+ private fromBetaStopReason(
+ reason: BetaStopReason | null
+ ): ModelResponseData['finishReason'] {
+ switch (reason) {
+ case 'max_tokens':
+ case 'model_context_window_exceeded':
+ return 'length';
+ case 'end_turn':
+ case 'stop_sequence':
+ case 'tool_use':
+ case 'pause_turn':
+ return 'stop';
+ case null:
+ return 'unknown';
+ case 'refusal':
+ return 'other';
+ default:
+ return 'other';
+ }
+ }
+}
diff --git a/js/plugins/anthropic/src/runner/index.ts b/js/plugins/anthropic/src/runner/index.ts
new file mode 100644
index 0000000000..ce7e3c6fdd
--- /dev/null
+++ b/js/plugins/anthropic/src/runner/index.ts
@@ -0,0 +1,19 @@
+/**
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export { BaseRunner } from './base.js';
+export { BetaRunner } from './beta.js';
+export { Runner } from './stable.js';
diff --git a/js/plugins/anthropic/src/runner/stable.ts b/js/plugins/anthropic/src/runner/stable.ts
new file mode 100644
index 0000000000..0c8f7ffc4f
--- /dev/null
+++ b/js/plugins/anthropic/src/runner/stable.ts
@@ -0,0 +1,514 @@
+/**
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { MessageStream } from '@anthropic-ai/sdk/lib/MessageStream.js';
+import type {
+ ContentBlock,
+ DocumentBlockParam,
+ ImageBlockParam,
+ Message,
+ MessageCreateParams,
+ MessageCreateParamsNonStreaming,
+ MessageCreateParamsStreaming,
+ MessageParam,
+ MessageStreamEvent,
+ RedactedThinkingBlockParam,
+ TextBlockParam,
+ ThinkingBlockParam,
+ Tool,
+ ToolResultBlockParam,
+ ToolUseBlockParam,
+} from '@anthropic-ai/sdk/resources/messages';
+import type {
+ GenerateRequest,
+ GenerateResponseData,
+ ModelResponseData,
+ Part,
+} from 'genkit';
+import { logger } from 'genkit/logging';
+
+import { KNOWN_CLAUDE_MODELS, extractVersion } from '../models.js';
+import { AnthropicConfigSchema, type ClaudeRunnerParams } from '../types.js';
+import { BaseRunner } from './base.js';
+import { RunnerTypes as BaseRunnerTypes } from './types.js';
+interface RunnerTypes extends BaseRunnerTypes {
+ Message: Message;
+ Stream: MessageStream;
+ StreamEvent: MessageStreamEvent;
+ RequestBody: MessageCreateParamsNonStreaming;
+ StreamingRequestBody: MessageCreateParamsStreaming;
+ Tool: Tool;
+ MessageParam: MessageParam;
+ ToolResponseContent: TextBlockParam | ImageBlockParam;
+ ContentBlockParam:
+ | TextBlockParam
+ | ImageBlockParam
+ | DocumentBlockParam
+ | ToolUseBlockParam
+ | ToolResultBlockParam
+ | ThinkingBlockParam
+ | RedactedThinkingBlockParam;
+}
+
+export class Runner extends BaseRunner<RunnerTypes> {
+ constructor(params: ClaudeRunnerParams) {
+ super(params);
+ }
+
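+  /**
+   * Map a Genkit Part -> Anthropic stable content block param.
+   * Supports: text, images (base64 data URLs or remote URLs), PDFs (document source),
+   * thinking/redacted thinking blocks, tool_use (client tool request), and
+   * tool_result (client tool response).
+   */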
+ protected toAnthropicMessageContent(
+ part: Part
+ ):
+ | TextBlockParam
+ | ImageBlockParam
+ | DocumentBlockParam
+ | ToolUseBlockParam
+ | ToolResultBlockParam
+ | ThinkingBlockParam
+ | RedactedThinkingBlockParam {
+ if (part.reasoning) {
+ const signature = this.getThinkingSignature(part);
+ if (!signature) {
+ throw new Error(
+ 'Anthropic thinking parts require a signature when sending back to the API. Preserve the `custom.anthropicThinking.signature` value from the original response.'
+ );
+ }
+ return {
+ type: 'thinking',
+ thinking: part.reasoning,
+ signature,
+ };
+ }
+
+ const redactedThinking = this.getRedactedThinkingData(part);
+ if (redactedThinking !== undefined) {
+ return {
+ type: 'redacted_thinking',
+ data: redactedThinking,
+ };
+ }
+
+ if (part.text) {
+ return {
+ type: 'text',
+ text: part.text,
+ citations: null,
+ };
+ }
+
+ if (part.media) {
+ if (part.media.contentType === 'application/pdf') {
+ return {
+ type: 'document',
+ source: this.toPdfDocumentSource(part.media),
+ };
+ }
+
+ const source = this.toImageSource(part.media);
+ if (source.kind === 'base64') {
+ return {
+ type: 'image',
+ source: {
+ type: 'base64',
+ data: source.data,
+ media_type: source.mediaType,
+ },
+ };
+ }
+ return {
+ type: 'image',
+ source: {
+ type: 'url',
+ url: source.url,
+ },
+ };
+ }
+
+ if (part.toolRequest) {
+ if (!part.toolRequest.ref) {
+ throw new Error(
+ `Tool request ref is required for Anthropic API. Part: ${JSON.stringify(
+ part.toolRequest
+ )}`
+ );
+ }
+ return {
+ type: 'tool_use',
+ id: part.toolRequest.ref,
+ name: part.toolRequest.name,
+ input: part.toolRequest.input,
+ };
+ }
+
+ if (part.toolResponse) {
+ if (!part.toolResponse.ref) {
+ throw new Error(
+ `Tool response ref is required for Anthropic API. Part: ${JSON.stringify(
+ part.toolResponse
+ )}`
+ );
+ }
+ return {
+ type: 'tool_result',
+ tool_use_id: part.toolResponse.ref,
+ content: [this.toAnthropicToolResponseContent(part)],
+ };
+ }
+
+ throw new Error(
+ `Unsupported genkit part fields encountered for current message role: ${JSON.stringify(
+ part
+ )}.`
+ );
+ }
+
+  protected toAnthropicRequestBody(
+    modelName: string,
+    request: GenerateRequest<typeof AnthropicConfigSchema>,
+    cacheSystemPrompt?: boolean
+  ): MessageCreateParamsNonStreaming {
+ const model = KNOWN_CLAUDE_MODELS[modelName];
+ const { system, messages } = this.toAnthropicMessages(request.messages);
+ const mappedModelName =
+ request.config?.version ?? extractVersion(model, modelName);
+
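+    // When prompt caching is requested, wrap the system prompt in a text block
+    // with ephemeral cache_control so Anthropic can reuse it across requests.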
+ const systemValue =
+ system === undefined
+ ? undefined
+ : cacheSystemPrompt
+ ? [
+ {
+ type: 'text' as const,
+ text: system,
+ cache_control: { type: 'ephemeral' as const },
+ },
+ ]
+ : system;
+
+ const body: MessageCreateParamsNonStreaming = {
+ model: mappedModelName,
+ max_tokens:
+ request.config?.maxOutputTokens ?? this.DEFAULT_MAX_OUTPUT_TOKENS,
+ messages,
+ };
+
+ if (systemValue !== undefined) {
+ body.system = systemValue;
+ }
+
+ if (request.tools) {
+ body.tools = request.tools.map((tool) => this.toAnthropicTool(tool));
+ }
+ if (request.config?.topK !== undefined) {
+ body.top_k = request.config.topK;
+ }
+ if (request.config?.topP !== undefined) {
+ body.top_p = request.config.topP;
+ }
+ if (request.config?.temperature !== undefined) {
+ body.temperature = request.config.temperature;
+ }
+ if (request.config?.stopSequences !== undefined) {
+ body.stop_sequences = request.config.stopSequences;
+ }
+ if (request.config?.metadata !== undefined) {
+ body.metadata = request.config.metadata;
+ }
+ if (request.config?.tool_choice !== undefined) {
+ body.tool_choice = request.config.tool_choice;
+ }
+ const thinkingConfig = this.toAnthropicThinkingConfig(
+ request.config?.thinking
+ );
+ if (thinkingConfig) {
+ body.thinking = thinkingConfig as MessageCreateParams['thinking'];
+ }
+
+ if (request.output?.format && request.output.format !== 'text') {
+ throw new Error(
+ `Only text output format is supported for Claude models currently`
+ );
+ }
+ return body;
+ }
+
+  protected toAnthropicStreamingRequestBody(
+    modelName: string,
+    request: GenerateRequest<typeof AnthropicConfigSchema>,
+    cacheSystemPrompt?: boolean
+  ): MessageCreateParamsStreaming {
+ const model = KNOWN_CLAUDE_MODELS[modelName];
+ const { system, messages } = this.toAnthropicMessages(request.messages);
+ const mappedModelName =
+ request.config?.version ?? extractVersion(model, modelName);
+
+ const systemValue =
+ system === undefined
+ ? undefined
+ : cacheSystemPrompt
+ ? [
+ {
+ type: 'text' as const,
+ text: system,
+ cache_control: { type: 'ephemeral' as const },
+ },
+ ]
+ : system;
+
+ const body: MessageCreateParamsStreaming = {
+ model: mappedModelName,
+ max_tokens:
+ request.config?.maxOutputTokens ?? this.DEFAULT_MAX_OUTPUT_TOKENS,
+ messages,
+ stream: true,
+ };
+
+ if (systemValue !== undefined) {
+ body.system = systemValue;
+ }
+
+ if (request.tools) {
+ body.tools = request.tools.map((tool) => this.toAnthropicTool(tool));
+ }
+ if (request.config?.topK !== undefined) {
+ body.top_k = request.config.topK;
+ }
+ if (request.config?.topP !== undefined) {
+ body.top_p = request.config.topP;
+ }
+ if (request.config?.temperature !== undefined) {
+ body.temperature = request.config.temperature;
+ }
+ if (request.config?.stopSequences !== undefined) {
+ body.stop_sequences = request.config.stopSequences;
+ }
+ if (request.config?.metadata !== undefined) {
+ body.metadata = request.config.metadata;
+ }
+ if (request.config?.tool_choice !== undefined) {
+ body.tool_choice = request.config.tool_choice;
+ }
+ const thinkingConfig = this.toAnthropicThinkingConfig(
+ request.config?.thinking
+ );
+ if (thinkingConfig) {
+ body.thinking =
+ thinkingConfig as MessageCreateParamsStreaming['thinking'];
+ }
+
+ if (request.output?.format && request.output.format !== 'text') {
+ throw new Error(
+ `Only text output format is supported for Claude models currently`
+ );
+ }
+ return body;
+ }
+
+ protected async createMessage(
+ body: MessageCreateParamsNonStreaming,
+ abortSignal: AbortSignal
+  ): Promise<Message> {
+ return await this.client.messages.create(body, { signal: abortSignal });
+ }
+
+ protected streamMessages(
+ body: MessageCreateParamsStreaming,
+ abortSignal: AbortSignal
+ ): MessageStream {
+ return this.client.messages.stream(body, { signal: abortSignal });
+ }
+
+ protected toGenkitResponse(message: Message): GenerateResponseData {
+ return this.fromAnthropicResponse(message);
+ }
+
+ protected toGenkitPart(event: MessageStreamEvent): Part | undefined {
+ return this.fromAnthropicContentBlockChunk(event);
+ }
+
+ protected fromAnthropicContentBlockChunk(
+ event: MessageStreamEvent
+ ): Part | undefined {
+ // Handle content_block_delta events
+ if (event.type === 'content_block_delta') {
+ const delta = event.delta;
+
+ if (delta.type === 'input_json_delta') {
+ throw new Error(
+ 'Anthropic streaming tool input (input_json_delta) is not yet supported. Please disable streaming or upgrade this plugin.'
+ );
+ }
+
+ if (delta.type === 'text_delta') {
+ return { text: delta.text };
+ }
+
+ if (delta.type === 'thinking_delta') {
+ return { reasoning: delta.thinking };
+ }
+
+ // signature_delta - ignore
+ return undefined;
+ }
+
+ // Handle content_block_start events
+ if (event.type === 'content_block_start') {
+ const block = event.content_block;
+
+ switch (block.type) {
+ case 'server_tool_use':
+ return {
+ text: `[Anthropic server tool ${block.name}] input: ${JSON.stringify(block.input)}`,
+ custom: {
+ anthropicServerToolUse: {
+ id: block.id,
+ name: block.name,
+ input: block.input,
+ },
+ },
+ };
+
+ case 'web_search_tool_result':
+ return this.toWebSearchToolResultPart({
+ type: block.type,
+ toolUseId: block.tool_use_id,
+ content: block.content,
+ });
+
+ case 'text':
+ return { text: block.text };
+
+ case 'thinking':
+ return this.createThinkingPart(block.thinking, block.signature);
+
+ case 'redacted_thinking':
+ return { custom: { redactedThinking: block.data } };
+
+ case 'tool_use':
+ return {
+ toolRequest: {
+ ref: block.id,
+ name: block.name,
+ input: block.input,
+ },
+ };
+
+ default: {
+ const unknownType = (block as { type: string }).type;
+ logger.warn(
+ `Unexpected Anthropic content block type in stream: ${unknownType}. Returning undefined. Content block: ${JSON.stringify(block)}`
+ );
+ return undefined;
+ }
+ }
+ }
+
+ // Other event types (message_start, message_delta, etc.) - ignore
+ return undefined;
+ }
+
+ protected fromAnthropicContentBlock(contentBlock: ContentBlock): Part {
+ switch (contentBlock.type) {
+ case 'server_tool_use':
+ return {
+ text: `[Anthropic server tool ${contentBlock.name}] input: ${JSON.stringify(contentBlock.input)}`,
+ custom: {
+ anthropicServerToolUse: {
+ id: contentBlock.id,
+ name: contentBlock.name,
+ input: contentBlock.input,
+ },
+ },
+ };
+
+ case 'web_search_tool_result':
+ return this.toWebSearchToolResultPart({
+ type: contentBlock.type,
+ toolUseId: contentBlock.tool_use_id,
+ content: contentBlock.content,
+ });
+
+ case 'tool_use':
+ return {
+ toolRequest: {
+ ref: contentBlock.id,
+ name: contentBlock.name,
+ input: contentBlock.input,
+ },
+ };
+
+ case 'text':
+ return { text: contentBlock.text };
+
+ case 'thinking':
+ return this.createThinkingPart(
+ contentBlock.thinking,
+ contentBlock.signature
+ );
+
+ case 'redacted_thinking':
+ return { custom: { redactedThinking: contentBlock.data } };
+
+ default: {
+ const unknownType = (contentBlock as { type: string }).type;
+ logger.warn(
+ `Unexpected Anthropic content block type: ${unknownType}. Returning empty text. Content block: ${JSON.stringify(contentBlock)}`
+ );
+ return { text: '' };
+ }
+ }
+ }
+
+ protected fromAnthropicStopReason(
+ reason: Message['stop_reason']
+ ): ModelResponseData['finishReason'] {
+ switch (reason) {
+ case 'max_tokens':
+ return 'length';
+ case 'end_turn':
+ // fall through
+ case 'stop_sequence':
+ // fall through
+ case 'tool_use':
+ return 'stop';
+ case null:
+ return 'unknown';
+ default:
+ return 'other';
+ }
+ }
+
+ protected fromAnthropicResponse(response: Message): GenerateResponseData {
+ return {
+ candidates: [
+ {
+ index: 0,
+ finishReason: this.fromAnthropicStopReason(response.stop_reason),
+ message: {
+ role: 'model',
+ content: response.content.map((block) =>
+ this.fromAnthropicContentBlock(block)
+ ),
+ },
+ },
+ ],
+ usage: {
+ inputTokens: response.usage.input_tokens,
+ outputTokens: response.usage.output_tokens,
+ },
+ custom: response,
+ };
+ }
+}
diff --git a/js/plugins/anthropic/src/runner/types.ts b/js/plugins/anthropic/src/runner/types.ts
new file mode 100644
index 0000000000..5fd04c6911
--- /dev/null
+++ b/js/plugins/anthropic/src/runner/types.ts
@@ -0,0 +1,78 @@
+/**
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Type contract that each Anthropic runner passes into the generic `BaseRunner`.
+ *
+ * The concrete runners (stable vs. beta SDKs) bind these slots to their SDK’s
+ * concrete interfaces so the shared logic in `BaseRunner` can stay strongly typed
+ * without knowing which SDK variant it is talking to.
+ *
+ * Properties are `unknown` by default, so every subclass must plug in the
+ * correct Anthropic types to keep the generic plumbing sound.
+ */
+type RunnerTypes = {
+ Message: unknown;
+  Stream: AsyncIterable<unknown> & { finalMessage(): Promise<unknown> };
+ StreamEvent: unknown;
+ RequestBody: unknown;
+ StreamingRequestBody: unknown;
+ Tool: unknown;
+ MessageParam: unknown;
+ ContentBlockParam: unknown;
+ ToolResponseContent: unknown;
+};
+
+/** Complete Anthropic message returned by a non-streaming call. */
+type RunnerMessage<ApiTypes extends RunnerTypes> = ApiTypes['Message'];
+
+/** Streaming handle that yields Anthropic events and exposes the final message. */
+type RunnerStream<ApiTypes extends RunnerTypes> = ApiTypes['Stream'];
+
+/** Discrete event emitted by the Anthropic stream (delta, block start, etc.). */
+type RunnerStreamEvent<ApiTypes extends RunnerTypes> = ApiTypes['StreamEvent'];
+
+/** Non-streaming request payload shape for create-message calls. */
+type RunnerRequestBody<ApiTypes extends RunnerTypes> = ApiTypes['RequestBody'];
+
+/** Streaming request payload shape for create-message calls. */
+type RunnerStreamingRequestBody<ApiTypes extends RunnerTypes> =
+  ApiTypes['StreamingRequestBody'];
+
+/** Tool definition compatible with the target Anthropic SDK. */
+type RunnerTool<ApiTypes extends RunnerTypes> = ApiTypes['Tool'];
+
+/** Anthropic message param shape used when sending history to the API. */
+type RunnerMessageParam<ApiTypes extends RunnerTypes> =
+  ApiTypes['MessageParam'];
+
+/** Content block that the runner sends to Anthropic for a single part. */
+type RunnerContentBlockParam<ApiTypes extends RunnerTypes> =
+  ApiTypes['ContentBlockParam'];
+
+/** Tool response block that Anthropic expects when returning tool output. */
+type RunnerToolResponseContent<ApiTypes extends RunnerTypes> =
+  ApiTypes['ToolResponseContent'];
+
+export {
+ RunnerContentBlockParam,
+ RunnerMessage,
+ RunnerMessageParam,
+ RunnerRequestBody,
+ RunnerStream,
+ RunnerStreamEvent,
+ RunnerStreamingRequestBody,
+ RunnerTool,
+ RunnerToolResponseContent,
+ RunnerTypes,
+};
diff --git a/js/plugins/anthropic/src/types.ts b/js/plugins/anthropic/src/types.ts
new file mode 100644
index 0000000000..9796d71c36
--- /dev/null
+++ b/js/plugins/anthropic/src/types.ts
@@ -0,0 +1,166 @@
+/**
+ * Copyright 2024 Bloom Labs Inc
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import type Anthropic from '@anthropic-ai/sdk';
+import { z } from 'genkit';
+import { GenerationCommonConfigSchema } from 'genkit/model';
+
+/**
+ * Internal symbol for dependency injection in tests.
+ * Not part of the public API.
+ * @internal
+ */
+export const __testClient = Symbol('testClient');
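+// Illustrative test-only usage (mock client shape assumed):
+//   anthropic({ apiKey: 'test-key', [__testClient]: mockAnthropicClient });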
+
+/**
+ * Plugin configuration options for the Anthropic plugin.
+ */
+export interface PluginOptions {
+  /** Anthropic API key; falls back to the ANTHROPIC_API_KEY environment variable. */
+  apiKey?: string;
+  /** When true, system prompts are sent with ephemeral cache_control to enable prompt caching. */
+  cacheSystemPrompt?: boolean;
+  /** Default API surface for all requests unless overridden per-request. */
+  apiVersion?: 'stable' | 'beta';
+}
+
+/**
+ * Internal plugin options that include test client injection.
+ * @internal
+ */
+export interface InternalPluginOptions extends PluginOptions {
+ [__testClient]?: Anthropic;
+}
+
+/**
+ * Shared parameters required to construct Claude helpers.
+ */
+interface ClaudeHelperParamsBase {
+ name: string;
+ client: Anthropic;
+ cacheSystemPrompt?: boolean;
+ defaultApiVersion?: 'stable' | 'beta';
+}
+
+/**
+ * Parameters for creating a Claude model action.
+ */
+export interface ClaudeModelParams extends ClaudeHelperParamsBase {}
+
+/**
+ * Parameters for creating a Claude runner.
+ */
+export interface ClaudeRunnerParams extends ClaudeHelperParamsBase {}
+
+export const AnthropicBaseConfigSchema = GenerationCommonConfigSchema.extend({
+ tool_choice: z
+ .union([
+ z.object({
+ type: z.literal('auto'),
+ }),
+ z.object({
+ type: z.literal('any'),
+ }),
+ z.object({
+ type: z.literal('tool'),
+ name: z.string(),
+ }),
+ ])
+ .optional(),
+ metadata: z
+ .object({
+ user_id: z.string().optional(),
+ })
+ .optional(),
+ /** Optional shorthand to pick API surface for this request. */
+ apiVersion: z.enum(['stable', 'beta']).optional(),
+});
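+
+// Illustrative config accepted by this schema (values are examples only):
+//   { temperature: 0.5, tool_choice: { type: 'tool', name: 'get_weather' },
+//     metadata: { user_id: 'user-123' }, apiVersion: 'beta' }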
+
+export type AnthropicBaseConfigSchemaType = typeof AnthropicBaseConfigSchema;
+
+export const ThinkingConfigSchema = z
+ .object({
+ enabled: z.boolean().optional(),
+ budgetTokens: z.number().int().min(1_024).optional(),
+ })
+ .superRefine((value, ctx) => {
+ if (value.enabled && value.budgetTokens === undefined) {
+ ctx.addIssue({
+ code: z.ZodIssueCode.custom,
+ path: ['budgetTokens'],
+ message: 'budgetTokens is required when thinking is enabled',
+ });
+ }
+ });
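+
+// For example, { enabled: true, budgetTokens: 2048 } passes validation, while
+// { enabled: true } is rejected because budgetTokens is required when thinking is enabled.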
+
+export const AnthropicThinkingConfigSchema = AnthropicBaseConfigSchema.extend({
+ thinking: ThinkingConfigSchema.optional(),
+});
+
+export const AnthropicConfigSchema = AnthropicThinkingConfigSchema;
+
+export type ThinkingConfig = z.infer<typeof ThinkingConfigSchema>;
+export type AnthropicBaseConfig = z.infer<typeof AnthropicBaseConfigSchema>;
+export type AnthropicThinkingConfig = z.infer<
+ typeof AnthropicThinkingConfigSchema
+>;
+export type ClaudeConfig = AnthropicThinkingConfig | AnthropicBaseConfig;
+
+/**
+ * Media object representation with URL and optional content type.
+ */
+export interface Media {
+ url: string;
+ contentType?: string;
+}
+
+export const MediaSchema = z.object({
+ url: z.string(),
+ contentType: z.string().optional(),
+});
+
+export const MediaTypeSchema = z.enum([
+ 'image/jpeg',
+ 'image/png',
+ 'image/gif',
+ 'image/webp',
+]);
+
+export type MediaType = z.infer<typeof MediaTypeSchema>;
+
+export const MEDIA_TYPES = {
+ JPEG: 'image/jpeg',
+ PNG: 'image/png',
+ GIF: 'image/gif',
+ WEBP: 'image/webp',
+} as const satisfies Record<string, MediaType>;
+
+/**
+ * Resolve whether beta API should be used for this call.
+ * Priority:
+ * 1. request.config.apiVersion (per-request override - explicit stable or beta)
+ * 2. pluginDefaultApiVersion (plugin-wide default)
+ * 3. otherwise stable
+ */
+export function resolveBetaEnabled(
+ cfg: AnthropicThinkingConfig | AnthropicBaseConfig | undefined,
+ pluginDefaultApiVersion?: 'stable' | 'beta'
+): boolean {
+ if (cfg?.apiVersion !== undefined) {
+ return cfg.apiVersion === 'beta';
+ }
+ if (pluginDefaultApiVersion === 'beta') return true;
+ return false;
+}
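+
+// Illustrative outcomes (per the priority above):
+//   resolveBetaEnabled({ apiVersion: 'stable' }, 'beta'); // => false, request override wins
+//   resolveBetaEnabled(undefined, 'beta');                // => true, plugin-wide default
+//   resolveBetaEnabled(undefined, undefined);             // => false, stable by default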
diff --git a/js/plugins/anthropic/tests/beta_runner_test.ts b/js/plugins/anthropic/tests/beta_runner_test.ts
new file mode 100644
index 0000000000..655bfc599e
--- /dev/null
+++ b/js/plugins/anthropic/tests/beta_runner_test.ts
@@ -0,0 +1,804 @@
+/**
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import * as assert from 'assert';
+import type { Part } from 'genkit';
+import { describe, it } from 'node:test';
+
+import { BetaRunner } from '../src/runner/beta.js';
+import { createMockAnthropicClient } from './mocks/anthropic-client.js';
+
+describe('BetaRunner.toAnthropicMessageContent', () => {
+ function createRunner() {
+ return new BetaRunner({
+ name: 'anthropic/claude-3-5-haiku',
+ client: createMockAnthropicClient(),
+ cacheSystemPrompt: false,
+ });
+ }
+
+ it('converts PDF media parts into document blocks', () => {
+ const runner = createRunner();
+ const part: Part = {
+ media: {
+ contentType: 'application/pdf',
+ url: 'data:application/pdf;base64,UEsDBAoAAAAAAD',
+ },
+ };
+
+ const result = (runner as any).toAnthropicMessageContent(part);
+
+ assert.strictEqual(result.type, 'document');
+ assert.ok(result.source);
+ assert.strictEqual(result.source.type, 'base64');
+ assert.strictEqual(result.source.media_type, 'application/pdf');
+ assert.ok(result.source.data);
+ });
+
+ it('throws when tool request ref is missing', () => {
+ const runner = createRunner();
+ const part: Part = {
+ toolRequest: {
+ name: 'do_something',
+ input: { foo: 'bar' },
+ },
+ };
+
+ assert.throws(() => {
+ (runner as any).toAnthropicMessageContent(part);
+ }, /Tool request ref is required/);
+ });
+
+ it('maps tool request with ref into tool_use block', () => {
+ const runner = createRunner();
+ const part: Part = {
+ toolRequest: {
+ ref: 'tool-123',
+ name: 'do_something',
+ input: { foo: 'bar' },
+ },
+ };
+
+ const result = (runner as any).toAnthropicMessageContent(part);
+
+ assert.strictEqual(result.type, 'tool_use');
+ assert.strictEqual(result.id, 'tool-123');
+ assert.strictEqual(result.name, 'do_something');
+ assert.deepStrictEqual(result.input, { foo: 'bar' });
+ });
+
+ it('throws when tool response ref is missing', () => {
+ const runner = createRunner();
+ const part: Part = {
+ toolResponse: {
+ name: 'do_something',
+ output: 'done',
+ },
+ };
+
+ assert.throws(() => {
+ (runner as any).toAnthropicMessageContent(part);
+ }, /Tool response ref is required/);
+ });
+
+ it('maps tool response into tool_result block containing text response', () => {
+ const runner = createRunner();
+ const part: Part = {
+ toolResponse: {
+ name: 'do_something',
+ ref: 'tool-abc',
+ output: 'done',
+ },
+ };
+
+ const result = (runner as any).toAnthropicMessageContent(part);
+
+ assert.strictEqual(result.type, 'tool_result');
+ assert.strictEqual(result.tool_use_id, 'tool-abc');
+ assert.deepStrictEqual(result.content, [{ type: 'text', text: 'done' }]);
+ });
+
+ it('should handle WEBP image data URLs', () => {
+ const runner = createRunner();
+ const part: Part = {
+ media: {
+ contentType: 'image/webp',
+ url: 'data:image/webp;base64,AAA',
+ },
+ };
+
+ const result = (runner as any).toAnthropicMessageContent(part);
+
+ assert.strictEqual(result.type, 'image');
+ assert.strictEqual(result.source.type, 'base64');
+ assert.strictEqual(result.source.media_type, 'image/webp');
+ assert.strictEqual(result.source.data, 'AAA');
+ });
+
+ it('should prefer data URL content type over media.contentType for WEBP', () => {
+ const runner = createRunner();
+ const part: Part = {
+ media: {
+ // Even if contentType says PNG, data URL says WEBP - should use WEBP
+ contentType: 'image/png',
+ url: 'data:image/webp;base64,AAA',
+ },
+ };
+
+ const result = (runner as any).toAnthropicMessageContent(part);
+
+ assert.strictEqual(result.type, 'image');
+ assert.strictEqual(result.source.type, 'base64');
+ // Key fix: should use data URL type (webp), not contentType (png)
+ assert.strictEqual(result.source.media_type, 'image/webp');
+ assert.strictEqual(result.source.data, 'AAA');
+ });
+
+ it('should throw helpful error for text/plain in toAnthropicMessageContent', () => {
+ const runner = createRunner();
+ const part: Part = {
+ media: {
+ contentType: 'text/plain',
+ url: 'data:text/plain;base64,AAA',
+ },
+ };
+
+ assert.throws(
+ () => {
+ (runner as any).toAnthropicMessageContent(part);
+ },
+ (error: Error) => {
+ return (
+ error.message.includes('Text files should be sent as text content') &&
+ error.message.includes('text:')
+ );
+ }
+ );
+ });
+
+ it('should throw helpful error for text/plain with remote URL', () => {
+ const runner = createRunner();
+ const part: Part = {
+ media: {
+ contentType: 'text/plain',
+ url: 'https://example.com/file.txt',
+ },
+ };
+
+ assert.throws(
+ () => {
+ (runner as any).toAnthropicMessageContent(part);
+ },
+ (error: Error) => {
+ return (
+ error.message.includes('Text files should be sent as text content') &&
+ error.message.includes('text:')
+ );
+ }
+ );
+ });
+
+ it('should throw helpful error for text/plain in tool response', () => {
+ const runner = createRunner();
+ const part: Part = {
+ toolResponse: {
+ ref: 'call_123',
+ name: 'get_file',
+ output: {
+ url: 'data:text/plain;base64,AAA',
+ contentType: 'text/plain',
+ },
+ },
+ };
+
+ assert.throws(
+ () => {
+ (runner as any).toAnthropicToolResponseContent(part);
+ },
+ (error: Error) => {
+ return error.message.includes(
+ 'Text files should be sent as text content'
+ );
+ }
+ );
+ });
+});
+
+import { Anthropic } from '@anthropic-ai/sdk';
+import { mock } from 'node:test';
+
+describe('BetaRunner', () => {
+ it('should map all supported Part shapes to beta content blocks', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new BetaRunner({
+ name: 'claude-test',
+ client: mockClient as Anthropic,
+ });
+
+ const exposed = runner as any;
+
+ const textPart = exposed.toAnthropicMessageContent({
+ text: 'Hello',
+ } as any);
+ assert.deepStrictEqual(textPart, { type: 'text', text: 'Hello' });
+
+ const pdfPart = exposed.toAnthropicMessageContent({
+ media: {
+ url: 'data:application/pdf;base64,JVBERi0xLjQKJ',
+ contentType: 'application/pdf',
+ },
+ } as any);
+ assert.strictEqual(pdfPart.type, 'document');
+
+ const imagePart = exposed.toAnthropicMessageContent({
+ media: {
+ url: 'data:image/png;base64,AAA',
+ contentType: 'image/png',
+ },
+ } as any);
+ assert.strictEqual(imagePart.type, 'image');
+
+ const toolUsePart = exposed.toAnthropicMessageContent({
+ toolRequest: {
+ ref: 'tool1',
+ name: 'get_weather',
+ input: { city: 'NYC' },
+ },
+ } as any);
+ assert.deepStrictEqual(toolUsePart, {
+ type: 'tool_use',
+ id: 'tool1',
+ name: 'get_weather',
+ input: { city: 'NYC' },
+ });
+
+ const toolResultPart = exposed.toAnthropicMessageContent({
+ toolResponse: {
+ ref: 'tool1',
+ name: 'get_weather',
+ output: 'Sunny',
+ },
+ } as any);
+ assert.strictEqual(toolResultPart.type, 'tool_result');
+
+ assert.throws(() => exposed.toAnthropicMessageContent({} as any));
+ });
+
+ it('should convert beta stream events to Genkit Parts', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new BetaRunner({
+ name: 'claude-test',
+ client: mockClient as Anthropic,
+ });
+
+ const exposed = runner as any;
+ const textPart = exposed.toGenkitPart({
+ type: 'content_block_start',
+ index: 0,
+ content_block: { type: 'text', text: 'hi' },
+ } as any);
+ assert.deepStrictEqual(textPart, { text: 'hi' });
+
+ const serverToolEvent = {
+ type: 'content_block_start',
+ index: 0,
+ content_block: {
+ type: 'server_tool_use',
+ id: 'toolu_test',
+ name: 'myTool',
+ input: { foo: 'bar' },
+ server_name: 'srv',
+ },
+ } as any;
+ const toolPart = exposed.toGenkitPart(serverToolEvent);
+ assert.deepStrictEqual(toolPart, {
+ text: '[Anthropic server tool srv/myTool] input: {"foo":"bar"}',
+ custom: {
+ anthropicServerToolUse: {
+ id: 'toolu_test',
+ name: 'srv/myTool',
+ input: { foo: 'bar' },
+ },
+ },
+ });
+
+ const deltaPart = exposed.toGenkitPart({
+ type: 'content_block_delta',
+ index: 0,
+ delta: { type: 'thinking_delta', thinking: 'hmm' },
+ } as any);
+ assert.deepStrictEqual(deltaPart, { reasoning: 'hmm' });
+
+ const ignored = exposed.toGenkitPart({ type: 'message_stop' } as any);
+ assert.strictEqual(ignored, undefined);
+ });
+
+ it('should throw on unsupported mcp tool stream events', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new BetaRunner({
+ name: 'claude-test',
+ client: mockClient as Anthropic,
+ });
+
+ const exposed = runner as any;
+ assert.throws(
+ () =>
+ exposed.toGenkitPart({
+ type: 'content_block_start',
+ index: 0,
+ content_block: {
+ type: 'mcp_tool_use',
+ id: 'toolu_unsupported',
+ input: {},
+ },
+ }),
+ /server-managed tool block 'mcp_tool_use'/
+ );
+ });
+
+ it('should map beta stop reasons correctly', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new BetaRunner({
+ name: 'claude-test',
+ client: mockClient as Anthropic,
+ });
+
+ const finishReason = runner['fromBetaStopReason'](
+ 'model_context_window_exceeded'
+ );
+ assert.strictEqual(finishReason, 'length');
+
+ const pauseReason = runner['fromBetaStopReason']('pause_turn');
+ assert.strictEqual(pauseReason, 'stop');
+ });
+
+ it('should execute streaming calls and surface errors', async () => {
+ const streamError = new Error('stream failed');
+ const mockClient = createMockAnthropicClient({
+ streamChunks: [
+ {
+ type: 'content_block_start',
+ index: 0,
+ content_block: { type: 'text', text: 'hi' },
+ } as any,
+ ],
+ streamErrorAfterChunk: 1,
+ streamError,
+ });
+
+ const runner = new BetaRunner({
+ name: 'claude-test',
+ client: mockClient as Anthropic,
+ });
+ const sendChunk = mock.fn();
+ await assert.rejects(async () =>
+ runner.run({ messages: [] } as any, {
+ streamingRequested: true,
+ sendChunk,
+ abortSignal: new AbortController().signal,
+ })
+ );
+ assert.strictEqual(sendChunk.mock.calls.length, 1);
+
+ const abortController = new AbortController();
+ abortController.abort();
+ await assert.rejects(async () =>
+ runner.run({ messages: [] } as any, {
+ streamingRequested: true,
+ sendChunk: () => {},
+ abortSignal: abortController.signal,
+ })
+ );
+ });
+
+ it('should throw when tool refs are missing in message content', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new BetaRunner({
+ name: 'claude-test',
+ client: mockClient as Anthropic,
+ });
+ const exposed = runner as any;
+
+ assert.throws(() =>
+ exposed.toAnthropicMessageContent({
+ toolRequest: {
+ name: 'get_weather',
+ input: {},
+ },
+ } as any)
+ );
+
+ assert.throws(() =>
+ exposed.toAnthropicMessageContent({
+ toolResponse: {
+ name: 'get_weather',
+ output: 'ok',
+ },
+ } as any)
+ );
+
+ assert.throws(() =>
+ exposed.toAnthropicMessageContent({
+ media: {
+ url: 'data:image/png;base64,',
+ contentType: undefined,
+ },
+ } as any)
+ );
+ });
+
+ it('should build request bodies with optional config fields', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new BetaRunner({
+ name: 'claude-3-5-haiku',
+ client: mockClient as Anthropic,
+ cacheSystemPrompt: true,
+ }) as any;
+
+ const request = {
+ messages: [
+ {
+ role: 'system',
+ content: [{ text: 'You are helpful.' }],
+ },
+ {
+ role: 'user',
+ content: [{ text: 'Tell me a joke' }],
+ },
+ ],
+ config: {
+ maxOutputTokens: 128,
+ topK: 4,
+ topP: 0.65,
+ temperature: 0.55,
+ stopSequences: ['DONE'],
+ metadata: { user_id: 'beta-user' },
+ tool_choice: { type: 'tool', name: 'get_weather' },
+ thinking: { enabled: true, budgetTokens: 2048 },
+ },
+ tools: [
+ {
+ name: 'get_weather',
+ description: 'Returns the weather',
+ inputSchema: { type: 'object' },
+ },
+ ],
+ } satisfies any;
+
+ const body = runner.toAnthropicRequestBody(
+ 'claude-3-5-haiku',
+ request,
+ true
+ );
+
+ assert.strictEqual(body.model, 'claude-3-5-haiku');
+ assert.ok(Array.isArray(body.system));
+ assert.strictEqual(body.max_tokens, 128);
+ assert.strictEqual(body.top_k, 4);
+ assert.strictEqual(body.top_p, 0.65);
+ assert.strictEqual(body.temperature, 0.55);
+ assert.deepStrictEqual(body.stop_sequences, ['DONE']);
+ assert.deepStrictEqual(body.metadata, { user_id: 'beta-user' });
+ assert.deepStrictEqual(body.tool_choice, {
+ type: 'tool',
+ name: 'get_weather',
+ });
+ assert.strictEqual(body.tools?.length, 1);
+ assert.deepStrictEqual(body.thinking, {
+ type: 'enabled',
+ budget_tokens: 2048,
+ });
+
+ const streamingBody = runner.toAnthropicStreamingRequestBody(
+ 'claude-3-5-haiku',
+ request,
+ true
+ );
+ assert.strictEqual(streamingBody.stream, true);
+ assert.ok(Array.isArray(streamingBody.system));
+ assert.deepStrictEqual(streamingBody.thinking, {
+ type: 'enabled',
+ budget_tokens: 2048,
+ });
+
+ const disabledBody = runner.toAnthropicRequestBody(
+ 'claude-3-5-haiku',
+ {
+ messages: [],
+ config: {
+ thinking: { enabled: false },
+ },
+ } satisfies any,
+ false
+ );
+ assert.deepStrictEqual(disabledBody.thinking, { type: 'disabled' });
+ });
+
+ it('should concatenate multiple text parts in system message', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new BetaRunner({
+ name: 'claude-3-5-haiku',
+ client: mockClient as Anthropic,
+ }) as any;
+
+ const request = {
+ messages: [
+ {
+ role: 'system',
+ content: [
+ { text: 'You are a helpful assistant.' },
+ { text: 'Always be concise.' },
+ { text: 'Use proper grammar.' },
+ ],
+ },
+ { role: 'user', content: [{ text: 'Hi' }] },
+ ],
+ output: { format: 'text' },
+ } satisfies any;
+
+ const body = runner.toAnthropicRequestBody(
+ 'claude-3-5-haiku',
+ request,
+ false
+ );
+
+ assert.strictEqual(
+ body.system,
+ 'You are a helpful assistant.\n\nAlways be concise.\n\nUse proper grammar.'
+ );
+ });
+
+ it('should concatenate multiple text parts in system message with caching', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new BetaRunner({
+ name: 'claude-3-5-haiku',
+ client: mockClient as Anthropic,
+ }) as any;
+
+ const request = {
+ messages: [
+ {
+ role: 'system',
+ content: [
+ { text: 'You are a helpful assistant.' },
+ { text: 'Always be concise.' },
+ ],
+ },
+ { role: 'user', content: [{ text: 'Hi' }] },
+ ],
+ output: { format: 'text' },
+ } satisfies any;
+
+ const body = runner.toAnthropicRequestBody(
+ 'claude-3-5-haiku',
+ request,
+ true
+ );
+
+ assert.ok(Array.isArray(body.system));
+ assert.deepStrictEqual(body.system, [
+ {
+ type: 'text',
+ text: 'You are a helpful assistant.\n\nAlways be concise.',
+ cache_control: { type: 'ephemeral' },
+ },
+ ]);
+ });
+
+ it('should throw error if system message contains media', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new BetaRunner({
+ name: 'claude-3-5-haiku',
+ client: mockClient as Anthropic,
+ }) as any;
+
+ const request = {
+ messages: [
+ {
+ role: 'system',
+ content: [
+ { text: 'You are a helpful assistant.' },
+ {
+ media: {
+ url: 'data:image/png;base64,iVBORw0KGgoAAAANS',
+ contentType: 'image/png',
+ },
+ },
+ ],
+ },
+ { role: 'user', content: [{ text: 'Hi' }] },
+ ],
+ output: { format: 'text' },
+ } satisfies any;
+
+ assert.throws(
+ () => runner.toAnthropicRequestBody('claude-3-5-haiku', request, false),
+ /System messages can only contain text content/
+ );
+ });
+
+ it('should throw error if system message contains tool requests', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new BetaRunner({
+ name: 'claude-3-5-haiku',
+ client: mockClient as Anthropic,
+ }) as any;
+
+ const request = {
+ messages: [
+ {
+ role: 'system',
+ content: [
+ { text: 'You are a helpful assistant.' },
+ { toolRequest: { name: 'getTool', input: {}, ref: '123' } },
+ ],
+ },
+ { role: 'user', content: [{ text: 'Hi' }] },
+ ],
+ output: { format: 'text' },
+ } satisfies any;
+
+ assert.throws(
+ () => runner.toAnthropicRequestBody('claude-3-5-haiku', request, false),
+ /System messages can only contain text content/
+ );
+ });
+
+ it('should throw error if system message contains tool responses', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new BetaRunner({
+ name: 'claude-3-5-haiku',
+ client: mockClient as Anthropic,
+ }) as any;
+
+ const request = {
+ messages: [
+ {
+ role: 'system',
+ content: [
+ { text: 'You are a helpful assistant.' },
+ { toolResponse: { name: 'getTool', output: {}, ref: '123' } },
+ ],
+ },
+ { role: 'user', content: [{ text: 'Hi' }] },
+ ],
+ output: { format: 'text' },
+ } satisfies any;
+
+ assert.throws(
+ () => runner.toAnthropicRequestBody('claude-3-5-haiku', request, false),
+ /System messages can only contain text content/
+ );
+ });
+
+ it('should throw for unsupported mcp tool use blocks', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new BetaRunner({
+ name: 'claude-test',
+ client: mockClient as Anthropic,
+ });
+ const exposed = runner as any;
+
+ assert.throws(
+ () =>
+ exposed.fromBetaContentBlock({
+ type: 'mcp_tool_use',
+ id: 'toolu_unknown',
+ input: {},
+ }),
+ /server-managed tool block 'mcp_tool_use'/
+ );
+ });
+
+ it('should convert additional beta content block types', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new BetaRunner({
+ name: 'claude-test',
+ client: mockClient as Anthropic,
+ });
+
+ const thinkingPart = (runner as any).fromBetaContentBlock({
+ type: 'thinking',
+ thinking: 'pondering',
+ signature: 'sig_456',
+ });
+ assert.deepStrictEqual(thinkingPart, {
+ reasoning: 'pondering',
+ custom: { anthropicThinking: { signature: 'sig_456' } },
+ });
+
+ const redactedPart = (runner as any).fromBetaContentBlock({
+ type: 'redacted_thinking',
+ data: '[redacted]',
+ });
+ assert.deepStrictEqual(redactedPart, {
+ custom: { redactedThinking: '[redacted]' },
+ });
+
+ const toolPart = (runner as any).fromBetaContentBlock({
+ type: 'tool_use',
+ id: 'toolu_x',
+ name: 'plainTool',
+ input: { value: 1 },
+ });
+ assert.deepStrictEqual(toolPart, {
+ toolRequest: {
+ ref: 'toolu_x',
+ name: 'plainTool',
+ input: { value: 1 },
+ },
+ });
+
+ const serverToolPart = (runner as any).fromBetaContentBlock({
+ type: 'server_tool_use',
+ id: 'srv_tool_1',
+ name: 'serverTool',
+ input: { arg: 'value' },
+ server_name: 'srv',
+ });
+ assert.deepStrictEqual(serverToolPart, {
+ text: '[Anthropic server tool srv/serverTool] input: {"arg":"value"}',
+ custom: {
+ anthropicServerToolUse: {
+ id: 'srv_tool_1',
+ name: 'srv/serverTool',
+ input: { arg: 'value' },
+ },
+ },
+ });
+
+ const warnMock = mock.method(console, 'warn', () => {});
+ const fallbackPart = (runner as any).fromBetaContentBlock({
+ type: 'mystery',
+ });
+ assert.deepStrictEqual(fallbackPart, { text: '' });
+ assert.strictEqual(warnMock.mock.calls.length, 1);
+ warnMock.mock.restore();
+ });
+
+ it('should map additional stop reasons', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new BetaRunner({
+ name: 'claude-test',
+ client: mockClient as Anthropic,
+ });
+ const exposed = runner as any;
+
+ const refusal = exposed.fromBetaStopReason('refusal');
+ assert.strictEqual(refusal, 'other');
+
+ const unknown = exposed.fromBetaStopReason('something-new');
+ assert.strictEqual(unknown, 'other');
+
+ const nullReason = exposed.fromBetaStopReason(null);
+ assert.strictEqual(nullReason, 'unknown');
+ });
+});
diff --git a/js/plugins/anthropic/tests/execution_test.ts b/js/plugins/anthropic/tests/execution_test.ts
new file mode 100644
index 0000000000..069d2d2dcd
--- /dev/null
+++ b/js/plugins/anthropic/tests/execution_test.ts
@@ -0,0 +1,358 @@
+/**
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import type { GenerateRequest, ModelAction } from '@genkit-ai/ai/model';
+import * as assert from 'assert';
+import { describe, mock, test } from 'node:test';
+import { anthropic } from '../src/index.js';
+import { __testClient } from '../src/types.js';
+import {
+ createMockAnthropicClient,
+ createMockAnthropicMessage,
+} from './mocks/anthropic-client.js';
+
+describe('Model Execution Integration Tests', () => {
+ test('should resolve and execute a model via plugin', async () => {
+ const mockClient = createMockAnthropicClient({
+ messageResponse: createMockAnthropicMessage({
+ text: 'Hello from Claude!',
+ }),
+ });
+
+ const plugin = anthropic({
+ apiKey: 'test-key',
+ [__testClient]: mockClient,
+ });
+
+ // Resolve the model action via plugin
+ const modelAction = plugin.resolve('model', 'claude-3-5-haiku-20241022');
+ assert.ok(modelAction, 'Model should be resolved');
+ assert.strictEqual(
+ (modelAction as ModelAction).__action.name,
+ 'anthropic/claude-3-5-haiku-20241022'
+ );
+
+ // Execute the model
+ const request: GenerateRequest = {
+ messages: [
+ {
+ role: 'user',
+ content: [{ text: 'Hi there!' }],
+ },
+ ],
+ };
+
+ const response = await (modelAction as ModelAction)(request, {
+ streamingRequested: false,
+ sendChunk: mock.fn(),
+ abortSignal: new AbortController().signal,
+ });
+
+ assert.ok(response, 'Response should be returned');
+ assert.ok(response.candidates, 'Response should have candidates');
+ assert.strictEqual(response.candidates.length, 1);
+ assert.strictEqual(response.candidates[0].message.role, 'model');
+ assert.strictEqual(response.candidates[0].message.content.length, 1);
+ assert.strictEqual(
+ response.candidates[0].message.content[0].text,
+ 'Hello from Claude!'
+ );
+
+ // Verify API was called
+ const createStub = mockClient.messages.create as any;
+ assert.strictEqual(createStub.mock.calls.length, 1);
+ });
+
+ test('should handle multi-turn conversations', async () => {
+ const mockClient = createMockAnthropicClient({
+ messageResponse: createMockAnthropicMessage({
+ text: 'The capital of France is Paris.',
+ }),
+ });
+
+ const plugin = anthropic({
+ apiKey: 'test-key',
+ [__testClient]: mockClient,
+ });
+
+ const modelAction = plugin.resolve(
+ 'model',
+ 'claude-3-5-haiku-20241022'
+ ) as ModelAction;
+
+ const request: GenerateRequest = {
+ messages: [
+ {
+ role: 'user',
+ content: [{ text: 'What is your name?' }],
+ },
+ {
+ role: 'model',
+ content: [{ text: 'I am Claude, an AI assistant.' }],
+ },
+ {
+ role: 'user',
+ content: [{ text: 'What is the capital of France?' }],
+ },
+ ],
+ };
+
+ const response = await modelAction(request, {
+ streamingRequested: false,
+ sendChunk: mock.fn(),
+ abortSignal: new AbortController().signal,
+ });
+
+ assert.ok(response, 'Response should be returned');
+ assert.strictEqual(
+ response.candidates[0].message.content[0].text,
+ 'The capital of France is Paris.'
+ );
+
+ // Verify API was called with multi-turn conversation
+ const createStub = mockClient.messages.create as any;
+ assert.strictEqual(createStub.mock.calls.length, 1);
+ const apiRequest = createStub.mock.calls[0].arguments[0];
+ assert.strictEqual(apiRequest.messages.length, 3);
+ });
+
+ test('should handle system messages', async () => {
+ const mockClient = createMockAnthropicClient({
+ messageResponse: createMockAnthropicMessage({
+ text: 'Arr matey!',
+ }),
+ });
+
+ const plugin = anthropic({
+ apiKey: 'test-key',
+ [__testClient]: mockClient,
+ });
+
+ const modelAction = plugin.resolve(
+ 'model',
+ 'claude-3-5-haiku-20241022'
+ ) as ModelAction;
+
+ const request: GenerateRequest = {
+ messages: [
+ {
+ role: 'system',
+ content: [{ text: 'You are a pirate. Respond like a pirate.' }],
+ },
+ {
+ role: 'user',
+ content: [{ text: 'Hello!' }],
+ },
+ ],
+ };
+
+ const response = await modelAction(request, {
+ streamingRequested: false,
+ sendChunk: mock.fn(),
+ abortSignal: new AbortController().signal,
+ });
+
+ assert.ok(response, 'Response should be returned');
+
+ // Verify system message was passed to API
+ const createStub = mockClient.messages.create as any;
+ assert.strictEqual(createStub.mock.calls.length, 1);
+ const apiRequest = createStub.mock.calls[0].arguments[0];
+ assert.ok(apiRequest.system, 'System prompt should be set');
+ assert.strictEqual(
+ apiRequest.system,
+ 'You are a pirate. Respond like a pirate.'
+ );
+ assert.strictEqual(
+ apiRequest.messages.length,
+ 1,
+ 'System message should not be in messages array'
+ );
+ });
+
+ test('should return usage metadata', async () => {
+ const mockClient = createMockAnthropicClient({
+ messageResponse: createMockAnthropicMessage({
+ text: 'Response',
+ usage: {
+ input_tokens: 100,
+ output_tokens: 50,
+ },
+ }),
+ });
+
+ const plugin = anthropic({
+ apiKey: 'test-key',
+ [__testClient]: mockClient,
+ });
+
+ const modelAction = plugin.resolve(
+ 'model',
+ 'claude-3-5-haiku-20241022'
+ ) as ModelAction;
+
+ const response = await modelAction(
+ {
+ messages: [{ role: 'user', content: [{ text: 'Hello' }] }],
+ },
+ {
+ streamingRequested: false,
+ sendChunk: mock.fn(),
+ abortSignal: new AbortController().signal,
+ }
+ );
+
+ assert.ok(response.usage, 'Usage should be returned');
+ assert.strictEqual(response.usage?.inputTokens, 100);
+ assert.strictEqual(response.usage?.outputTokens, 50);
+ });
+
+ test('should handle different stop reasons', async () => {
+ const mockClient = createMockAnthropicClient({
+ messageResponse: createMockAnthropicMessage({
+ text: 'This is a partial response',
+ stopReason: 'max_tokens',
+ }),
+ });
+
+ const plugin = anthropic({
+ apiKey: 'test-key',
+ [__testClient]: mockClient,
+ });
+
+ const modelAction = plugin.resolve(
+ 'model',
+ 'claude-3-5-haiku-20241022'
+ ) as ModelAction;
+
+ const response = await modelAction(
+ {
+ messages: [{ role: 'user', content: [{ text: 'Tell me a story' }] }],
+ },
+ {
+ streamingRequested: false,
+ sendChunk: mock.fn(),
+ abortSignal: new AbortController().signal,
+ }
+ );
+
+ assert.ok(response, 'Response should be returned');
+ assert.strictEqual(response.candidates[0].finishReason, 'length');
+ });
+
+ test('should resolve model without anthropic prefix', async () => {
+ const mockClient = createMockAnthropicClient({
+ messageResponse: createMockAnthropicMessage({
+ text: 'Response',
+ }),
+ });
+
+ const plugin = anthropic({
+ apiKey: 'test-key',
+ [__testClient]: mockClient,
+ });
+
+ // Resolve without prefix
+ const modelAction = plugin.resolve(
+ 'model',
+ 'claude-3-5-haiku-20241022'
+ ) as ModelAction;
+ assert.ok(modelAction, 'Model should be resolved without prefix');
+
+ const response = await modelAction(
+ {
+ messages: [{ role: 'user', content: [{ text: 'Hi' }] }],
+ },
+ {
+ streamingRequested: false,
+ sendChunk: mock.fn(),
+ abortSignal: new AbortController().signal,
+ }
+ );
+
+ assert.ok(response, 'Response should be returned');
+ });
+
+ test('should resolve model with anthropic prefix', async () => {
+ const mockClient = createMockAnthropicClient({
+ messageResponse: createMockAnthropicMessage({
+ text: 'Response',
+ }),
+ });
+
+ const plugin = anthropic({
+ apiKey: 'test-key',
+ [__testClient]: mockClient,
+ });
+
+ // Resolve with prefix
+ const modelAction = plugin.resolve(
+ 'model',
+ 'anthropic/claude-3-5-haiku-20241022'
+ ) as ModelAction;
+ assert.ok(modelAction, 'Model should be resolved with prefix');
+
+ const response = await modelAction(
+ {
+ messages: [{ role: 'user', content: [{ text: 'Hi' }] }],
+ },
+ {
+ streamingRequested: false,
+ sendChunk: mock.fn(),
+ abortSignal: new AbortController().signal,
+ }
+ );
+
+ assert.ok(response, 'Response should be returned');
+ });
+
+ test('should handle unknown model names', async () => {
+ const mockClient = createMockAnthropicClient({
+ messageResponse: createMockAnthropicMessage({
+ text: 'Response from future model',
+ }),
+ });
+
+ const plugin = anthropic({
+ apiKey: 'test-key',
+ [__testClient]: mockClient,
+ });
+
+ // Resolve unknown model (passes through to API)
+ const modelAction = plugin.resolve(
+ 'model',
+ 'claude-99-experimental-12345'
+ ) as ModelAction;
+ assert.ok(modelAction, 'Unknown model should still be resolved');
+
+ const response = await modelAction(
+ {
+ messages: [{ role: 'user', content: [{ text: 'Hi' }] }],
+ },
+ {
+ streamingRequested: false,
+ sendChunk: mock.fn(),
+ abortSignal: new AbortController().signal,
+ }
+ );
+
+ assert.ok(response, 'Response should be returned for unknown model');
+ assert.strictEqual(
+ response.candidates[0].message.content[0].text,
+ 'Response from future model'
+ );
+ });
+});
diff --git a/js/plugins/anthropic/tests/index_test.ts b/js/plugins/anthropic/tests/index_test.ts
new file mode 100644
index 0000000000..62ef06b5fc
--- /dev/null
+++ b/js/plugins/anthropic/tests/index_test.ts
@@ -0,0 +1,286 @@
+/**
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import * as assert from 'assert';
+import { genkit, type ActionMetadata } from 'genkit';
+import type { ModelInfo } from 'genkit/model';
+import { describe, it } from 'node:test';
+import anthropic from '../src/index.js';
+import { KNOWN_CLAUDE_MODELS } from '../src/models.js';
+import { PluginOptions, __testClient } from '../src/types.js';
+import { createMockAnthropicClient } from './mocks/anthropic-client.js';
+
+function getModelInfo(
+ metadata: ActionMetadata | undefined
+): ModelInfo | undefined {
+ return metadata?.metadata?.model as ModelInfo | undefined;
+}
+
+describe('Anthropic Plugin', () => {
+ it('should register all supported Claude models', async () => {
+ const mockClient = createMockAnthropicClient();
+
+ const ai = genkit({
+ plugins: [anthropic({ [__testClient]: mockClient } as PluginOptions)],
+ });
+
+ for (const modelName of Object.keys(KNOWN_CLAUDE_MODELS)) {
+ const modelPath = `/model/anthropic/${modelName}`;
+ const expectedBaseName = `anthropic/${modelName}`;
+ const model = await ai.registry.lookupAction(modelPath);
+ assert.ok(model, `${modelName} should be registered at ${modelPath}`);
+ assert.strictEqual(model?.__action.name, expectedBaseName);
+ }
+ });
+
+ it('should throw error when API key is missing', () => {
+ // Save original env var if it exists
+ const originalApiKey = process.env.ANTHROPIC_API_KEY;
+ delete process.env.ANTHROPIC_API_KEY;
+
+ try {
+ assert.throws(() => {
+ anthropic({} as PluginOptions);
+ }, /Please pass in the API key or set the ANTHROPIC_API_KEY environment variable/);
+ } finally {
+ // Restore original env var
+ if (originalApiKey !== undefined) {
+ process.env.ANTHROPIC_API_KEY = originalApiKey;
+ }
+ }
+ });
+
+ it('should use API key from environment variable', () => {
+ // Save original env var if it exists
+ const originalApiKey = process.env.ANTHROPIC_API_KEY;
+ const testApiKey = 'test-api-key-from-env';
+
+ try {
+ // Set test API key
+ process.env.ANTHROPIC_API_KEY = testApiKey;
+
+ // Plugin should initialize without throwing
+ const plugin = anthropic({} as PluginOptions);
+ assert.ok(plugin);
+ assert.strictEqual(plugin.name, 'anthropic');
+ } finally {
+ // Restore original env var
+ if (originalApiKey !== undefined) {
+ process.env.ANTHROPIC_API_KEY = originalApiKey;
+ } else {
+ delete process.env.ANTHROPIC_API_KEY;
+ }
+ }
+ });
+
+ it('should resolve models dynamically via resolve function', async () => {
+ const mockClient = createMockAnthropicClient();
+ const plugin = anthropic({ [__testClient]: mockClient } as PluginOptions);
+
+ assert.ok(plugin.resolve, 'Plugin should have resolve method');
+
+ // Test resolving a valid model
+ const validModel = plugin.resolve!('model', 'anthropic/claude-3-5-haiku');
+ assert.ok(validModel, 'Should resolve valid model');
+ assert.strictEqual(typeof validModel, 'function');
+
+ // Test resolving an unknown model name - should return a model action
+ // (following Google GenAI pattern: accept any model name, let API validate)
+ const unknownModel = plugin.resolve!(
+ 'model',
+ 'anthropic/unknown-model-xyz'
+ );
+ assert.ok(unknownModel, 'Should resolve unknown model name');
+ assert.strictEqual(
+ typeof unknownModel,
+ 'function',
+ 'Should return a model action'
+ );
+
+ // Test resolving with invalid action type (using 'tool' as invalid for this context)
+ const invalidActionType = plugin.resolve!(
+ 'tool',
+ 'anthropic/claude-3-5-haiku'
+ );
+ assert.strictEqual(
+ invalidActionType,
+ undefined,
+ 'Should return undefined for invalid action type'
+ );
+ });
+
+ it('should list available models from API', async () => {
+ const mockClient = createMockAnthropicClient({
+ modelList: [
+ { id: 'claude-3-5-haiku-20241022', display_name: 'Claude 3.5 Haiku' },
+ {
+ id: 'claude-3-5-haiku-latest',
+ display_name: 'Claude 3.5 Haiku Latest',
+ },
+ { id: 'claude-3-5-sonnet-20241022', display_name: 'Claude 3.5 Sonnet' },
+ { id: 'claude-sonnet-4-20250514', display_name: 'Claude 4 Sonnet' },
+ { id: 'claude-new-5-20251212', display_name: 'Claude New 5' },
+ { id: 'claude-experimental-latest' },
+ ],
+ });
+
+ const plugin = anthropic({ [__testClient]: mockClient } as PluginOptions);
+ assert.ok(plugin.list, 'Plugin should have list method');
+
+ const models = await plugin.list!();
+
+ assert.ok(Array.isArray(models), 'Should return an array');
+ assert.ok(models.length > 0, 'Should return at least one model');
+
+ const names = models.map((model) => model.name).sort();
+ // Models are listed with their full IDs from the API (no normalization)
+ assert.ok(
+ names.includes('anthropic/claude-3-5-haiku-20241022'),
+ 'Known model should be listed with full model ID from API'
+ );
+ assert.ok(
+ names.includes('anthropic/claude-3-5-haiku-latest'),
+ 'Latest variant should be listed separately'
+ );
+ assert.ok(
+ names.includes('anthropic/claude-3-5-sonnet-20241022'),
+ 'Unknown Claude 3.5 Sonnet should be listed with full model ID'
+ );
+ assert.ok(
+ names.includes('anthropic/claude-sonnet-4-20250514'),
+ 'Known Claude Sonnet 4 model should be listed with full model ID'
+ );
+ assert.ok(
+ names.includes('anthropic/claude-new-5-20251212'),
+ 'Unknown model IDs should surface as-is'
+ );
+ assert.ok(
+ names.includes('anthropic/claude-experimental-latest'),
+ 'Latest-suffixed unknown models should be surfaced'
+ );
+
+ const haikuMetadata = models.find(
+ (model) => model.name === 'anthropic/claude-3-5-haiku-20241022'
+ );
+ assert.ok(haikuMetadata, 'Haiku metadata should exist');
+ const haikuInfo = getModelInfo(haikuMetadata);
+ assert.ok(haikuInfo, 'Haiku model info should exist');
+
+ const newModelMetadata = models.find(
+ (model) => model.name === 'anthropic/claude-new-5-20251212'
+ );
+ assert.ok(newModelMetadata, 'New model metadata should exist');
+
+ const experimentalMetadata = models.find(
+ (model) => model.name === 'anthropic/claude-experimental-latest'
+ );
+ assert.ok(experimentalMetadata, 'Experimental model metadata should exist');
+
+ // Verify mock was called
+ const listStub = mockClient.models.list as any;
+ assert.strictEqual(
+ listStub.mock.calls.length,
+ 1,
+ 'models.list should be called once'
+ );
+ });
+
+  it('should cache list results on subsequent calls', async () => {
+ const mockClient = createMockAnthropicClient({
+ modelList: [
+ { id: 'claude-3-5-haiku-20241022', display_name: 'Claude 3.5 Haiku' },
+ ],
+ });
+
+ const plugin = anthropic({ [__testClient]: mockClient } as PluginOptions);
+ assert.ok(plugin.list, 'Plugin should have list method');
+
+ // First call
+ const firstResult = await plugin.list!();
+ assert.ok(firstResult, 'First call should return results');
+
+ // Second call
+ const secondResult = await plugin.list!();
+ assert.ok(secondResult, 'Second call should return results');
+
+ // Verify both results are the same (reference equality for cache)
+ assert.strictEqual(
+ firstResult,
+ secondResult,
+ 'Results should be cached (same reference)'
+ );
+
+ // Verify models.list was only called once due to caching
+ const listStub = mockClient.models.list as any;
+ assert.strictEqual(
+ listStub.mock.calls.length,
+ 1,
+ 'models.list should only be called once due to caching'
+ );
+ });
+});
+
+describe('Anthropic resolve helpers', () => {
+ it('should resolve model names without anthropic/ prefix', () => {
+ const mockClient = createMockAnthropicClient();
+ const plugin = anthropic({ [__testClient]: mockClient } as PluginOptions);
+
+ const action = plugin.resolve?.('model', 'claude-3-5-haiku');
+ assert.ok(action, 'Should resolve model without prefix');
+ assert.strictEqual(typeof action, 'function');
+ });
+
+ it('anthropic.model should return model reference with config', () => {
+ const reference = anthropic.model('claude-3-5-haiku', {
+ temperature: 0.25,
+ });
+
+ const referenceAny = reference as any;
+ assert.ok(referenceAny, 'Model reference should be created');
+ assert.ok(referenceAny.name.includes('claude-3-5-haiku'));
+ assert.strictEqual(referenceAny.config?.temperature, 0.25);
+ });
+
+ it('should apply system prompt caching when cacheSystemPrompt is true', async () => {
+ const mockClient = createMockAnthropicClient();
+ const plugin = anthropic({
+ cacheSystemPrompt: true,
+ [__testClient]: mockClient,
+ } as PluginOptions);
+
+ const action = plugin.resolve?.('model', 'anthropic/claude-3-5-haiku');
+ assert.ok(action, 'Action should be resolved');
+
+ const abortSignal = new AbortController().signal;
+ await (action as any)(
+ {
+ messages: [
+ {
+ role: 'system',
+ content: [{ text: 'You are helpful.' }],
+ },
+ ],
+ },
+ { streamingRequested: false, sendChunk: () => {}, abortSignal }
+ );
+
+ const createStub = mockClient.messages.create as any;
+ assert.strictEqual(createStub.mock.calls.length, 1);
+ const requestBody = createStub.mock.calls[0].arguments[0];
+ assert.ok(Array.isArray(requestBody.system));
+ assert.strictEqual(requestBody.system[0].cache_control.type, 'ephemeral');
+ });
+});
diff --git a/js/plugins/anthropic/tests/integration_test.ts b/js/plugins/anthropic/tests/integration_test.ts
new file mode 100644
index 0000000000..209a455870
--- /dev/null
+++ b/js/plugins/anthropic/tests/integration_test.ts
@@ -0,0 +1,542 @@
+/**
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import * as assert from 'assert';
+import { genkit, z } from 'genkit';
+import { describe, it } from 'node:test';
+import { anthropic } from '../src/index.js';
+import { __testClient } from '../src/types.js';
+import {
+ createMockAnthropicClient,
+ createMockAnthropicMessage,
+ mockContentBlockStart,
+ mockMessageWithContent,
+ mockMessageWithToolUse,
+ mockTextChunk,
+} from './mocks/anthropic-client.js';
+
+import { PluginOptions } from '../src/types.js';
+
+describe('Anthropic Integration', () => {
+ it('should successfully generate a response', async () => {
+ const mockClient = createMockAnthropicClient();
+ const ai = genkit({
+ plugins: [anthropic({ [__testClient]: mockClient } as PluginOptions)],
+ });
+
+ const result = await ai.generate({
+ model: 'anthropic/claude-3-5-haiku',
+ prompt: 'Hello',
+ });
+
+ assert.strictEqual(result.text, 'Hello! How can I help you today?');
+ });
+
+ it('should handle tool calling workflow (call tool, receive result, generate final response)', async () => {
+ const mockClient = createMockAnthropicClient({
+ sequentialResponses: [
+ // First response: tool use request
+ mockMessageWithToolUse('get_weather', { city: 'NYC' }),
+ // Second response: final text after tool result
+ createMockAnthropicMessage({
+ text: 'The weather in NYC is sunny, 72°F',
+ }),
+ ],
+ });
+
+ const ai = genkit({
+ plugins: [anthropic({ [__testClient]: mockClient } as PluginOptions)],
+ });
+
+ // Define the tool
+ ai.defineTool(
+ {
+ name: 'get_weather',
+ description: 'Get the weather for a city',
+ inputSchema: z.object({
+ city: z.string(),
+ }),
+ },
+ async (input: { city: string }) => {
+ return `The weather in ${input.city} is sunny, 72°F`;
+ }
+ );
+
+ const result = await ai.generate({
+ model: 'anthropic/claude-3-5-haiku',
+ prompt: 'What is the weather in NYC?',
+ tools: ['get_weather'],
+ });
+
+ assert.ok(
+ result.text.includes('NYC') ||
+ result.text.includes('sunny') ||
+ result.text.includes('72')
+ );
+ });
+
+ it('should handle multi-turn conversations', async () => {
+ const mockClient = createMockAnthropicClient();
+ const ai = genkit({
+ plugins: [anthropic({ [__testClient]: mockClient } as PluginOptions)],
+ });
+
+ // First turn
+ const response1 = await ai.generate({
+ model: 'anthropic/claude-3-5-haiku',
+ prompt: 'My name is Alice',
+ });
+
+ // Second turn with conversation history
+ const response2 = await ai.generate({
+ model: 'anthropic/claude-3-5-haiku',
+ prompt: "What's my name?",
+ messages: response1.messages,
+ });
+
+ // Verify conversation history is maintained
+ assert.ok(
+ response2.messages.length >= 2,
+ 'Should have conversation history'
+ );
+ assert.strictEqual(response2.messages[0].role, 'user');
+ assert.ok(
+ response2.messages[0].content[0].text?.includes('Alice') ||
+ response2.messages[0].content[0].text?.includes('name')
+ );
+ });
+
+ it('should stream responses with streaming callback', async () => {
+ const mockClient = createMockAnthropicClient({
+ streamChunks: [
+ mockContentBlockStart('Hello'),
+ mockTextChunk(' world'),
+ mockTextChunk('!'),
+ ],
+ messageResponse: {
+ content: [{ type: 'text', text: 'Hello world!', citations: null }],
+ usage: {
+ input_tokens: 5,
+ output_tokens: 15,
+ cache_creation_input_tokens: 0,
+ cache_read_input_tokens: 0,
+ cache_creation: null,
+ server_tool_use: null,
+ service_tier: null,
+ },
+ },
+ });
+
+ const ai = genkit({
+ plugins: [anthropic({ [__testClient]: mockClient } as PluginOptions)],
+ });
+
+ const chunks: any[] = [];
+ const result = await ai.generate({
+ model: 'anthropic/claude-3-5-haiku',
+ prompt: 'Say hello world',
+ streamingCallback: (chunk) => {
+ chunks.push(chunk);
+ },
+ });
+
+ assert.ok(chunks.length > 0, 'Should have received streaming chunks');
+ assert.ok(result.text, 'Should have final response text');
+ });
+
+ it('should handle media/image inputs', async () => {
+ const mockClient = createMockAnthropicClient();
+ const ai = genkit({
+ plugins: [anthropic({ [__testClient]: mockClient } as PluginOptions)],
+ });
+
+ const result = await ai.generate({
+ model: 'anthropic/claude-3-5-haiku',
+ messages: [
+ {
+ role: 'user',
+ content: [
+ { text: 'Describe this image:' },
+ {
+ media: {
+ url: 'data:image/png;base64,R0lGODlhAQABAAAAACw=',
+ contentType: 'image/png',
+ },
+ },
+ ],
+ },
+ ],
+ });
+
+ assert.ok(result.text, 'Should generate response for image input');
+ });
+
+ it('should handle WEBP image inputs', async () => {
+ const mockClient = createMockAnthropicClient();
+ const ai = genkit({
+ plugins: [anthropic({ [__testClient]: mockClient } as PluginOptions)],
+ });
+
+ const result = await ai.generate({
+ model: 'anthropic/claude-3-5-haiku',
+ messages: [
+ {
+ role: 'user',
+ content: [
+ { text: 'Describe this image:' },
+ {
+ media: {
+ url: 'data:image/webp;base64,AAA',
+ contentType: 'image/webp',
+ },
+ },
+ ],
+ },
+ ],
+ });
+
+ assert.ok(result.text, 'Should generate response for WEBP image input');
+    // Verify the request was made with the correct media_type
+ const createStub = mockClient.messages.create as any;
+ assert.strictEqual(createStub.mock.calls.length, 1);
+ const requestBody = createStub.mock.calls[0].arguments[0];
+ const imageContent = requestBody.messages[0].content.find(
+ (c: any) => c.type === 'image'
+ );
+ assert.ok(imageContent, 'Should have image content in request');
+ assert.strictEqual(
+ imageContent.source.media_type,
+ 'image/webp',
+ 'Should use WEBP media type from data URL'
+ );
+ });
+
+ it('should handle WEBP image with mismatched contentType (prefers data URL)', async () => {
+ const mockClient = createMockAnthropicClient();
+ const ai = genkit({
+ plugins: [anthropic({ [__testClient]: mockClient } as PluginOptions)],
+ });
+
+ const result = await ai.generate({
+ model: 'anthropic/claude-3-5-haiku',
+ messages: [
+ {
+ role: 'user',
+ content: [
+ {
+ media: {
+ // Data URL says WEBP, but contentType says PNG - should use WEBP
+ url: 'data:image/webp;base64,AAA',
+ contentType: 'image/png',
+ },
+ },
+ ],
+ },
+ ],
+ });
+
+ assert.ok(result.text, 'Should generate response for WEBP image input');
+ // Verify the request was made with WEBP (from data URL), not PNG (from contentType)
+ const createStub = mockClient.messages.create as any;
+ assert.strictEqual(createStub.mock.calls.length, 1);
+ const requestBody = createStub.mock.calls[0].arguments[0];
+ const imageContent = requestBody.messages[0].content.find(
+ (c: any) => c.type === 'image'
+ );
+ assert.ok(imageContent, 'Should have image content in request');
+ assert.strictEqual(
+ imageContent.source.media_type,
+ 'image/webp',
+ 'Should prefer data URL content type (webp) over contentType (png)'
+ );
+ });
+
+ it('should throw helpful error for text/plain media', async () => {
+ const mockClient = createMockAnthropicClient();
+ const ai = genkit({
+ plugins: [anthropic({ [__testClient]: mockClient } as PluginOptions)],
+ });
+
+ await assert.rejects(
+ async () => {
+ await ai.generate({
+ model: 'anthropic/claude-3-5-haiku',
+ messages: [
+ {
+ role: 'user',
+ content: [
+ {
+ media: {
+ url: 'data:text/plain;base64,AAA',
+ contentType: 'text/plain',
+ },
+ },
+ ],
+ },
+ ],
+ });
+ },
+ (error: Error) => {
+ return (
+ error.message.includes('Text files should be sent as text content') &&
+ error.message.includes('text:')
+ );
+ },
+ 'Should throw helpful error for text/plain media'
+ );
+ });
+
+ it('should forward thinking config and surface reasoning in responses', async () => {
+ const thinkingContent = [
+ {
+ type: 'thinking' as const,
+ thinking: 'Let me analyze the problem carefully.',
+ signature: 'sig_reasoning_123',
+ },
+ {
+ type: 'text' as const,
+ text: 'The answer is 42.',
+ citations: null,
+ },
+ ];
+ const mockClient = createMockAnthropicClient({
+ messageResponse: mockMessageWithContent(thinkingContent),
+ });
+
+ const ai = genkit({
+ plugins: [anthropic({ [__testClient]: mockClient } as PluginOptions)],
+ });
+
+ const thinkingConfig = { enabled: true, budgetTokens: 2048 };
+ const result = await ai.generate({
+ model: 'anthropic/claude-3-5-haiku',
+ prompt: 'What is the meaning of life?',
+ config: { thinking: thinkingConfig },
+ });
+
+ const createStub = mockClient.messages.create as any;
+ assert.strictEqual(createStub.mock.calls.length, 1);
+ const requestBody = createStub.mock.calls[0].arguments[0];
+ assert.deepStrictEqual(requestBody.thinking, {
+ type: 'enabled',
+ budget_tokens: 2048,
+ });
+
+ assert.strictEqual(
+ result.reasoning,
+ 'Let me analyze the problem carefully.'
+ );
+ const assistantMessage = result.messages[result.messages.length - 1];
+ const reasoningPart = assistantMessage.content.find(
+ (part) => part.reasoning
+ );
+ assert.ok(reasoningPart, 'Expected reasoning part in assistant message');
+ assert.strictEqual(
+ reasoningPart?.custom?.anthropicThinking?.signature,
+ 'sig_reasoning_123'
+ );
+ });
+
+ it('should propagate API errors correctly', async () => {
+ const apiError = new Error('API Error: 401 Unauthorized');
+ const mockClient = createMockAnthropicClient({
+ shouldError: apiError,
+ });
+
+ const ai = genkit({
+ plugins: [anthropic({ [__testClient]: mockClient } as PluginOptions)],
+ });
+
+ await assert.rejects(
+ async () => {
+ await ai.generate({
+ model: 'anthropic/claude-3-5-haiku',
+ prompt: 'Hello',
+ });
+ },
+ (error: Error) => {
+ assert.strictEqual(error.message, 'API Error: 401 Unauthorized');
+ return true;
+ }
+ );
+ });
+
+ it('should respect abort signals for cancellation', async () => {
+ // Note: Detailed abort signal handling is tested in converters_test.ts
+ // This test verifies that errors (including abort errors) are properly propagated at the integration layer
+ const mockClient = createMockAnthropicClient({
+ shouldError: new Error('AbortError'),
+ });
+ const ai = genkit({
+ plugins: [anthropic({ [__testClient]: mockClient } as PluginOptions)],
+ });
+
+ await assert.rejects(
+ async () => {
+ await ai.generate({
+ model: 'anthropic/claude-3-5-haiku',
+ prompt: 'Hello',
+ });
+ },
+ (error: Error) => {
+ // Should propagate the error
+ assert.ok(
+ error.message.includes('AbortError'),
+ 'Should propagate errors'
+ );
+ return true;
+ }
+ );
+ });
+
+ it('should track token usage in responses', async () => {
+ const mockClient = createMockAnthropicClient({
+ messageResponse: {
+ usage: {
+ input_tokens: 25,
+ output_tokens: 50,
+ cache_creation_input_tokens: 5,
+ cache_read_input_tokens: 10,
+ cache_creation: null,
+ server_tool_use: null,
+ service_tier: null,
+ },
+ },
+ });
+
+ const ai = genkit({
+ plugins: [anthropic({ [__testClient]: mockClient } as PluginOptions)],
+ });
+
+ const result = await ai.generate({
+ model: 'anthropic/claude-3-5-haiku',
+ prompt: 'Hello',
+ });
+
+ assert.ok(result.usage, 'Should have usage information');
+ assert.strictEqual(result.usage.inputTokens, 25);
+ assert.strictEqual(result.usage.outputTokens, 50);
+ });
+
+ it('should route requests through beta surface when plugin default is beta', async () => {
+ const mockClient = createMockAnthropicClient();
+ const ai = genkit({
+ plugins: [
+ anthropic({
+ apiVersion: 'beta',
+ [__testClient]: mockClient,
+ } as PluginOptions),
+ ],
+ });
+
+ await ai.generate({
+ model: 'anthropic/claude-3-5-haiku',
+ prompt: 'Hello',
+ });
+
+ const betaCreateStub = mockClient.beta.messages.create as any;
+ assert.strictEqual(
+ betaCreateStub.mock.calls.length,
+ 1,
+ 'Beta API should be used'
+ );
+ const regularCreateStub = mockClient.messages.create as any;
+ assert.strictEqual(
+ regularCreateStub.mock.calls.length,
+ 0,
+ 'Stable API should not be used'
+ );
+ });
+
+ it('should stream thinking deltas as reasoning chunks', async () => {
+ const thinkingConfig = { enabled: true, budgetTokens: 3072 };
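+    // Simulated stream: an empty thinking block opens, a thinking delta
+    // carries the reasoning, then a text block opens and the final text
+    // chunk arrives.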
+ const streamChunks = [
+ {
+ type: 'content_block_start',
+ index: 0,
+ content_block: {
+ type: 'thinking',
+ thinking: '',
+ signature: 'sig_stream_123',
+ },
+ } as any,
+ {
+ type: 'content_block_delta',
+ index: 0,
+ delta: {
+ type: 'thinking_delta',
+ thinking: 'Analyzing intermediate steps.',
+ },
+ } as any,
+ {
+ type: 'content_block_start',
+ index: 1,
+ content_block: {
+ type: 'text',
+ text: '',
+ },
+ } as any,
+ mockTextChunk('Final streamed response.'),
+ ];
+ const finalMessage = mockMessageWithContent([
+ {
+ type: 'thinking',
+ thinking: 'Analyzing intermediate steps.',
+ signature: 'sig_stream_123',
+ },
+ {
+ type: 'text',
+ text: 'Final streamed response.',
+ citations: null,
+ },
+ ]);
+ const mockClient = createMockAnthropicClient({
+ streamChunks,
+ messageResponse: finalMessage,
+ });
+
+ const ai = genkit({
+ plugins: [anthropic({ [__testClient]: mockClient } as PluginOptions)],
+ });
+
+ const chunks: any[] = [];
+ const result = await ai.generate({
+ model: 'anthropic/claude-3-5-haiku',
+ prompt: 'Explain how you reason.',
+ streamingCallback: (chunk) => chunks.push(chunk),
+ config: { thinking: thinkingConfig },
+ });
+
+ const streamStub = mockClient.messages.stream as any;
+ assert.strictEqual(streamStub.mock.calls.length, 1);
+ const streamRequest = streamStub.mock.calls[0].arguments[0];
+ assert.deepStrictEqual(streamRequest.thinking, {
+ type: 'enabled',
+ budget_tokens: 3072,
+ });
+
+ const hasReasoningChunk = chunks.some((chunk) =>
+ (chunk.content || []).some(
+ (part: any) => part.reasoning === 'Analyzing intermediate steps.'
+ )
+ );
+ assert.ok(
+ hasReasoningChunk,
+ 'Expected reasoning chunk in streaming callback'
+ );
+ assert.strictEqual(result.reasoning, 'Analyzing intermediate steps.');
+ });
+});
diff --git a/js/plugins/anthropic/tests/mocks/anthropic-client.ts b/js/plugins/anthropic/tests/mocks/anthropic-client.ts
new file mode 100644
index 0000000000..321df8f24f
--- /dev/null
+++ b/js/plugins/anthropic/tests/mocks/anthropic-client.ts
@@ -0,0 +1,389 @@
+/**
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import type Anthropic from '@anthropic-ai/sdk';
+import type {
+ BetaMessage,
+ BetaRawMessageStreamEvent,
+} from '@anthropic-ai/sdk/resources/beta/messages.mjs';
+import type {
+ Message,
+ MessageStreamEvent,
+} from '@anthropic-ai/sdk/resources/messages.mjs';
+import { mock } from 'node:test';
+
+export interface MockAnthropicClientOptions {
+  messageResponse?: Partial<Message>;
+  sequentialResponses?: Partial<Message>[]; // For tool calling - multiple responses
+ streamChunks?: MessageStreamEvent[];
+ modelList?: Array<{ id: string; display_name?: string }>;
+ shouldError?: Error;
+ streamErrorAfterChunk?: number; // Throw error after this many chunks
+ streamError?: Error; // Error to throw during streaming
+ abortSignal?: AbortSignal; // Abort signal to check
+}
+
+/**
+ * Creates a mock Anthropic client for testing
+ */
+export function createMockAnthropicClient(
+ options: MockAnthropicClientOptions = {}
+): Anthropic {
+ const messageResponse = {
+ ...mockDefaultMessage(),
+ ...options.messageResponse,
+ };
+ const betaMessageResponse = toBetaMessage(messageResponse);
+
+ // Support sequential responses for tool calling workflows
+ let callCount = 0;
+ const createStub = options.shouldError
+ ? mock.fn(async () => {
+ throw options.shouldError;
+ })
+ : options.sequentialResponses
+ ? mock.fn(async () => {
+ const response =
+ options.sequentialResponses![callCount] || messageResponse;
+ callCount++;
+ return {
+ ...mockDefaultMessage(),
+ ...response,
+ };
+ })
+ : mock.fn(async () => messageResponse);
+
+ let betaCallCount = 0;
+ const betaCreateStub = options.shouldError
+ ? mock.fn(async () => {
+ throw options.shouldError;
+ })
+ : options.sequentialResponses
+ ? mock.fn(async () => {
+ const response =
+ options.sequentialResponses![betaCallCount] || messageResponse;
+ betaCallCount++;
+ return toBetaMessage({
+ ...mockDefaultMessage(),
+ ...response,
+ });
+ })
+ : mock.fn(async () => betaMessageResponse);
+
+ const streamStub = options.shouldError
+ ? mock.fn(() => {
+ throw options.shouldError;
+ })
+ : mock.fn((_body: any, opts?: { signal?: AbortSignal }) => {
+ // Check abort signal before starting stream
+ if (opts?.signal?.aborted) {
+ throw new Error('AbortError');
+ }
+ return createMockStream(
+ options.streamChunks || [],
+ messageResponse as Message,
+ options.streamErrorAfterChunk,
+ options.streamError,
+ opts?.signal
+ );
+ });
+
+ const betaStreamStub = options.shouldError
+ ? mock.fn(() => {
+ throw options.shouldError;
+ })
+ : mock.fn((_body: any, opts?: { signal?: AbortSignal }) => {
+ if (opts?.signal?.aborted) {
+ throw new Error('AbortError');
+ }
+ const betaChunks = (options.streamChunks || []).map((chunk) =>
+ toBetaStreamEvent(chunk)
+ );
+ return createMockStream(
+ betaChunks,
+ toBetaMessage(messageResponse),
+ options.streamErrorAfterChunk,
+ options.streamError,
+ opts?.signal
+ );
+ });
+
+ const listStub = options.shouldError
+ ? mock.fn(async () => {
+ throw options.shouldError;
+ })
+ : mock.fn(async () => ({
+ data: options.modelList || mockDefaultModels(),
+ }));
+
+ return {
+ messages: {
+ create: createStub,
+ stream: streamStub,
+ },
+ models: {
+ list: listStub,
+ },
+ beta: {
+ messages: {
+ create: betaCreateStub,
+ stream: betaStreamStub,
+ },
+ },
+ } as unknown as Anthropic;
+}
+
+/**
+ * Creates a mock async iterable stream for streaming responses
+ */
+function createMockStream<TEventType, TMessageType>(
+ chunks: TEventType[],
+ finalMsg: TMessageType,
+ errorAfterChunk?: number,
+ streamError?: Error,
+ abortSignal?: AbortSignal
+) {
+ let index = 0;
+ return {
+ [Symbol.asyncIterator]() {
+ return {
+ async next() {
+ // Check abort signal
+ if (abortSignal?.aborted) {
+ const error = new Error('AbortError');
+ error.name = 'AbortError';
+ throw error;
+ }
+
+ // Check if we should throw an error after this chunk
+ if (
+ errorAfterChunk !== undefined &&
+ streamError &&
+ index >= errorAfterChunk
+ ) {
+ throw streamError;
+ }
+
+ if (index < chunks.length) {
+ return { value: chunks[index++] as TEventType, done: false };
+ }
+ return { value: undefined as unknown as TEventType, done: true };
+ },
+ };
+ },
+ async finalMessage() {
+ // Check abort signal before returning final message
+ if (abortSignal?.aborted) {
+ const error = new Error('AbortError');
+ error.name = 'AbortError';
+ throw error;
+ }
+ return finalMsg as TMessageType;
+ },
+ };
+}
+
+export interface CreateMockAnthropicMessageOptions {
+ id?: string;
+ text?: string;
+ toolUse?: {
+ id?: string;
+ name: string;
+ input: any;
+ };
+ stopReason?: Message['stop_reason'];
+  usage?: Partial<Message['usage']>;
+}
+
+/**
+ * Creates a customizable mock Anthropic Message response
+ *
+ * @example
+ * // Simple text response
+ * createMockAnthropicMessage({ text: 'Hi there!' })
+ *
+ * // Tool use response
+ * createMockAnthropicMessage({
+ * toolUse: { name: 'get_weather', input: { city: 'NYC' } }
+ * })
+ *
+ * // Custom usage
+ * createMockAnthropicMessage({ usage: { input_tokens: 5, output_tokens: 15 } })
+ */
+export function createMockAnthropicMessage(
+ options: CreateMockAnthropicMessageOptions = {}
+): Message {
+ const content: Message['content'] = [];
+
+ if (options.toolUse) {
+ content.push({
+ type: 'tool_use',
+ id: options.toolUse.id || 'toolu_test123',
+ name: options.toolUse.name,
+ input: options.toolUse.input,
+ });
+ } else {
+ content.push({
+ type: 'text',
+ text: options.text || 'Hello! How can I help you today?',
+ citations: null,
+ });
+ }
+
+ const usage: Message['usage'] = {
+ cache_creation: null,
+ cache_creation_input_tokens: 0,
+ cache_read_input_tokens: 0,
+ input_tokens: 10,
+ output_tokens: 20,
+ server_tool_use: null,
+ service_tier: null,
+ ...(options.usage ?? {}),
+ };
+
+ return {
+ id: options.id || 'msg_test123',
+ type: 'message',
+ role: 'assistant',
+ model: 'claude-3-5-sonnet-20241022',
+ content,
+ stop_reason:
+ options.stopReason || (options.toolUse ? 'tool_use' : 'end_turn'),
+ stop_sequence: null,
+ usage,
+ };
+}
+
+/**
+ * Creates a default mock Message response
+ */
+export function mockDefaultMessage(): Message {
+ return createMockAnthropicMessage();
+}
+
+/**
+ * Creates a mock text content block chunk event
+ */
+export function mockTextChunk(text: string): MessageStreamEvent {
+ return {
+ type: 'content_block_delta',
+ index: 0,
+ delta: {
+ type: 'text_delta',
+ text,
+ },
+ } as MessageStreamEvent;
+}
+
+/**
+ * Creates a mock content block start event with text
+ */
+export function mockContentBlockStart(text: string): MessageStreamEvent {
+ return {
+ type: 'content_block_start',
+ index: 0,
+ content_block: {
+ type: 'text',
+ text,
+ },
+ } as MessageStreamEvent;
+}
+
+/**
+ * Creates a mock tool use content block
+ */
+export function mockToolUseChunk(
+ id: string,
+ name: string,
+ input: any
+): MessageStreamEvent {
+ return {
+ type: 'content_block_start',
+ index: 0,
+ content_block: {
+ type: 'tool_use',
+ id,
+ name,
+ input,
+ },
+ } as MessageStreamEvent;
+}
+
+/**
+ * Creates a default list of mock models
+ */
+export function mockDefaultModels() {
+ return [
+ { id: 'claude-3-5-sonnet-20241022', display_name: 'Claude 3.5 Sonnet' },
+ { id: 'claude-3-5-haiku-20241022', display_name: 'Claude 3.5 Haiku' },
+ { id: 'claude-3-opus-20240229', display_name: 'Claude 3 Opus' },
+ ];
+}
+
+/**
+ * Creates a mock Message with tool use
+ */
+export function mockMessageWithToolUse(
+ toolName: string,
+ toolInput: any
+): Partial<Message> {
+ return {
+ content: [
+ {
+ type: 'tool_use',
+ id: 'toolu_test123',
+ name: toolName,
+ input: toolInput,
+ },
+ ],
+ stop_reason: 'tool_use',
+ };
+}
+
+/**
+ * Creates a mock Message with custom content
+ */
+export function mockMessageWithContent(
+ content: Message['content']
+): Partial<Message> {
+ return {
+ content,
+ stop_reason: 'end_turn',
+ };
+}
+
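+/**
+ * Converts a stable-API Message into the BetaMessage shape used by the mock
+ * beta surface, adding beta-only fields and copying usage fields explicitly.
+ */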
+function toBetaMessage(message: Message): BetaMessage {
+ return {
+ ...message,
+ container: null,
+ context_management: null,
+ usage: {
+ cache_creation: message.usage.cache_creation,
+ cache_creation_input_tokens: message.usage.cache_creation_input_tokens,
+ cache_read_input_tokens: message.usage.cache_read_input_tokens,
+ input_tokens: message.usage.input_tokens,
+ output_tokens: message.usage.output_tokens,
+ server_tool_use: message.usage.server_tool_use as any,
+ service_tier: message.usage.service_tier,
+ },
+ };
+}
+
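+/**
+ * Reinterprets a stable stream event as a beta stream event; the mock event
+ * shapes are structurally compatible, so a cast is sufficient.
+ */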
+function toBetaStreamEvent(
+ event: MessageStreamEvent
+): BetaRawMessageStreamEvent {
+ return event as unknown as BetaRawMessageStreamEvent;
+}
diff --git a/js/plugins/anthropic/tests/stable_runner_test.ts b/js/plugins/anthropic/tests/stable_runner_test.ts
new file mode 100644
index 0000000000..9b60084b3e
--- /dev/null
+++ b/js/plugins/anthropic/tests/stable_runner_test.ts
@@ -0,0 +1,2340 @@
+/**
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import type Anthropic from '@anthropic-ai/sdk';
+import type {
+ Message,
+ MessageCreateParams,
+ MessageParam,
+ MessageStreamEvent,
+} from '@anthropic-ai/sdk/resources/messages.mjs';
+import * as assert from 'assert';
+import type {
+ GenerateRequest,
+ GenerateResponseData,
+ MessageData,
+ Part,
+ Role,
+} from 'genkit';
+import type { CandidateData, ToolDefinition } from 'genkit/model';
+import { describe, it, mock } from 'node:test';
+
+import { claudeModel, claudeRunner } from '../src/models.js';
+import { Runner } from '../src/runner/stable.js';
+import { AnthropicConfigSchema } from '../src/types.js';
+import {
+ createMockAnthropicClient,
+ mockContentBlockStart,
+ mockTextChunk,
+} from './mocks/anthropic-client.js';
+
+// Test helper: Create a Runner instance for testing converter methods
+// Type interface to access protected methods in tests
+type RunnerProtectedMethods = {
+ toAnthropicRole: (
+ role: Role,
+ toolMessageType?: 'tool_use' | 'tool_result'
+ ) => 'user' | 'assistant';
+ toAnthropicToolResponseContent: (part: Part) => any;
+ toAnthropicMessageContent: (part: Part) => any;
+ toAnthropicMessages: (messages: MessageData[]) => {
+ system?: string;
+ messages: any[];
+ };
+ toAnthropicTool: (tool: ToolDefinition) => any;
+ toAnthropicRequestBody: (
+ modelName: string,
+ request: GenerateRequest,
+ cacheSystemPrompt?: boolean
+ ) => any;
+ toAnthropicStreamingRequestBody: (
+ modelName: string,
+ request: GenerateRequest,
+ cacheSystemPrompt?: boolean
+ ) => any;
+ fromAnthropicContentBlockChunk: (
+ event: MessageStreamEvent
+ ) => Part | undefined;
+ fromAnthropicStopReason: (reason: Message['stop_reason']) => any;
+ fromAnthropicResponse: (message: Message) => GenerateResponseData;
+};
+
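+// Shared fixtures: a mock client and a Runner cast to expose its protected
+// converter methods to the unit tests below.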
+const mockClient = createMockAnthropicClient();
+const testRunner = new Runner({
+ name: 'test-model',
+ client: mockClient,
+}) as Runner & RunnerProtectedMethods;
+
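+// Builds a complete Message['usage'] object with zeroed defaults so each test
+// only specifies the usage fields it cares about.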
+const createUsage = (
+  overrides: Partial<Message['usage']> = {}
+): Message['usage'] => ({
+ cache_creation: null,
+ cache_creation_input_tokens: 0,
+ cache_read_input_tokens: 0,
+ input_tokens: 0,
+ output_tokens: 0,
+ server_tool_use: null,
+ service_tier: null,
+ ...overrides,
+});
+
+describe('toAnthropicRole', () => {
+ const testCases: {
+ genkitRole: Role;
+ toolMessageType?: 'tool_use' | 'tool_result';
+ expectedAnthropicRole: MessageParam['role'];
+ }[] = [
+ {
+ genkitRole: 'user',
+ expectedAnthropicRole: 'user',
+ },
+ {
+ genkitRole: 'model',
+ expectedAnthropicRole: 'assistant',
+ },
+ {
+ genkitRole: 'tool',
+ toolMessageType: 'tool_use',
+ expectedAnthropicRole: 'assistant',
+ },
+ {
+ genkitRole: 'tool',
+ toolMessageType: 'tool_result',
+ expectedAnthropicRole: 'user',
+ },
+ ];
+
+ for (const test of testCases) {
+ it(`should map Genkit "${test.genkitRole}" role to Anthropic "${test.expectedAnthropicRole}" role${
+ test.toolMessageType
+ ? ` when toolMessageType is "${test.toolMessageType}"`
+ : ''
+ }`, () => {
+ const actualOutput = testRunner.toAnthropicRole(
+ test.genkitRole,
+ test.toolMessageType
+ );
+ assert.strictEqual(actualOutput, test.expectedAnthropicRole);
+ });
+ }
+
+ it('should throw an error for unknown roles', () => {
+ assert.throws(
+ () => testRunner.toAnthropicRole('unknown' as Role),
+ /Unsupported genkit role: unknown/
+ );
+ });
+});
+
+describe('toAnthropicToolResponseContent', () => {
+ it('should not throw for parts without toolResponse', () => {
+    // toAnthropicToolResponseContent expects part.toolResponse to exist, but
+    // when it is missing it simply returns a stringified undefined/empty object
+ const part: Part = { data: 'hi' } as Part;
+ const result = testRunner.toAnthropicToolResponseContent(part);
+ assert.ok(result);
+ assert.strictEqual(result.type, 'text');
+ });
+});
+
+describe('toAnthropicMessageContent', () => {
+ it('should throw if a media part contains invalid media', () => {
+ assert.throws(
+ () =>
+ testRunner.toAnthropicMessageContent({
+ media: {
+ url: '',
+ },
+ }),
+ /Media url is required but was not provided/
+ );
+ });
+
+ it('should throw if the provided part is invalid', () => {
+ assert.throws(
+ () => testRunner.toAnthropicMessageContent({ fake: 'part' } as Part),
+ /Unsupported genkit part fields encountered for current message role: {"fake":"part"}/
+ );
+ });
+
+ it('should treat remote URLs without explicit content type as image URLs', () => {
+ const result = testRunner.toAnthropicMessageContent({
+ media: {
+ url: 'https://example.com/image.png',
+ },
+ });
+
+ assert.deepStrictEqual(result, {
+ type: 'image',
+ source: {
+ type: 'url',
+ url: 'https://example.com/image.png',
+ },
+ });
+ });
+
+ it('should handle PDF with base64 data URL correctly', () => {
+ const result = testRunner.toAnthropicMessageContent({
+ media: {
+ url: 'data:application/pdf;base64,JVBERi0xLjQKJ',
+ contentType: 'application/pdf',
+ },
+ });
+
+ assert.deepStrictEqual(result, {
+ type: 'document',
+ source: {
+ type: 'base64',
+ media_type: 'application/pdf',
+ data: 'JVBERi0xLjQKJ',
+ },
+ });
+ });
+
+ it('should handle PDF with HTTP/HTTPS URL correctly', () => {
+ const result = testRunner.toAnthropicMessageContent({
+ media: {
+ url: 'https://example.com/document.pdf',
+ contentType: 'application/pdf',
+ },
+ });
+
+ assert.deepStrictEqual(result, {
+ type: 'document',
+ source: {
+ type: 'url',
+ url: 'https://example.com/document.pdf',
+ },
+ });
+ });
+});
+
+describe('toAnthropicMessages', () => {
+ const testCases: {
+ should: string;
+ inputMessages: MessageData[];
+ expectedOutput: {
+ messages: MessageParam[];
+ system?: string;
+ };
+ }[] = [
+ {
+ should: 'should transform tool request content correctly',
+ inputMessages: [
+ {
+ role: 'model',
+ content: [
+ {
+ toolRequest: {
+ ref: 'toolu_01A09q90qw90lq917835lq9',
+ name: 'tellAFunnyJoke',
+ input: { topic: 'bob' },
+ },
+ },
+ ],
+ },
+ ],
+ expectedOutput: {
+ messages: [
+ {
+ role: 'assistant',
+ content: [
+ {
+ type: 'tool_use',
+ id: 'toolu_01A09q90qw90lq917835lq9',
+ name: 'tellAFunnyJoke',
+ input: { topic: 'bob' },
+ },
+ ],
+ },
+ ],
+ system: undefined,
+ },
+ },
+ {
+ should: 'should transform tool response text content correctly',
+ inputMessages: [
+ {
+ role: 'tool',
+ content: [
+ {
+ toolResponse: {
+ ref: 'call_SVDpFV2l2fW88QRFtv85FWwM',
+ name: 'tellAFunnyJoke',
+ output: 'Why did the bob cross the road?',
+ },
+ },
+ ],
+ },
+ ],
+ expectedOutput: {
+ messages: [
+ {
+ role: 'user',
+ content: [
+ {
+ type: 'tool_result',
+ tool_use_id: 'call_SVDpFV2l2fW88QRFtv85FWwM',
+ content: [
+ {
+ type: 'text',
+ text: 'Why did the bob cross the road?',
+ },
+ ],
+ },
+ ],
+ },
+ ],
+ system: undefined,
+ },
+ },
+ {
+ should: 'should transform tool response media content correctly',
+ inputMessages: [
+ {
+ role: 'tool',
+ content: [
+ {
+ toolResponse: {
+ ref: 'call_SVDpFV2l2fW88QRFtv85FWwM',
+ name: 'tellAFunnyJoke',
+ output: {
+ url: 'data:image/gif;base64,R0lGODlhAQABAAAAACw=',
+ contentType: 'image/gif',
+ },
+ },
+ },
+ ],
+ },
+ ],
+ expectedOutput: {
+ messages: [
+ {
+ role: 'user',
+ content: [
+ {
+ type: 'tool_result',
+ tool_use_id: 'call_SVDpFV2l2fW88QRFtv85FWwM',
+ content: [
+ {
+ type: 'image',
+ source: {
+ type: 'base64',
+ data: 'R0lGODlhAQABAAAAACw=',
+ media_type: 'image/gif',
+ },
+ },
+ ],
+ },
+ ],
+ },
+ ],
+ system: undefined,
+ },
+ },
+ {
+ should:
+ 'should transform tool response base64 image url content correctly',
+ inputMessages: [
+ {
+ role: 'tool',
+ content: [
+ {
+ toolResponse: {
+ ref: 'call_SVDpFV2l2fW88QRFtv85FWwM',
+ name: 'tellAFunnyJoke',
+ output: 'data:image/gif;base64,R0lGODlhAQABAAAAACw=',
+ },
+ },
+ ],
+ },
+ ],
+ expectedOutput: {
+ messages: [
+ {
+ role: 'user',
+ content: [
+ {
+ type: 'tool_result',
+ tool_use_id: 'call_SVDpFV2l2fW88QRFtv85FWwM',
+ content: [
+ {
+ type: 'image',
+ source: {
+ type: 'base64',
+ data: 'R0lGODlhAQABAAAAACw=',
+ media_type: 'image/gif',
+ },
+ },
+ ],
+ },
+ ],
+ },
+ ],
+ system: undefined,
+ },
+ },
+ {
+ should: 'should transform text content correctly',
+ inputMessages: [
+ { role: 'user', content: [{ text: 'hi' }] },
+ { role: 'model', content: [{ text: 'how can I help you?' }] },
+ { role: 'user', content: [{ text: 'I am testing' }] },
+ ],
+ expectedOutput: {
+ messages: [
+ {
+ content: [
+ {
+ text: 'hi',
+ type: 'text',
+ citations: null,
+ },
+ ],
+ role: 'user',
+ },
+ {
+ content: [
+ {
+ text: 'how can I help you?',
+ type: 'text',
+ citations: null,
+ },
+ ],
+ role: 'assistant',
+ },
+ {
+ content: [
+ {
+ text: 'I am testing',
+ type: 'text',
+ citations: null,
+ },
+ ],
+ role: 'user',
+ },
+ ],
+ system: undefined,
+ },
+ },
+ {
+ should: 'should transform initial system prompt correctly',
+ inputMessages: [
+      { role: 'system', content: [{ text: 'You are a helpful assistant' }] },
+ { role: 'user', content: [{ text: 'hi' }] },
+ ],
+ expectedOutput: {
+ messages: [
+ {
+ content: [
+ {
+ text: 'hi',
+ type: 'text',
+ citations: null,
+ },
+ ],
+ role: 'user',
+ },
+ ],
+        system: 'You are a helpful assistant',
+ },
+ },
+ {
+ should: 'should transform multi-modal (text + media) content correctly',
+ inputMessages: [
+ {
+ role: 'user',
+ content: [
+ { text: 'describe the following image:' },
+ {
+ media: {
+ url: 'data:image/gif;base64,R0lGODlhAQABAAAAACw=',
+ contentType: 'image/gif',
+ },
+ },
+ ],
+ },
+ ],
+ expectedOutput: {
+ messages: [
+ {
+ content: [
+ {
+ text: 'describe the following image:',
+ type: 'text',
+ citations: null,
+ },
+ {
+ source: {
+ type: 'base64',
+ data: 'R0lGODlhAQABAAAAACw=',
+ media_type: 'image/gif',
+ },
+ type: 'image',
+ },
+ ],
+ role: 'user',
+ },
+ ],
+ system: undefined,
+ },
+ },
+ {
+ should: 'should transform PDF with base64 data URL correctly',
+ inputMessages: [
+ {
+ role: 'user',
+ content: [
+ {
+ media: {
+ url: 'data:application/pdf;base64,JVBERi0xLjQKJ',
+ contentType: 'application/pdf',
+ },
+ },
+ ],
+ },
+ ],
+ expectedOutput: {
+ messages: [
+ {
+ content: [
+ {
+ type: 'document',
+ source: {
+ type: 'base64',
+ media_type: 'application/pdf',
+ data: 'JVBERi0xLjQKJ',
+ },
+ },
+ ],
+ role: 'user',
+ },
+ ],
+ system: undefined,
+ },
+ },
+ {
+ should: 'should transform PDF with HTTP/HTTPS URL correctly',
+ inputMessages: [
+ {
+ role: 'user',
+ content: [
+ {
+ media: {
+ url: 'https://example.com/document.pdf',
+ contentType: 'application/pdf',
+ },
+ },
+ ],
+ },
+ ],
+ expectedOutput: {
+ messages: [
+ {
+ content: [
+ {
+ type: 'document',
+ source: {
+ type: 'url',
+ url: 'https://example.com/document.pdf',
+ },
+ },
+ ],
+ role: 'user',
+ },
+ ],
+ system: undefined,
+ },
+ },
+ {
+ should: 'should transform PDF alongside text and images correctly',
+ inputMessages: [
+ {
+ role: 'user',
+ content: [
+ { text: 'Analyze this PDF and image:' },
+ {
+ media: {
+ url: 'data:application/pdf;base64,JVBERi0xLjQKJ',
+ contentType: 'application/pdf',
+ },
+ },
+ {
+ media: {
+ url: 'data:image/png;base64,R0lGODlhAQABAAAAACw=',
+ contentType: 'image/png',
+ },
+ },
+ ],
+ },
+ ],
+ expectedOutput: {
+ messages: [
+ {
+ content: [
+ {
+ text: 'Analyze this PDF and image:',
+ type: 'text',
+ citations: null,
+ },
+ {
+ type: 'document',
+ source: {
+ type: 'base64',
+ media_type: 'application/pdf',
+ data: 'JVBERi0xLjQKJ',
+ },
+ },
+ {
+ source: {
+ type: 'base64',
+ data: 'R0lGODlhAQABAAAAACw=',
+ media_type: 'image/png',
+ },
+ type: 'image',
+ },
+ ],
+ role: 'user',
+ },
+ ],
+ system: undefined,
+ },
+ },
+ ];
+
+ for (const test of testCases) {
+ it(test.should, () => {
+ const actualOutput = testRunner.toAnthropicMessages(test.inputMessages);
+ assert.deepStrictEqual(actualOutput, test.expectedOutput);
+ });
+ }
+});
+
+describe('toAnthropicTool', () => {
+ it('should transform Genkit tool definition to an Anthropic tool', () => {
+ const tool: ToolDefinition = {
+ name: 'tellAJoke',
+ description: 'Tell a joke',
+ inputSchema: {
+ type: 'object',
+ properties: {
+ topic: { type: 'string' },
+ },
+ required: ['topic'],
+ },
+ };
+ const actualOutput = testRunner.toAnthropicTool(tool);
+ assert.deepStrictEqual(actualOutput, {
+ name: 'tellAJoke',
+ description: 'Tell a joke',
+ input_schema: {
+ type: 'object',
+ properties: {
+ topic: { type: 'string' },
+ },
+ required: ['topic'],
+ },
+ });
+ });
+});
+
+describe('fromAnthropicContentBlockChunk', () => {
+ const testCases: {
+ should: string;
+ event: MessageStreamEvent;
+ expectedOutput: Part | undefined;
+ }[] = [
+ {
+ should: 'should return text part from content_block_start event',
+ event: {
+ index: 0,
+ type: 'content_block_start',
+ content_block: {
+ type: 'text',
+ text: 'Hello, World!',
+ citations: null,
+ },
+ },
+ expectedOutput: { text: 'Hello, World!' },
+ },
+ {
+ should:
+ 'should return thinking part from content_block_start thinking event',
+ event: {
+ index: 0,
+ type: 'content_block_start',
+ content_block: {
+ type: 'thinking',
+ thinking: 'Let me reason through this.',
+ signature: 'sig_123',
+ },
+ },
+ expectedOutput: {
+ reasoning: 'Let me reason through this.',
+ custom: { anthropicThinking: { signature: 'sig_123' } },
+ },
+ },
+ {
+ should:
+ 'should return redacted thinking part from content_block_start event',
+ event: {
+ index: 0,
+ type: 'content_block_start',
+ content_block: {
+ type: 'redacted_thinking',
+ data: 'encrypted-data',
+ },
+ },
+ expectedOutput: { custom: { redactedThinking: 'encrypted-data' } },
+ },
+ {
+ should: 'should return text delta part from content_block_delta event',
+ event: {
+ index: 0,
+ type: 'content_block_delta',
+ delta: {
+ type: 'text_delta',
+ text: 'Hello, World!',
+ },
+ },
+ expectedOutput: { text: 'Hello, World!' },
+ },
+ {
+ should: 'should return thinking delta part as text content',
+ event: {
+ index: 0,
+ type: 'content_block_delta',
+ delta: {
+ type: 'thinking_delta',
+ thinking: 'Step by step...',
+ },
+ },
+ expectedOutput: { reasoning: 'Step by step...' },
+ },
+ {
+ should: 'should return tool use requests',
+ event: {
+ index: 0,
+ type: 'content_block_start',
+ content_block: {
+ type: 'tool_use',
+ id: 'abc123',
+ name: 'tellAJoke',
+ input: { topic: 'dogs' },
+ },
+ },
+ expectedOutput: {
+ toolRequest: {
+ name: 'tellAJoke',
+ input: { topic: 'dogs' },
+ ref: 'abc123',
+ },
+ },
+ },
+ {
+ should: 'should return undefined for any other event',
+ event: {
+ type: 'message_stop',
+ },
+ expectedOutput: undefined,
+ },
+ ];
+
+ for (const test of testCases) {
+ it(test.should, () => {
+ const actualOutput = testRunner.fromAnthropicContentBlockChunk(
+ test.event
+ );
+ assert.deepStrictEqual(actualOutput, test.expectedOutput);
+ });
+ }
+
+ it('should throw for unsupported tool input streaming deltas', () => {
+ assert.throws(
+ () =>
+ testRunner.fromAnthropicContentBlockChunk({
+ index: 0,
+ type: 'content_block_delta',
+ delta: {
+ type: 'input_json_delta',
+ partial_json: '{"foo":',
+ },
+ } as MessageStreamEvent),
+ /Anthropic streaming tool input \(input_json_delta\) is not yet supported/
+ );
+ });
+});
+
+describe('fromAnthropicStopReason', () => {
+ const testCases: {
+ inputStopReason: Message['stop_reason'];
+ expectedFinishReason: CandidateData['finishReason'];
+ }[] = [
+ {
+ inputStopReason: 'max_tokens',
+ expectedFinishReason: 'length',
+ },
+ {
+ inputStopReason: 'end_turn',
+ expectedFinishReason: 'stop',
+ },
+ {
+ inputStopReason: 'stop_sequence',
+ expectedFinishReason: 'stop',
+ },
+ {
+ inputStopReason: 'tool_use',
+ expectedFinishReason: 'stop',
+ },
+ {
+ inputStopReason: null,
+ expectedFinishReason: 'unknown',
+ },
+ {
+ inputStopReason: 'unknown' as any,
+ expectedFinishReason: 'other',
+ },
+ ];
+
+ for (const test of testCases) {
+ it(`should map Anthropic stop reason "${test.inputStopReason}" to Genkit finish reason "${test.expectedFinishReason}"`, () => {
+ const actualOutput = testRunner.fromAnthropicStopReason(
+ test.inputStopReason
+ );
+ assert.strictEqual(actualOutput, test.expectedFinishReason);
+ });
+ }
+});
+
+describe('fromAnthropicResponse', () => {
+ const testCases: {
+ should: string;
+ message: Message;
+    expectedOutput: Omit<GenerateResponseData, 'custom'>;
+ }[] = [
+ {
+ should: 'should work with text content',
+ message: {
+ id: 'abc123',
+ model: 'whatever',
+ type: 'message',
+ role: 'assistant',
+ stop_reason: 'max_tokens',
+ stop_sequence: null,
+ content: [
+ {
+ type: 'text',
+ text: 'Tell a joke about dogs.',
+ citations: null,
+ },
+ ],
+ usage: createUsage({
+ input_tokens: 10,
+ output_tokens: 20,
+ cache_creation_input_tokens: null,
+ cache_read_input_tokens: null,
+ }),
+ },
+ expectedOutput: {
+ candidates: [
+ {
+ index: 0,
+ finishReason: 'length',
+ message: {
+ role: 'model',
+ content: [{ text: 'Tell a joke about dogs.' }],
+ },
+ },
+ ],
+ usage: {
+ inputTokens: 10,
+ outputTokens: 20,
+ },
+ },
+ },
+ {
+ should: 'should work with tool use content',
+ message: {
+ id: 'abc123',
+ model: 'whatever',
+ type: 'message',
+ role: 'assistant',
+ stop_reason: 'tool_use',
+ stop_sequence: null,
+ content: [
+ {
+ type: 'tool_use',
+ id: 'abc123',
+ name: 'tellAJoke',
+ input: { topic: 'dogs' },
+ },
+ ],
+ usage: createUsage({
+ input_tokens: 10,
+ output_tokens: 20,
+ cache_creation_input_tokens: null,
+ cache_read_input_tokens: null,
+ }),
+ },
+ expectedOutput: {
+ candidates: [
+ {
+ index: 0,
+ finishReason: 'stop',
+ message: {
+ role: 'model',
+ content: [
+ {
+ toolRequest: {
+ name: 'tellAJoke',
+ input: { topic: 'dogs' },
+ ref: 'abc123',
+ },
+ },
+ ],
+ },
+ },
+ ],
+ usage: {
+ inputTokens: 10,
+ outputTokens: 20,
+ },
+ },
+ },
+ ];
+
+ for (const test of testCases) {
+ it(test.should, () => {
+ const actualOutput = testRunner.fromAnthropicResponse(test.message);
+ // Check custom field exists and is the message
+ assert.ok(actualOutput.custom);
+ assert.strictEqual(actualOutput.custom, test.message);
+ // Check the rest
+ assert.deepStrictEqual(
+ {
+ candidates: actualOutput.candidates,
+ usage: actualOutput.usage,
+ },
+ test.expectedOutput
+ );
+ });
+ }
+});
+
+describe('toAnthropicRequestBody', () => {
+ const testCases: {
+ should: string;
+ modelName: string;
+ genkitRequest: GenerateRequest;
+ expectedOutput: MessageCreateParams;
+ }[] = [
+ {
+ should: '(claude-3-5-haiku) handles request with text messages',
+ modelName: 'claude-3-5-haiku',
+ genkitRequest: {
+ messages: [
+ { role: 'user', content: [{ text: 'Tell a joke about dogs.' }] },
+ ],
+ output: { format: 'text' },
+ config: {
+ metadata: {
+ user_id: 'exampleUser123',
+ },
+ },
+ },
+ expectedOutput: {
+ max_tokens: 4096,
+ messages: [
+ {
+ content: [
+ {
+ text: 'Tell a joke about dogs.',
+ type: 'text',
+ citations: null,
+ },
+ ],
+ role: 'user',
+ },
+ ],
+ model: 'claude-3-5-haiku',
+ metadata: {
+ user_id: 'exampleUser123',
+ },
+ },
+ },
+ {
+ should: '(claude-3-haiku) handles request with text messages',
+ modelName: 'claude-3-haiku',
+ genkitRequest: {
+ messages: [
+ { role: 'user', content: [{ text: 'Tell a joke about dogs.' }] },
+ ],
+ output: { format: 'text' },
+ config: {
+ metadata: {
+ user_id: 'exampleUser123',
+ },
+ },
+ },
+ expectedOutput: {
+ max_tokens: 4096,
+ messages: [
+ {
+ content: [
+ {
+ text: 'Tell a joke about dogs.',
+ type: 'text',
+ citations: null,
+ },
+ ],
+ role: 'user',
+ },
+ ],
+ model: 'claude-3-haiku',
+ metadata: {
+ user_id: 'exampleUser123',
+ },
+ },
+ },
+ ];
+ for (const test of testCases) {
+ it(test.should, () => {
+ const actualOutput = testRunner.toAnthropicRequestBody(
+ test.modelName,
+ test.genkitRequest
+ );
+ assert.deepStrictEqual(actualOutput, test.expectedOutput);
+ });
+ }
+
+ it('should accept any model name and use it directly', () => {
+ // Following Google GenAI pattern: accept any model name, let API validate
+ const result = testRunner.toAnthropicRequestBody('fake-model', {
+ messages: [],
+ } as GenerateRequest);
+
+ // Should not throw, and should use the model name directly
+ assert.strictEqual(result.model, 'fake-model');
+ });
+
+ it('should throw if output format is not text', () => {
+ assert.throws(
+ () =>
+ testRunner.toAnthropicRequestBody('claude-3-5-haiku', {
+ messages: [],
+ tools: [],
+ output: { format: 'media' },
+ } as GenerateRequest),
+ /Only text output format is supported for Claude models currently/
+ );
+ });
+
+ it('should apply system prompt caching when enabled', () => {
+ const request: GenerateRequest = {
+ messages: [
+ { role: 'system', content: [{ text: 'You are a helpful assistant' }] },
+ { role: 'user', content: [{ text: 'Hi' }] },
+ ],
+ output: { format: 'text' },
+ };
+
+ // Test with caching enabled
+ const outputWithCaching = testRunner.toAnthropicRequestBody(
+ 'claude-3-5-haiku',
+ request,
+ true
+ );
+ assert.deepStrictEqual(outputWithCaching.system, [
+ {
+ type: 'text',
+ text: 'You are a helpful assistant',
+ cache_control: { type: 'ephemeral' },
+ },
+ ]);
+
+ // Test with caching disabled
+ const outputWithoutCaching = testRunner.toAnthropicRequestBody(
+ 'claude-3-5-haiku',
+ request,
+ false
+ );
+ assert.strictEqual(
+ outputWithoutCaching.system,
+ 'You are a helpful assistant'
+ );
+ });
+
+ it('should concatenate multiple text parts in system message', () => {
+ const request: GenerateRequest = {
+ messages: [
+ {
+ role: 'system',
+ content: [
+ { text: 'You are a helpful assistant.' },
+ { text: 'Always be concise.' },
+ { text: 'Use proper grammar.' },
+ ],
+ },
+ { role: 'user', content: [{ text: 'Hi' }] },
+ ],
+ output: { format: 'text' },
+ };
+
+ const output = testRunner.toAnthropicRequestBody(
+ 'claude-3-5-haiku',
+ request,
+ false
+ );
+
+ assert.strictEqual(
+ output.system,
+ 'You are a helpful assistant.\n\nAlways be concise.\n\nUse proper grammar.'
+ );
+ });
+
+ it('should concatenate multiple text parts in system message with caching', () => {
+ const request: GenerateRequest = {
+ messages: [
+ {
+ role: 'system',
+ content: [
+ { text: 'You are a helpful assistant.' },
+ { text: 'Always be concise.' },
+ ],
+ },
+ { role: 'user', content: [{ text: 'Hi' }] },
+ ],
+ output: { format: 'text' },
+ };
+
+ const output = testRunner.toAnthropicRequestBody(
+ 'claude-3-5-haiku',
+ request,
+ true
+ );
+
+ assert.deepStrictEqual(output.system, [
+ {
+ type: 'text',
+ text: 'You are a helpful assistant.\n\nAlways be concise.',
+ cache_control: { type: 'ephemeral' },
+ },
+ ]);
+ });
+
+ it('should throw error if system message contains media', () => {
+ const request: GenerateRequest = {
+ messages: [
+ {
+ role: 'system',
+ content: [
+ { text: 'You are a helpful assistant.' },
+ {
+ media: {
+ url: 'data:image/png;base64,iVBORw0KGgoAAAANS',
+ contentType: 'image/png',
+ },
+ },
+ ],
+ },
+ { role: 'user', content: [{ text: 'Hi' }] },
+ ],
+ output: { format: 'text' },
+ };
+
+ assert.throws(
+ () =>
+ testRunner.toAnthropicRequestBody('claude-3-5-haiku', request, false),
+ /System messages can only contain text content/
+ );
+ });
+
+ it('should throw error if system message contains tool requests', () => {
+ const request: GenerateRequest = {
+ messages: [
+ {
+ role: 'system',
+ content: [
+ { text: 'You are a helpful assistant.' },
+ { toolRequest: { name: 'getTool', input: {}, ref: '123' } },
+ ],
+ },
+ { role: 'user', content: [{ text: 'Hi' }] },
+ ],
+ output: { format: 'text' },
+ };
+
+ assert.throws(
+ () =>
+ testRunner.toAnthropicRequestBody('claude-3-5-haiku', request, false),
+ /System messages can only contain text content/
+ );
+ });
+
+ it('should throw error if system message contains tool responses', () => {
+ const request: GenerateRequest = {
+ messages: [
+ {
+ role: 'system',
+ content: [
+ { text: 'You are a helpful assistant.' },
+ { toolResponse: { name: 'getTool', output: {}, ref: '123' } },
+ ],
+ },
+ { role: 'user', content: [{ text: 'Hi' }] },
+ ],
+ output: { format: 'text' },
+ };
+
+ assert.throws(
+ () =>
+ testRunner.toAnthropicRequestBody('claude-3-5-haiku', request, false),
+ /System messages can only contain text content/
+ );
+ });
+});
+
+describe('toAnthropicStreamingRequestBody', () => {
+ it('should set stream to true', () => {
+ const request: GenerateRequest = {
+ messages: [{ role: 'user', content: [{ text: 'Hello' }] }],
+ output: { format: 'text' },
+ };
+
+ const output = testRunner.toAnthropicStreamingRequestBody(
+ 'claude-3-5-haiku',
+ request
+ );
+
+ assert.strictEqual(output.stream, true);
+ assert.strictEqual(output.model, 'claude-3-5-haiku');
+ assert.strictEqual(output.max_tokens, 4096);
+ });
+
+ it('should support system prompt caching in streaming mode', () => {
+ const request: GenerateRequest = {
+ messages: [
+ { role: 'system', content: [{ text: 'You are a helpful assistant' }] },
+ { role: 'user', content: [{ text: 'Hello' }] },
+ ],
+ output: { format: 'text' },
+ };
+
+ const outputWithCaching = testRunner.toAnthropicStreamingRequestBody(
+ 'claude-3-5-haiku',
+ request,
+ true
+ );
+ assert.deepStrictEqual(outputWithCaching.system, [
+ {
+ type: 'text',
+ text: 'You are a helpful assistant',
+ cache_control: { type: 'ephemeral' },
+ },
+ ]);
+ assert.strictEqual(outputWithCaching.stream, true);
+
+ const outputWithoutCaching = testRunner.toAnthropicStreamingRequestBody(
+ 'claude-3-5-haiku',
+ request,
+ false
+ );
+ assert.strictEqual(
+ outputWithoutCaching.system,
+ 'You are a helpful assistant'
+ );
+ assert.strictEqual(outputWithoutCaching.stream, true);
+ });
+});
+
+describe('claudeRunner', () => {
+ it('should correctly run non-streaming requests', async () => {
+ const mockClient = createMockAnthropicClient({
+ messageResponse: {
+ content: [{ type: 'text', text: 'response', citations: null }],
+ usage: createUsage({
+ input_tokens: 10,
+ output_tokens: 20,
+ cache_creation_input_tokens: 0,
+ cache_read_input_tokens: 0,
+ }),
+ },
+ });
+
+ const runner = claudeRunner(
+ {
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ },
+ AnthropicConfigSchema
+ );
+ const abortSignal = new AbortController().signal;
+ await runner(
+ { messages: [] },
+ { streamingRequested: false, sendChunk: () => {}, abortSignal }
+ );
+
+ const createStub = mockClient.messages.create as any;
+ assert.strictEqual(createStub.mock.calls.length, 1);
+ assert.deepStrictEqual(createStub.mock.calls[0].arguments, [
+ {
+ model: 'claude-3-5-haiku',
+ max_tokens: 4096,
+ messages: [],
+ },
+ {
+ signal: abortSignal,
+ },
+ ]);
+ });
+
+ it('should correctly run streaming requests', async () => {
+ const mockClient = createMockAnthropicClient({
+ streamChunks: [
+ {
+ type: 'content_block_start',
+ index: 0,
+ content_block: {
+ type: 'text',
+ text: 'res',
+ },
+ } as MessageStreamEvent,
+ ],
+ messageResponse: {
+ content: [{ type: 'text', text: 'response', citations: null }],
+ usage: createUsage({
+ input_tokens: 10,
+ output_tokens: 20,
+ cache_creation_input_tokens: 0,
+ cache_read_input_tokens: 0,
+ }),
+ },
+ });
+
+ const streamingCallback = mock.fn();
+ const runner = claudeRunner(
+ {
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ },
+ AnthropicConfigSchema
+ );
+ const abortSignal = new AbortController().signal;
+ await runner(
+ { messages: [] },
+ { streamingRequested: true, sendChunk: streamingCallback, abortSignal }
+ );
+
+ const streamStub = mockClient.messages.stream as any;
+ assert.strictEqual(streamStub.mock.calls.length, 1);
+ assert.deepStrictEqual(streamStub.mock.calls[0].arguments, [
+ {
+ model: 'claude-3-5-haiku',
+ max_tokens: 4096,
+ messages: [],
+ stream: true,
+ },
+ {
+ signal: abortSignal,
+ },
+ ]);
+ });
+
+ it('should use beta API when apiVersion is beta', async () => {
+ const mockClient = createMockAnthropicClient({
+ messageResponse: {
+ content: [{ type: 'text', text: 'response', citations: null }],
+ usage: createUsage({
+ input_tokens: 10,
+ output_tokens: 20,
+ cache_creation_input_tokens: 0,
+ cache_read_input_tokens: 0,
+ }),
+ },
+ });
+
+ const runner = claudeRunner(
+ {
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ },
+ AnthropicConfigSchema
+ );
+ const abortSignal = new AbortController().signal;
+ await runner(
+ {
+ messages: [],
+ config: { apiVersion: 'beta' },
+ },
+ { streamingRequested: false, sendChunk: () => {}, abortSignal }
+ );
+
+ const betaCreateStub = mockClient.beta.messages.create as any;
+ assert.strictEqual(betaCreateStub.mock.calls.length, 1);
+
+ const regularCreateStub = mockClient.messages.create as any;
+ assert.strictEqual(regularCreateStub.mock.calls.length, 0);
+ });
+
+ it('should use beta API when defaultApiVersion is beta', async () => {
+ const mockClient = createMockAnthropicClient({
+ messageResponse: {
+ content: [{ type: 'text', text: 'response', citations: null }],
+ usage: createUsage({
+ input_tokens: 10,
+ output_tokens: 20,
+ cache_creation_input_tokens: 0,
+ cache_read_input_tokens: 0,
+ }),
+ },
+ });
+
+ const runner = claudeRunner(
+ {
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ defaultApiVersion: 'beta',
+ },
+ AnthropicConfigSchema
+ );
+ const abortSignal = new AbortController().signal;
+ await runner(
+ {
+ messages: [],
+ },
+ { streamingRequested: false, sendChunk: () => {}, abortSignal }
+ );
+
+ const betaCreateStub = mockClient.beta.messages.create as any;
+ assert.strictEqual(betaCreateStub.mock.calls.length, 1);
+
+ const regularCreateStub = mockClient.messages.create as any;
+ assert.strictEqual(regularCreateStub.mock.calls.length, 0);
+ });
+
+ it('should use request apiVersion over defaultApiVersion', async () => {
+ const mockClient = createMockAnthropicClient({
+ messageResponse: {
+ content: [{ type: 'text', text: 'response', citations: null }],
+ usage: createUsage({
+ input_tokens: 10,
+ output_tokens: 20,
+ cache_creation_input_tokens: 0,
+ cache_read_input_tokens: 0,
+ }),
+ },
+ });
+
+ // defaultApiVersion is 'stable', but request overrides to 'beta'
+ const runner = claudeRunner(
+ {
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ defaultApiVersion: 'stable',
+ },
+ AnthropicConfigSchema
+ );
+ const abortSignal = new AbortController().signal;
+ await runner(
+ {
+ messages: [],
+ config: { apiVersion: 'beta' },
+ },
+ { streamingRequested: false, sendChunk: () => {}, abortSignal }
+ );
+
+ const betaCreateStub = mockClient.beta.messages.create as any;
+ assert.strictEqual(betaCreateStub.mock.calls.length, 1);
+
+ const regularCreateStub = mockClient.messages.create as any;
+ assert.strictEqual(regularCreateStub.mock.calls.length, 0);
+ });
+
+ it('should use stable API when defaultApiVersion is beta but request overrides to stable', async () => {
+ const mockClient = createMockAnthropicClient({
+ messageResponse: {
+ content: [{ type: 'text', text: 'response', citations: null }],
+ usage: createUsage({
+ input_tokens: 10,
+ output_tokens: 20,
+ cache_creation_input_tokens: 0,
+ cache_read_input_tokens: 0,
+ }),
+ },
+ });
+
+ // defaultApiVersion is 'beta', but request overrides to 'stable'
+ const runner = claudeRunner(
+ {
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ defaultApiVersion: 'beta',
+ },
+ AnthropicConfigSchema
+ );
+ const abortSignal = new AbortController().signal;
+ await runner(
+ {
+ messages: [],
+ config: { apiVersion: 'stable' },
+ },
+ { streamingRequested: false, sendChunk: () => {}, abortSignal }
+ );
+
+ const betaCreateStub = mockClient.beta.messages.create as any;
+ assert.strictEqual(betaCreateStub.mock.calls.length, 0);
+
+ const regularCreateStub = mockClient.messages.create as any;
+ assert.strictEqual(regularCreateStub.mock.calls.length, 1);
+ });
+});
+
+describe('claudeRunner param object', () => {
+ it('should run requests when constructed with params object', async () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = claudeRunner(
+ {
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ cacheSystemPrompt: true,
+ },
+ AnthropicConfigSchema
+ );
+ const abortSignal = new AbortController().signal;
+
+ await runner(
+ { messages: [{ role: 'user', content: [{ text: 'hi' }] }] },
+ { streamingRequested: false, sendChunk: () => {}, abortSignal }
+ );
+
+ const createStub = mockClient.messages.create as any;
+ assert.strictEqual(createStub.mock.calls.length, 1);
+ assert.strictEqual(
+ createStub.mock.calls[0].arguments[0].messages[0].content[0].text,
+ 'hi'
+ );
+ });
+
+ it('should route to beta runner when defaultApiVersion is beta', async () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = claudeRunner(
+ {
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ defaultApiVersion: 'beta',
+ },
+ AnthropicConfigSchema
+ );
+ await runner(
+ { messages: [] },
+ {
+ streamingRequested: false,
+ sendChunk: () => {},
+ abortSignal: new AbortController().signal,
+ }
+ );
+
+ const betaCreateStub = mockClient.beta.messages.create as any;
+ assert.strictEqual(betaCreateStub.mock.calls.length, 1);
+ });
+
+ it('should throw when client is omitted from params object', () => {
+ assert.throws(() => {
+ claudeRunner(
+ {
+ name: 'claude-3-5-haiku',
+ client: undefined as unknown as Anthropic,
+ },
+ AnthropicConfigSchema
+ );
+ }, /Anthropic client is required to create a runner/);
+ });
+});
+
+describe('claudeModel', () => {
+ it('should fall back to generic metadata for unknown models', async () => {
+ const mockClient = createMockAnthropicClient();
+ const modelAction = claudeModel({
+ name: 'unknown-model',
+ client: mockClient,
+ });
+
+ const abortSignal = new AbortController().signal;
+ await (modelAction as any)(
+ { messages: [{ role: 'user', content: [{ text: 'hi' }] }] },
+ {
+ streamingRequested: false,
+ sendChunk: () => {},
+ abortSignal,
+ }
+ );
+
+ const createStub = mockClient.messages.create as any;
+ assert.strictEqual(createStub.mock.calls.length, 1);
+ const request = createStub.mock.calls[0].arguments[0];
+ assert.strictEqual(request.model, 'unknown-model');
+ });
+ it('should support params object configuration', async () => {
+ const mockClient = createMockAnthropicClient();
+ const modelAction = claudeModel({
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ defaultApiVersion: 'beta',
+ cacheSystemPrompt: true,
+ });
+
+ const abortSignal = new AbortController().signal;
+ await (modelAction as any)(
+ { messages: [], config: { maxOutputTokens: 128 } },
+ {
+ streamingRequested: false,
+ sendChunk: () => {},
+ abortSignal,
+ }
+ );
+
+ const betaCreateStub = mockClient.beta.messages.create as any;
+ assert.strictEqual(betaCreateStub.mock.calls.length, 1);
+ assert.strictEqual(
+ betaCreateStub.mock.calls[0].arguments[0].max_tokens,
+ 128
+ );
+ });
+
+ it('should correctly define supported Claude models', () => {
+ const mockClient = createMockAnthropicClient();
+ const modelName = 'claude-3-5-haiku';
+ const modelAction = claudeModel({
+ name: modelName,
+ client: mockClient,
+ });
+
+ // Verify the model action is returned
+ assert.ok(modelAction);
+ assert.strictEqual(typeof modelAction, 'function');
+ });
+
+ it('should accept any model name and create a model action', () => {
+ // Following Google GenAI pattern: accept any model name, let API validate
+ const modelAction = claudeModel({
+ name: 'unsupported-model',
+ client: {} as Anthropic,
+ });
+ assert.ok(modelAction, 'Should create model action for any model name');
+ assert.strictEqual(typeof modelAction, 'function');
+ });
+
+ it('should handle streaming with multiple text chunks', async () => {
+ const mockClient = createMockAnthropicClient({
+ streamChunks: [
+ mockContentBlockStart('Hello'),
+ mockTextChunk(' world'),
+ mockTextChunk('!'),
+ ],
+ messageResponse: {
+ content: [{ type: 'text', text: 'Hello world!', citations: null }],
+ usage: createUsage({
+ input_tokens: 5,
+ output_tokens: 10,
+ cache_creation_input_tokens: 0,
+ cache_read_input_tokens: 0,
+ }),
+ },
+ });
+
+ const chunks: any[] = [];
+ const streamingCallback = mock.fn((chunk: any) => {
+ chunks.push(chunk);
+ });
+
+ const runner = claudeRunner(
+ {
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ },
+ AnthropicConfigSchema
+ );
+ const abortSignal = new AbortController().signal;
+
+ const result = await runner(
+ { messages: [{ role: 'user', content: [{ text: 'Hi' }] }] },
+ { streamingRequested: true, sendChunk: streamingCallback, abortSignal }
+ );
+
+ // Verify we received all the streaming chunks
+ assert.ok(chunks.length > 0, 'Should have received streaming chunks');
+ assert.strictEqual(chunks.length, 3, 'Should have received 3 chunks');
+
+ // Verify the final result
+ assert.ok(result.candidates);
+ assert.strictEqual(
+ result.candidates[0].message.content[0].text,
+ 'Hello world!'
+ );
+ assert.ok(result.usage);
+ assert.strictEqual(result.usage.inputTokens, 5);
+ assert.strictEqual(result.usage.outputTokens, 10);
+ });
+
+ it('should handle tool use in streaming mode', async () => {
+ const streamChunks = [
+ {
+ type: 'content_block_start',
+ index: 0,
+ content_block: {
+ type: 'tool_use',
+ id: 'toolu_123',
+ name: 'get_weather',
+ input: { city: 'NYC' },
+ },
+ } as MessageStreamEvent,
+ ];
+ const mockClient = createMockAnthropicClient({
+ streamChunks,
+ messageResponse: {
+ content: [
+ {
+ type: 'tool_use',
+ id: 'toolu_123',
+ name: 'get_weather',
+ input: { city: 'NYC' },
+ },
+ ],
+ stop_reason: 'tool_use',
+ usage: createUsage({
+ input_tokens: 15,
+ output_tokens: 25,
+ cache_creation_input_tokens: 0,
+ cache_read_input_tokens: 0,
+ }),
+ },
+ });
+
+ const chunks: any[] = [];
+ const streamingCallback = mock.fn((chunk: any) => {
+ chunks.push(chunk);
+ });
+
+ const runner = claudeRunner(
+ {
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ },
+ AnthropicConfigSchema
+ );
+ const abortSignal = new AbortController().signal;
+
+ const result = await runner(
+ {
+ messages: [
+ { role: 'user', content: [{ text: 'What is the weather?' }] },
+ ],
+ tools: [
+ {
+ name: 'get_weather',
+ description: 'Get the weather for a city',
+ inputSchema: {
+ type: 'object',
+ properties: {
+ city: { type: 'string' },
+ },
+ required: ['city'],
+ },
+ },
+ ],
+ },
+ { streamingRequested: true, sendChunk: streamingCallback, abortSignal }
+ );
+
+ // Verify we received the tool use chunk
+ assert.ok(chunks.length > 0, 'Should have received chunks');
+
+ // Verify the final result contains tool use
+ assert.ok(result.candidates);
+ const toolRequest = result.candidates[0].message.content.find(
+ (p) => p.toolRequest
+ );
+ assert.ok(toolRequest, 'Should have a tool request');
+ assert.strictEqual(toolRequest.toolRequest?.name, 'get_weather');
+ assert.deepStrictEqual(toolRequest.toolRequest?.input, { city: 'NYC' });
+ });
+
+ it('should handle streaming errors and partial responses', async () => {
+ const streamError = new Error('Network error during streaming');
+ const mockClient = createMockAnthropicClient({
+ streamChunks: [mockContentBlockStart('Hello'), mockTextChunk(' world')],
+ streamErrorAfterChunk: 1, // Throw error after first chunk
+ streamError: streamError,
+ messageResponse: {
+ content: [{ type: 'text', text: 'Hello world', citations: null }],
+ usage: createUsage({
+ input_tokens: 5,
+ output_tokens: 10,
+ cache_creation_input_tokens: 0,
+ cache_read_input_tokens: 0,
+ }),
+ },
+ });
+
+ const runner = claudeRunner(
+ {
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ },
+ AnthropicConfigSchema
+ );
+ const abortSignal = new AbortController().signal;
+ const chunks: any[] = [];
+ const sendChunk = (chunk: any) => {
+ chunks.push(chunk);
+ };
+
+ // Should throw error during streaming
+ await assert.rejects(
+ async () => {
+ await runner(
+ { messages: [{ role: 'user', content: [{ text: 'Hi' }] }] },
+ {
+ streamingRequested: true,
+ sendChunk,
+ abortSignal,
+ }
+ );
+ },
+ (error: Error) => {
+ // Verify error is propagated
+ assert.strictEqual(error.message, 'Network error during streaming');
+ // Verify we received at least one chunk before error
+ assert.ok(
+ chunks.length > 0,
+ 'Should have received some chunks before error'
+ );
+ return true;
+ }
+ );
+ });
+
+ it('should handle abort signal during streaming', async () => {
+ const mockClient = createMockAnthropicClient({
+ streamChunks: [
+ mockContentBlockStart('Hello'),
+ mockTextChunk(' world'),
+ mockTextChunk('!'),
+ ],
+ messageResponse: {
+ content: [{ type: 'text', text: 'Hello world!', citations: null }],
+ usage: createUsage({
+ input_tokens: 5,
+ output_tokens: 15,
+ cache_creation_input_tokens: 0,
+ cache_read_input_tokens: 0,
+ }),
+ },
+ });
+
+ const runner = claudeRunner(
+ {
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ },
+ AnthropicConfigSchema
+ );
+ const abortController = new AbortController();
+ const chunks: any[] = [];
+ const sendChunk = (chunk: any) => {
+ chunks.push(chunk);
+ // Abort after first chunk
+ if (chunks.length === 1) {
+ abortController.abort();
+ }
+ };
+
+ // Should throw AbortError when signal is aborted
+ await assert.rejects(
+ async () => {
+ await runner(
+ { messages: [{ role: 'user', content: [{ text: 'Hi' }] }] },
+ {
+ streamingRequested: true,
+ sendChunk,
+ abortSignal: abortController.signal,
+ }
+ );
+ },
+ (error: Error) => {
+ // Verify abort error is thrown
+ assert.ok(
+ error.name === 'AbortError' || error.message.includes('AbortError'),
+ 'Should throw AbortError'
+ );
+ return true;
+ }
+ );
+ });
+
+ it('should handle unknown models using generic settings', async () => {
+ const mockClient = createMockAnthropicClient();
+ const modelAction = claudeModel({
+ name: 'unknown-model',
+ client: mockClient,
+ });
+
+ const abortSignal = new AbortController().signal;
+ await (modelAction as any)(
+ { messages: [{ role: 'user', content: [{ text: 'hi' }] }] },
+ {
+ streamingRequested: false,
+ sendChunk: () => {},
+ abortSignal,
+ }
+ );
+
+ const createStub = mockClient.messages.create as any;
+ assert.strictEqual(createStub.mock.calls.length, 1);
+ assert.strictEqual(
+ createStub.mock.calls[0].arguments[0].model,
+ 'unknown-model'
+ );
+ });
+});
+
+describe('BaseRunner helper utilities', () => {
+ it('should throw descriptive errors for invalid PDF data URLs', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new Runner({
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ });
+
+ assert.throws(
+ () =>
+ runner['toPdfDocumentSource']({
+ url: 'data:text/plain;base64,AAA',
+ contentType: 'application/pdf',
+ } as any),
+ /PDF contentType mismatch/
+ );
+ });
+
+ it('should stringify non-media tool responses', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new Runner({
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ });
+
+ const result = runner['toAnthropicToolResponseContent']({
+ toolResponse: {
+ ref: 'call_1',
+ name: 'tool',
+ output: { value: 42 },
+ },
+ } as any);
+
+ assert.deepStrictEqual(result, {
+ type: 'text',
+ text: JSON.stringify({ value: 42 }),
+ });
+ });
+
+ it('should parse image data URLs', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new Runner({
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ });
+
+ const source = runner['toImageSource']({
+ url: 'data:image/png;base64,AAA',
+ contentType: 'image/png',
+ });
+
+ assert.strictEqual(source.kind, 'base64');
+ if (source.kind !== 'base64') {
+ throw new Error('Expected base64 image source');
+ }
+ assert.strictEqual(source.mediaType, 'image/png');
+ assert.strictEqual(source.data, 'AAA');
+ });
+
+ it('should pass through remote image URLs', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new Runner({
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ });
+
+ const source = runner['toImageSource']({
+ url: 'https://example.com/image.png',
+ contentType: 'image/png',
+ });
+
+ assert.strictEqual(source.kind, 'url');
+ if (source.kind !== 'url') {
+ throw new Error('Expected url image source');
+ }
+ assert.strictEqual(source.url, 'https://example.com/image.png');
+ });
+
+ it('should parse WEBP image data URLs with matching contentType', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new Runner({
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ });
+
+ const source = runner['toImageSource']({
+ url: 'data:image/webp;base64,AAA',
+ contentType: 'image/webp',
+ });
+
+ assert.strictEqual(source.kind, 'base64');
+ if (source.kind !== 'base64') {
+ throw new Error('Expected base64 image source');
+ }
+ assert.strictEqual(source.mediaType, 'image/webp');
+ assert.strictEqual(source.data, 'AAA');
+ });
+
+ it('should prefer data URL content type over media.contentType for WEBP', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new Runner({
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ });
+
+ // Even if contentType says PNG, data URL says WEBP - should use WEBP
+ const source = runner['toImageSource']({
+ url: 'data:image/webp;base64,AAA',
+ contentType: 'image/png',
+ });
+
+ assert.strictEqual(source.kind, 'base64');
+ if (source.kind !== 'base64') {
+ throw new Error('Expected base64 image source');
+ }
+ // Key fix: should use data URL type (webp), not contentType (png)
+ assert.strictEqual(source.mediaType, 'image/webp');
+ assert.strictEqual(source.data, 'AAA');
+ });
+
+ it('should handle WEBP via toAnthropicMessageContent', () => {
+ const result = testRunner.toAnthropicMessageContent({
+ media: {
+ url: 'data:image/webp;base64,AAA',
+ contentType: 'image/webp',
+ },
+ });
+
+ assert.strictEqual(result.type, 'image');
+ assert.strictEqual(result.source.type, 'base64');
+ assert.strictEqual(result.source.media_type, 'image/webp');
+ assert.strictEqual(result.source.data, 'AAA');
+ });
+
+ it('should handle WEBP in tool response content', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new Runner({
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ });
+
+ const result = runner['toAnthropicToolResponseContent']({
+ toolResponse: {
+ ref: 'call_123',
+ name: 'get_image',
+ output: {
+ url: 'data:image/webp;base64,AAA',
+ contentType: 'image/webp',
+ },
+ },
+ } as any);
+
+ assert.strictEqual(result.type, 'image');
+ assert.strictEqual(result.source.type, 'base64');
+ assert.strictEqual(result.source.media_type, 'image/webp');
+ assert.strictEqual(result.source.data, 'AAA');
+ });
+
+ it('should throw helpful error for text/plain in toImageSource', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new Runner({
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ });
+
+ assert.throws(
+ () =>
+ runner['toImageSource']({
+ url: 'data:text/plain;base64,AAA',
+ contentType: 'text/plain',
+ }),
+ (error: Error) => {
+ return (
+ error.message.includes('Text files should be sent as text content') &&
+ error.message.includes('text:')
+ );
+ }
+ );
+ });
+
+ it('should throw helpful error for text/plain in toAnthropicMessageContent', () => {
+ assert.throws(
+ () =>
+ testRunner.toAnthropicMessageContent({
+ media: {
+ url: 'data:text/plain;base64,AAA',
+ contentType: 'text/plain',
+ },
+ }),
+ (error: Error) => {
+ return (
+ error.message.includes('Text files should be sent as text content') &&
+ error.message.includes('text:')
+ );
+ }
+ );
+ });
+
+ it('should throw helpful error for text/plain in tool response', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new Runner({
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ });
+
+ assert.throws(
+ () =>
+ runner['toAnthropicToolResponseContent']({
+ toolResponse: {
+ ref: 'call_123',
+ name: 'get_file',
+ output: {
+ url: 'data:text/plain;base64,AAA',
+ contentType: 'text/plain',
+ },
+ },
+ } as any),
+ (error: Error) => {
+ return error.message.includes(
+ 'Text files should be sent as text content'
+ );
+ }
+ );
+ });
+
+ it('should throw helpful error for text/plain with remote URL', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new Runner({
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ });
+
+ assert.throws(
+ () =>
+ runner['toImageSource']({
+ url: 'https://example.com/file.txt',
+ contentType: 'text/plain',
+ }),
+ (error: Error) => {
+ return (
+ error.message.includes('Text files should be sent as text content') &&
+ error.message.includes('text:')
+ );
+ }
+ );
+ });
+});
+
+describe('Runner request bodies and error branches', () => {
+ it('should include optional config fields in non-streaming request body', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new Runner({
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ cacheSystemPrompt: true,
+ }) as Runner & RunnerProtectedMethods;
+
+ const body = runner['toAnthropicRequestBody'](
+ 'claude-3-5-haiku',
+ {
+ messages: [
+ {
+ role: 'system',
+ content: [{ text: 'You are helpful.' }],
+ },
+ {
+ role: 'user',
+ content: [{ text: 'Tell me a joke' }],
+ },
+ ],
+ config: {
+ maxOutputTokens: 256,
+ topK: 3,
+ topP: 0.75,
+ temperature: 0.6,
+ stopSequences: ['END'],
+ metadata: { user_id: 'user-xyz' },
+ tool_choice: { type: 'auto' },
+ thinking: { enabled: true, budgetTokens: 2048 },
+ },
+ tools: [
+ {
+ name: 'get_weather',
+ description: 'Returns the weather',
+ inputSchema: { type: 'object' },
+ },
+ ],
+ } as unknown as GenerateRequest,
+ true
+ );
+
+ assert.strictEqual(body.model, 'claude-3-5-haiku');
+ assert.ok(Array.isArray(body.system));
+ assert.strictEqual(body.system?.[0].cache_control?.type, 'ephemeral');
+ assert.strictEqual(body.max_tokens, 256);
+ assert.strictEqual(body.top_k, 3);
+ assert.strictEqual(body.top_p, 0.75);
+ assert.strictEqual(body.temperature, 0.6);
+ assert.deepStrictEqual(body.stop_sequences, ['END']);
+ assert.deepStrictEqual(body.metadata, { user_id: 'user-xyz' });
+ assert.deepStrictEqual(body.tool_choice, { type: 'auto' });
+ assert.strictEqual(body.tools?.length, 1);
+ assert.deepStrictEqual(body.thinking, {
+ type: 'enabled',
+ budget_tokens: 2048,
+ });
+ });
+
+ it('should include optional config fields in streaming request body', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new Runner({
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ cacheSystemPrompt: true,
+ }) as Runner & RunnerProtectedMethods;
+
+ const body = runner['toAnthropicStreamingRequestBody'](
+ 'claude-3-5-haiku',
+ {
+ messages: [
+ {
+ role: 'system',
+ content: [{ text: 'Stay brief.' }],
+ },
+ {
+ role: 'user',
+ content: [{ text: 'Summarize the weather.' }],
+ },
+ ],
+ config: {
+ maxOutputTokens: 64,
+ topK: 2,
+ topP: 0.6,
+ temperature: 0.4,
+ stopSequences: ['STOP'],
+ metadata: { user_id: 'user-abc' },
+ tool_choice: { type: 'any' },
+ thinking: { enabled: true, budgetTokens: 1536 },
+ },
+ tools: [
+ {
+ name: 'summarize_weather',
+ description: 'Summarizes a forecast',
+ inputSchema: { type: 'object' },
+ },
+ ],
+ } as unknown as GenerateRequest,
+ true
+ );
+
+ assert.strictEqual(body.stream, true);
+ assert.ok(Array.isArray(body.system));
+ assert.strictEqual(body.max_tokens, 64);
+ assert.strictEqual(body.top_k, 2);
+ assert.strictEqual(body.top_p, 0.6);
+ assert.strictEqual(body.temperature, 0.4);
+ assert.deepStrictEqual(body.stop_sequences, ['STOP']);
+ assert.deepStrictEqual(body.metadata, { user_id: 'user-abc' });
+ assert.deepStrictEqual(body.tool_choice, { type: 'any' });
+ assert.strictEqual(body.tools?.length, 1);
+ assert.deepStrictEqual(body.thinking, {
+ type: 'enabled',
+ budget_tokens: 1536,
+ });
+ });
+
+ it('should disable thinking when explicitly turned off', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new Runner({
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ }) as Runner & RunnerProtectedMethods;
+
+ const body = runner['toAnthropicRequestBody'](
+ 'claude-3-5-haiku',
+ {
+ messages: [],
+ config: {
+ thinking: { enabled: false },
+ },
+ } as unknown as GenerateRequest,
+ false
+ );
+
+ assert.deepStrictEqual(body.thinking, { type: 'disabled' });
+ });
+
+ it('should throw descriptive errors for missing tool refs', () => {
+ const mockClient = createMockAnthropicClient();
+ const runner = new Runner({
+ name: 'claude-3-5-haiku',
+ client: mockClient,
+ cacheSystemPrompt: false,
+ }) as Runner & RunnerProtectedMethods;
+
+ assert.throws(
+ () =>
+ runner['toAnthropicMessageContent']({
+ toolRequest: {
+ name: 'get_weather',
+ input: {},
+ },
+ } as any),
+ /Tool request ref is required/
+ );
+
+ assert.throws(
+ () =>
+ runner['toAnthropicMessageContent']({
+ toolResponse: {
+ ref: undefined,
+ name: 'get_weather',
+ output: 'Sunny',
+ },
+ } as any),
+ /Tool response ref is required/
+ );
+
+ assert.throws(
+ () =>
+ runner['toAnthropicMessageContent']({
+ data: 'unexpected',
+ } as any),
+ /Unsupported genkit part fields/
+ );
+ });
+});
diff --git a/js/plugins/anthropic/tests/streaming_test.ts b/js/plugins/anthropic/tests/streaming_test.ts
new file mode 100644
index 0000000000..84d45e0d9a
--- /dev/null
+++ b/js/plugins/anthropic/tests/streaming_test.ts
@@ -0,0 +1,366 @@
+/**
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import * as assert from 'assert';
+import type { ModelAction } from 'genkit/model';
+import { describe, mock, test } from 'node:test';
+import { anthropic } from '../src/index.js';
+import { PluginOptions, __testClient } from '../src/types.js';
+import {
+ createMockAnthropicClient,
+ createMockAnthropicMessage,
+ mockContentBlockStart,
+ mockTextChunk,
+ mockToolUseChunk,
+} from './mocks/anthropic-client.js';
+
+describe('Streaming Integration Tests', () => {
+ test('should use streaming API when onChunk is provided', async () => {
+ const mockClient = createMockAnthropicClient({
+ streamChunks: [
+ mockContentBlockStart('Hello'),
+ mockTextChunk(' world'),
+ mockTextChunk('!'),
+ ],
+ messageResponse: createMockAnthropicMessage({
+ text: 'Hello world!',
+ }),
+ });
+
+ const plugin = anthropic({
+ apiKey: 'test-key',
+ [__testClient]: mockClient,
+ } as PluginOptions);
+
+ const modelAction = plugin.resolve!(
+ 'model',
+ 'claude-3-5-haiku-20241022'
+ ) as ModelAction;
+
+ const response = await modelAction(
+ {
+ messages: [{ role: 'user', content: [{ text: 'Hello' }] }],
+ output: { format: 'text' },
+ },
+ {
+ onChunk: mock.fn() as any,
+ abortSignal: new AbortController().signal,
+ }
+ );
+
+ // Verify final response
+ assert.ok(response, 'Response should be returned');
+ assert.ok(
+ response.candidates?.[0]?.message.content[0].text,
+ 'Response should have text content'
+ );
+
+    // We can't control from the plugin level whether the runner streams,
+    // so just verify we got a response. The runner-level tests verify
+    // streaming behavior in detail.
+ });
+
+ test('should handle streaming with multiple content blocks', async () => {
+ const mockClient = createMockAnthropicClient({
+ streamChunks: [
+ mockContentBlockStart('First block'),
+ mockTextChunk(' continues'),
+ {
+ type: 'content_block_start',
+ index: 1,
+ content_block: {
+ type: 'text',
+ text: 'Second block',
+ },
+ } as any,
+ {
+ type: 'content_block_delta',
+ index: 1,
+ delta: {
+ type: 'text_delta',
+ text: ' here',
+ },
+ } as any,
+ ],
+ messageResponse: createMockAnthropicMessage({
+ text: 'First block continues',
+ }),
+ });
+
+ const plugin = anthropic({
+ apiKey: 'test-key',
+ [__testClient]: mockClient,
+ } as PluginOptions);
+
+ const modelAction = plugin.resolve!(
+ 'model',
+ 'claude-3-5-haiku-20241022'
+ ) as ModelAction;
+
+ const response = await modelAction(
+ {
+ messages: [{ role: 'user', content: [{ text: 'Hello' }] }],
+ output: { format: 'text' },
+ },
+ {
+ onChunk: mock.fn() as any,
+ abortSignal: new AbortController().signal,
+ }
+ );
+
+ // Verify response is returned even with multiple content blocks
+ assert.ok(response, 'Response should be returned');
+ });
+
+ test('should handle streaming with tool use', async () => {
+ const mockClient = createMockAnthropicClient({
+ streamChunks: [
+ mockToolUseChunk('toolu_123', 'get_weather', { city: 'NYC' }),
+ ],
+ messageResponse: createMockAnthropicMessage({
+ toolUse: {
+ id: 'toolu_123',
+ name: 'get_weather',
+ input: { city: 'NYC' },
+ },
+ }),
+ });
+
+ const plugin = anthropic({
+ apiKey: 'test-key',
+ [__testClient]: mockClient,
+ } as PluginOptions);
+
+ const modelAction = plugin.resolve!(
+ 'model',
+ 'claude-3-5-haiku-20241022'
+ ) as ModelAction;
+
+ const response = await modelAction(
+ {
+ messages: [{ role: 'user', content: [{ text: 'Get NYC weather' }] }],
+ tools: [
+ {
+ name: 'get_weather',
+ description: 'Get weather for a city',
+ inputSchema: {
+ type: 'object',
+ properties: {
+ city: { type: 'string' },
+ },
+ required: ['city'],
+ },
+ },
+ ],
+ output: { format: 'text' },
+ },
+ {
+ onChunk: mock.fn() as any,
+ abortSignal: new AbortController().signal,
+ }
+ );
+
+ // Verify tool use in response
+ assert.ok(response.candidates?.[0]?.message.content[0].toolRequest);
+ assert.strictEqual(
+ response.candidates[0].message.content[0].toolRequest?.name,
+ 'get_weather'
+ );
+ });
+
+ test('should handle abort signal', async () => {
+ const abortController = new AbortController();
+
+ const mockClient = createMockAnthropicClient({
+ messageResponse: createMockAnthropicMessage({
+ text: 'Hello world',
+ }),
+ });
+
+ const plugin = anthropic({
+ apiKey: 'test-key',
+ [__testClient]: mockClient,
+ } as PluginOptions);
+
+ const modelAction = plugin.resolve!(
+ 'model',
+ 'claude-3-5-haiku-20241022'
+ ) as ModelAction;
+
+ // Abort before starting
+ abortController.abort();
+
+ // Test that abort signal is passed through
+ // The actual abort behavior is tested in runner tests
+ try {
+ await modelAction(
+ {
+ messages: [{ role: 'user', content: [{ text: 'Hello' }] }],
+ output: { format: 'text' },
+ },
+ {
+ onChunk: mock.fn() as any,
+ abortSignal: abortController.signal,
+ }
+ );
+ // If we get here, the mock doesn't fully simulate abort behavior,
+ // which is fine since runner tests cover this
+ } catch (error: any) {
+ // Expected abort error
+ assert.ok(
+ error.message.includes('Abort') || error.name === 'AbortError',
+ 'Should throw abort error'
+ );
+ }
+ });
+
+ test('should handle errors during streaming', async () => {
+ const mockClient = createMockAnthropicClient({
+ shouldError: new Error('API error'),
+ });
+
+ const plugin = anthropic({
+ apiKey: 'test-key',
+ [__testClient]: mockClient,
+ } as PluginOptions);
+
+ const modelAction = plugin.resolve!(
+ 'model',
+ 'claude-3-5-haiku-20241022'
+ ) as ModelAction;
+
+ try {
+ await modelAction(
+ {
+ messages: [{ role: 'user', content: [{ text: 'Hello' }] }],
+ output: { format: 'text' },
+ },
+ {
+ onChunk: mock.fn() as any,
+ abortSignal: new AbortController().signal,
+ }
+ );
+ assert.fail('Should have thrown an error');
+ } catch (error: any) {
+ assert.strictEqual(error.message, 'API error');
+ }
+ });
+
+ test('should handle empty response', async () => {
+ const mockClient = createMockAnthropicClient({
+ streamChunks: [],
+ messageResponse: createMockAnthropicMessage({
+ text: '',
+ }),
+ });
+
+ const plugin = anthropic({
+ apiKey: 'test-key',
+ [__testClient]: mockClient,
+ } as PluginOptions);
+
+ const modelAction = plugin.resolve!(
+ 'model',
+ 'claude-3-5-haiku-20241022'
+ ) as ModelAction;
+
+ const response = await modelAction(
+ {
+ messages: [{ role: 'user', content: [{ text: 'Hello' }] }],
+ output: { format: 'text' },
+ },
+ {
+ onChunk: mock.fn() as any,
+ abortSignal: new AbortController().signal,
+ }
+ );
+
+ assert.ok(response, 'Should return response even with empty content');
+ });
+
+ test('should include usage metadata in streaming response', async () => {
+ const mockClient = createMockAnthropicClient({
+ streamChunks: [mockContentBlockStart('Response'), mockTextChunk(' text')],
+ messageResponse: createMockAnthropicMessage({
+ text: 'Response text',
+ usage: {
+ input_tokens: 50,
+ output_tokens: 25,
+ },
+ }),
+ });
+
+ const plugin = anthropic({
+ apiKey: 'test-key',
+ [__testClient]: mockClient,
+ } as PluginOptions);
+
+ const modelAction = plugin.resolve!(
+ 'model',
+ 'claude-3-5-haiku-20241022'
+ ) as ModelAction;
+
+ const response = await modelAction(
+ {
+ messages: [{ role: 'user', content: [{ text: 'Hello' }] }],
+ output: { format: 'text' },
+ },
+ {
+ onChunk: mock.fn() as any,
+ abortSignal: new AbortController().signal,
+ }
+ );
+
+ assert.ok(response.usage, 'Should include usage metadata');
+ assert.strictEqual(response.usage?.inputTokens, 50);
+ assert.strictEqual(response.usage?.outputTokens, 25);
+ });
+
+ test('should not stream when onChunk is not provided', async () => {
+ const mockClient = createMockAnthropicClient({
+ messageResponse: createMockAnthropicMessage({
+ text: 'Non-streaming response',
+ }),
+ });
+
+ const plugin = anthropic({
+ apiKey: 'test-key',
+ [__testClient]: mockClient,
+ } as PluginOptions);
+
+ const modelAction = plugin.resolve!(
+ 'model',
+ 'claude-3-5-haiku-20241022'
+ ) as ModelAction;
+
+ await modelAction(
+ {
+ messages: [{ role: 'user', content: [{ text: 'Hello' }] }],
+ },
+ {
+ abortSignal: new AbortController().signal,
+ }
+ );
+
+ // Verify non-streaming API was called
+ const createStub = mockClient.messages.create as any;
+ assert.strictEqual(createStub.mock.calls.length, 1);
+
+ // Verify stream API was NOT called
+ const streamStub = mockClient.messages.stream as any;
+ assert.strictEqual(streamStub.mock.calls.length, 0);
+ });
+});
diff --git a/js/plugins/anthropic/tests/types_test.ts b/js/plugins/anthropic/tests/types_test.ts
new file mode 100644
index 0000000000..64c91e1547
--- /dev/null
+++ b/js/plugins/anthropic/tests/types_test.ts
@@ -0,0 +1,89 @@
+/**
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import * as assert from 'assert';
+import { z } from 'genkit';
+import { describe, it } from 'node:test';
+import { AnthropicConfigSchema, resolveBetaEnabled } from '../src/types.js';
+
+describe('resolveBetaEnabled', () => {
+ it('should return true when config.apiVersion is beta', () => {
+    const config: z.infer<typeof AnthropicConfigSchema> = {
+ apiVersion: 'beta',
+ };
+ assert.strictEqual(resolveBetaEnabled(config, 'stable'), true);
+ });
+
+ it('should return true when pluginDefaultApiVersion is beta', () => {
+ assert.strictEqual(resolveBetaEnabled(undefined, 'beta'), true);
+ });
+
+ it('should return false when config.apiVersion is stable', () => {
+    const config: z.infer<typeof AnthropicConfigSchema> = {
+ apiVersion: 'stable',
+ };
+ assert.strictEqual(resolveBetaEnabled(config, 'stable'), false);
+ });
+
+ it('should return false when both are stable', () => {
+    const config: z.infer<typeof AnthropicConfigSchema> = {
+ apiVersion: 'stable',
+ };
+ assert.strictEqual(resolveBetaEnabled(config, 'stable'), false);
+ });
+
+ it('should return false when neither is specified', () => {
+ assert.strictEqual(resolveBetaEnabled(undefined, undefined), false);
+ });
+
+ it('should return false when config is undefined and plugin default is stable', () => {
+ assert.strictEqual(resolveBetaEnabled(undefined, 'stable'), false);
+ });
+
+ it('should prioritize config.apiVersion over pluginDefaultApiVersion (beta over stable)', () => {
+    const config: z.infer<typeof AnthropicConfigSchema> = {
+ apiVersion: 'beta',
+ };
+ // Even though plugin default is stable, request config should override
+ assert.strictEqual(resolveBetaEnabled(config, 'stable'), true);
+ });
+
+ it('should prioritize config.apiVersion over pluginDefaultApiVersion (stable over beta)', () => {
+    const config: z.infer<typeof AnthropicConfigSchema> = {
+ apiVersion: 'stable',
+ };
+ // Request explicitly wants stable, should override plugin default
+ assert.strictEqual(resolveBetaEnabled(config, 'beta'), false);
+ });
+
+ it('should return false when config is empty object', () => {
+    const config: z.infer<typeof AnthropicConfigSchema> = {};
+ assert.strictEqual(resolveBetaEnabled(config, undefined), false);
+ });
+
+ it('should return true when config is empty but plugin default is beta', () => {
+    const config: z.infer<typeof AnthropicConfigSchema> = {};
+ assert.strictEqual(resolveBetaEnabled(config, 'beta'), true);
+ });
+
+ it('should handle config with other fields but no apiVersion', () => {
+    const config: z.infer<typeof AnthropicConfigSchema> = {
+ metadata: { user_id: 'test-user' },
+ };
+ assert.strictEqual(resolveBetaEnabled(config, 'stable'), false);
+ assert.strictEqual(resolveBetaEnabled(config, 'beta'), true);
+ });
+});
diff --git a/js/plugins/anthropic/tsconfig.json b/js/plugins/anthropic/tsconfig.json
new file mode 100644
index 0000000000..596e2cf729
--- /dev/null
+++ b/js/plugins/anthropic/tsconfig.json
@@ -0,0 +1,4 @@
+{
+ "extends": "../../tsconfig.json",
+ "include": ["src"]
+}
diff --git a/js/plugins/anthropic/tsup.config.ts b/js/plugins/anthropic/tsup.config.ts
new file mode 100644
index 0000000000..d55507161f
--- /dev/null
+++ b/js/plugins/anthropic/tsup.config.ts
@@ -0,0 +1,22 @@
+/**
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { defineConfig, type Options } from 'tsup';
+import { defaultOptions } from '../../tsup.common';
+
+export default defineConfig({
+ ...(defaultOptions as Options),
+});
diff --git a/js/pnpm-lock.yaml b/js/pnpm-lock.yaml
index c1918c9103..109b07c772 100644
--- a/js/pnpm-lock.yaml
+++ b/js/pnpm-lock.yaml
@@ -257,6 +257,37 @@ importers:
specifier: ^4.9.0
version: 4.9.5
+ plugins/anthropic:
+ dependencies:
+ '@anthropic-ai/sdk':
+ specifier: ^0.68.0
+ version: 0.68.0(zod@3.25.67)
+ devDependencies:
+ '@types/node':
+ specifier: ^20.11.16
+ version: 20.19.1
+ check-node-version:
+ specifier: ^4.2.1
+ version: 4.2.1
+ genkit:
+ specifier: workspace:*
+ version: link:../../genkit
+ npm-run-all:
+ specifier: ^4.1.5
+ version: 4.1.5
+ rimraf:
+ specifier: ^6.0.1
+ version: 6.0.1
+ tsup:
+ specifier: ^8.3.5
+ version: 8.5.0(postcss@8.4.47)(tsx@4.20.3)(typescript@4.9.5)(yaml@2.8.0)
+ tsx:
+ specifier: ^4.19.2
+ version: 4.20.3
+ typescript:
+ specifier: ^4.9.0
+ version: 4.9.5
+
plugins/checks:
dependencies:
'@genkit-ai/ai':
@@ -994,6 +1025,25 @@ importers:
specifier: '>=12.2'
version: 13.4.0(encoding@0.1.13)
+ testapps/anthropic:
+ dependencies:
+ '@genkit-ai/anthropic':
+ specifier: workspace:*
+ version: link:../../plugins/anthropic
+ genkit:
+ specifier: workspace:*
+ version: link:../../genkit
+ devDependencies:
+ cross-env:
+ specifier: ^10.1.0
+ version: 10.1.0
+ tsx:
+ specifier: ^4.19.2
+ version: 4.20.3
+ typescript:
+ specifier: ^5.6.2
+ version: 5.8.3
+
testapps/basic-gemini:
dependencies:
'@genkit-ai/firebase':
@@ -2042,6 +2092,15 @@ packages:
'@anthropic-ai/sdk@0.24.3':
resolution: {integrity: sha512-916wJXO6T6k8R6BAAcLhLPv/pnLGy7YSEBZXZ1XTFbLcTZE8oTy3oDW9WJf9KKZwMvVcePIfoTSvzXHRcGxkQQ==}
+ '@anthropic-ai/sdk@0.68.0':
+ resolution: {integrity: sha512-SMYAmbbiprG8k1EjEPMTwaTqssDT7Ae+jxcR5kWXiqTlbwMR2AthXtscEVWOHkRfyAV5+y3PFYTJRNa3OJWIEw==}
+ hasBin: true
+ peerDependencies:
+ zod: ^3.25.0 || ^4.0.0
+ peerDependenciesMeta:
+ zod:
+ optional: true
+
'@anthropic-ai/sdk@0.9.1':
resolution: {integrity: sha512-wa1meQ2WSfoY8Uor3EdrJq0jTiZJoKoSii2ZVWRY1oN4Tlr5s59pADg9T79FTbPe1/se5c3pBeZgJL63wmuoBA==}
@@ -2202,6 +2261,10 @@ packages:
peerDependencies:
'@babel/core': ^7.0.0-0
+ '@babel/runtime@7.28.4':
+ resolution: {integrity: sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==}
+ engines: {node: '>=6.9.0'}
+
'@babel/template@7.25.7':
resolution: {integrity: sha512-wRwtAgI3bAS+JGU2upWNL9lSlDcRCqD05BZ1n3X2ONLH1WilFP6O1otQjeMK/1g0pvYcXC7b/qVUB1keofjtZA==}
engines: {node: '>=6.9.0'}
@@ -4600,6 +4663,10 @@ packages:
resolution: {integrity: sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==}
engines: {node: '>=4'}
+ chalk@3.0.0:
+ resolution: {integrity: sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==}
+ engines: {node: '>=8'}
+
chalk@4.1.2:
resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==}
engines: {node: '>=10'}
@@ -4611,6 +4678,11 @@ packages:
charenc@0.0.2:
resolution: {integrity: sha512-yrLQ/yVUFXkzg7EDQsPieE/53+0RlaWTs+wBrvW36cyilJ2SaDWfl4Yj7MtLTXleV9uEKefbAGUPv2/iWSooRA==}
+ check-node-version@4.2.1:
+ resolution: {integrity: sha512-YYmFYHV/X7kSJhuN/QYHUu998n/TRuDe8UenM3+m5NrkiH670lb9ILqHIvBencvJc4SDh+XcbXMR4b+TtubJiw==}
+ engines: {node: '>=8.3.0'}
+ hasBin: true
+
chokidar@4.0.3:
resolution: {integrity: sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==}
engines: {node: '>= 14.16.0'}
@@ -5924,6 +5996,10 @@ packages:
json-parse-even-better-errors@2.3.1:
resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==}
+ json-schema-to-ts@3.1.1:
+ resolution: {integrity: sha512-+DWg8jCJG2TEnpy7kOm/7/AxaYoaRbjVB4LFZLySZlWn8exGs3A4OLJR966cVvU26N7X9TWxl+Jsw7dzAqKT6g==}
+ engines: {node: '>=16'}
+
json-schema-traverse@0.4.1:
resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==}
@@ -6325,6 +6401,9 @@ packages:
makeerror@1.0.12:
resolution: {integrity: sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==}
+ map-values@1.0.1:
+ resolution: {integrity: sha512-BbShUnr5OartXJe1GeccAWtfro11hhgNJg6G9/UtWKjVGvV5U4C09cg5nk8JUevhXODaXY+hQ3xxMUKSs62ONQ==}
+
markdown-it@14.1.0:
resolution: {integrity: sha512-a54IwgWPaeBCAAsv13YgmALOF1elABB08FxO9i+r4VFk5Vl4pKokRPeX8u5TCgSsPi6ec1otfLjdOpVcgbpshg==}
hasBin: true
@@ -6558,6 +6637,9 @@ packages:
resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==}
engines: {node: '>=0.10.0'}
+ object-filter@1.0.2:
+ resolution: {integrity: sha512-NahvP2vZcy1ZiiYah30CEPw0FpDcSkSePJBMpzl5EQgCmISijiGuJm3SPYp7U+Lf2TljyaIw3E5EgkEx/TNEVA==}
+
object-hash@3.0.0:
resolution: {integrity: sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==}
engines: {node: '>= 6'}
@@ -6923,6 +7005,9 @@ packages:
resolution: {integrity: sha512-YWWTjgABSKcvs/nWBi9PycY/JiPJqOD4JA6o9Sej2AtvSGarXxKC3OQSk4pAarbdQlKAh5D4FCQkJNkW+GAn3w==}
engines: {node: '>=0.6'}
+ queue-microtask@1.2.3:
+ resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==}
+
range-parser@1.2.1:
resolution: {integrity: sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==}
engines: {node: '>= 0.6'}
@@ -7036,6 +7121,9 @@ packages:
resolution: {integrity: sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==}
engines: {node: '>= 18'}
+ run-parallel@1.2.0:
+ resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==}
+
safe-array-concat@1.1.3:
resolution: {integrity: sha512-AURm5f0jYEOydBj7VQlVvDrjeFgthDdEF5H1dP+6mNpoXOMo1quQqJ4wvJDyRZ9+pO3kGWoOdmV08cSv2aJV6Q==}
engines: {node: '>=0.4'}
@@ -7074,11 +7162,6 @@ packages:
engines: {node: '>=10'}
hasBin: true
- semver@7.6.3:
- resolution: {integrity: sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==}
- engines: {node: '>=10'}
- hasBin: true
-
semver@7.7.2:
resolution: {integrity: sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==}
engines: {node: '>=10'}
@@ -7408,6 +7491,9 @@ packages:
resolution: {integrity: sha512-aZbgViZrg1QNcG+LULa7nhZpJTZSLm/mXnHXnbAbjmN5aSa0y7V+wvv6+4WaBtpISJzThKy+PIPxc1Nq1EJ9mg==}
engines: {node: '>= 14.0.0'}
+ ts-algebra@2.0.0:
+ resolution: {integrity: sha512-FPAhNPFMrkwz76P7cdjdmiShwMynZYN6SgOujD1urY4oNm80Ou9oMdmbR45LotcKOXoy7wSmHkRFE6Mxbrhefw==}
+
ts-interface-checker@0.1.13:
resolution: {integrity: sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==}
@@ -7845,6 +7931,12 @@ snapshots:
transitivePeerDependencies:
- encoding
+ '@anthropic-ai/sdk@0.68.0(zod@3.25.67)':
+ dependencies:
+ json-schema-to-ts: 3.1.1
+ optionalDependencies:
+ zod: 3.25.67
+
'@anthropic-ai/sdk@0.9.1(encoding@0.1.13)':
dependencies:
'@types/node': 18.19.112
@@ -8042,6 +8134,8 @@ snapshots:
'@babel/core': 7.25.7
'@babel/helper-plugin-utils': 7.25.7
+ '@babel/runtime@7.28.4': {}
+
'@babel/template@7.25.7':
dependencies:
'@babel/code-frame': 7.25.7
@@ -10100,7 +10194,7 @@ snapshots:
'@opentelemetry/propagator-b3': 1.25.1(@opentelemetry/api@1.9.0)
'@opentelemetry/propagator-jaeger': 1.25.1(@opentelemetry/api@1.9.0)
'@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0)
- semver: 7.6.3
+ semver: 7.7.2
'@opentelemetry/semantic-conventions@1.25.1': {}
@@ -10861,6 +10955,11 @@ snapshots:
escape-string-regexp: 1.0.5
supports-color: 5.5.0
+ chalk@3.0.0:
+ dependencies:
+ ansi-styles: 4.3.0
+ supports-color: 7.2.0
+
chalk@4.1.2:
dependencies:
ansi-styles: 4.3.0
@@ -10870,6 +10969,15 @@ snapshots:
charenc@0.0.2: {}
+ check-node-version@4.2.1:
+ dependencies:
+ chalk: 3.0.0
+ map-values: 1.0.1
+ minimist: 1.2.8
+ object-filter: 1.0.2
+ run-parallel: 1.2.0
+ semver: 6.3.1
+
chokidar@4.0.3:
dependencies:
readdirp: 4.1.2
@@ -12740,6 +12848,11 @@ snapshots:
json-parse-even-better-errors@2.3.1: {}
+ json-schema-to-ts@3.1.1:
+ dependencies:
+ '@babel/runtime': 7.28.4
+ ts-algebra: 2.0.0
+
json-schema-traverse@0.4.1: {}
json-schema-traverse@1.0.0: {}
@@ -13082,6 +13195,8 @@ snapshots:
dependencies:
tmpl: 1.0.5
+ map-values@1.0.1: {}
+
markdown-it@14.1.0:
dependencies:
argparse: 2.0.1
@@ -13293,6 +13408,8 @@ snapshots:
object-assign@4.1.1: {}
+ object-filter@1.0.2: {}
+
object-hash@3.0.0: {}
object-inspect@1.13.1: {}
@@ -13654,6 +13771,8 @@ snapshots:
dependencies:
side-channel: 1.1.0
+ queue-microtask@1.2.3: {}
+
range-parser@1.2.1: {}
raw-body@2.5.2:
@@ -13817,6 +13936,10 @@ snapshots:
transitivePeerDependencies:
- supports-color
+ run-parallel@1.2.0:
+ dependencies:
+ queue-microtask: 1.2.3
+
safe-array-concat@1.1.3:
dependencies:
call-bind: 1.0.8
@@ -13854,8 +13977,6 @@ snapshots:
dependencies:
lru-cache: 6.0.0
- semver@7.6.3: {}
-
semver@7.7.2: {}
send@0.19.0:
@@ -14261,6 +14382,8 @@ snapshots:
triple-beam@1.4.1: {}
+ ts-algebra@2.0.0: {}
+
ts-interface-checker@0.1.13: {}
ts-jest@29.4.0(@babel/core@7.25.7)(@jest/transform@29.7.0)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.25.7))(jest-util@29.7.0)(jest@29.7.0(@types/node@20.19.1)(ts-node@10.9.2(@types/node@20.19.1)(typescript@4.9.5)))(typescript@4.9.5):
diff --git a/js/testapps/anthropic/README.md b/js/testapps/anthropic/README.md
new file mode 100644
index 0000000000..3343b90ea7
--- /dev/null
+++ b/js/testapps/anthropic/README.md
@@ -0,0 +1,67 @@
+# Anthropic Plugin Sample
+
+This test app demonstrates the Genkit Anthropic plugin against both the stable and beta runners, with samples organized by feature.
+
+## Directory Structure
+
+```
+src/
+ stable/
+ basic.ts - Basic stable API examples (hello, streaming)
+ text-plain.ts - Text/plain error handling demonstration
+ webp.ts - WEBP image handling demonstration
+ pdf.ts - PDF document processing examples
+ attention-first-page.pdf - Sample PDF file for testing
+ beta/
+ basic.ts - Basic beta API examples
+```
+
+## Setup
+
+1. From the repo root run `pnpm install` followed by `pnpm run setup` to link workspace dependencies.
+2. In this directory, optionally run `pnpm install` if you want a local `node_modules/`.
+3. Export an Anthropic API key (or add it to a `.env` file) before running any samples:
+
+ ```bash
+ export ANTHROPIC_API_KEY=your-key
+ ```
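+
+   If you go the `.env` route instead, a minimal file is sketched below. How it gets loaded depends on your tooling; the samples simply rely on `ANTHROPIC_API_KEY` being present in the environment, so exporting the variable works equally well.
+
+   ```
+   ANTHROPIC_API_KEY=your-key
+   ```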
+
+## Available scripts
+
+### Basic Examples
+- `pnpm run build` – Compile the TypeScript sources into `lib/`.
+- `pnpm run start:stable` – Run the compiled stable basic sample.
+- `pnpm run start:beta` – Run the compiled beta basic sample.
+- `pnpm run dev:stable` – Start the Genkit Dev UI over `src/stable/basic.ts` with live reload.
+- `pnpm run dev:beta` – Start the Genkit Dev UI over `src/beta/basic.ts` with live reload.
+
+### Feature-Specific Examples
+- `pnpm run dev:stable:text-plain` – Start Dev UI for text/plain error handling demo.
+- `pnpm run dev:stable:webp` – Start Dev UI for WEBP image handling demo.
+- `pnpm run dev:stable:pdf` – Start Dev UI for PDF document processing demo.
+
+## Flows
+
+Each source file defines flows that can be invoked from the Dev UI or the Genkit CLI:
+
+### Basic Examples
+- `anthropic-stable-hello` – Simple greeting using stable API
+- `anthropic-stable-stream` – Streaming response example
+- `anthropic-beta-hello` – Simple greeting using beta API
+- `anthropic-beta-stream` – Streaming response with beta API
+- `anthropic-beta-opus41` – Test Opus 4.1 model with beta API
+
+### Text/Plain Handling
+- `stable-text-plain-error` – Demonstrates the helpful error when using text/plain as media
+- `stable-text-plain-correct` – Shows the correct way to send text content
+
+### WEBP Image Handling
+- `stable-webp-matching` – WEBP image with matching contentType
+- `stable-webp-mismatched` – WEBP image with mismatched contentType (demonstrates the fix)
+
+### PDF Document Processing
+- `stable-pdf-base64` – Process a PDF from a local file using base64 encoding
+- `stable-pdf-url` – Process a PDF from a publicly accessible URL
+- `stable-pdf-analysis` – Analyze a PDF document for key topics, concepts, and visual elements
+
+Example: `genkit flow:run anthropic-stable-hello`
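+
+The feature-specific flows are invoked the same way. For example, a sketch for the PDF flows defined in `src/stable/pdf.ts`, assuming the app is running (for example via `pnpm run dev:stable:pdf`):
+
+```bash
+# Summarize the bundled sample PDF sent as base64 data
+genkit flow:run stable-pdf-base64
+
+# Analyze the same PDF for topics, concepts, and visual elements
+genkit flow:run stable-pdf-analysis
+```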
diff --git a/js/testapps/anthropic/package.json b/js/testapps/anthropic/package.json
new file mode 100644
index 0000000000..08e1a0d2fd
--- /dev/null
+++ b/js/testapps/anthropic/package.json
@@ -0,0 +1,36 @@
+{
+ "name": "anthropic-testapp",
+ "version": "0.0.1",
+ "description": "Sample Genkit app showcasing Anthropic plugin stable and beta usage.",
+ "main": "lib/stable/basic.js",
+ "scripts": {
+ "build": "tsc",
+ "build:watch": "tsc --watch",
+ "start:stable": "node lib/stable/basic.js",
+ "start:beta": "node lib/beta/basic.js",
+ "dev:stable": "genkit start -- npx tsx --watch src/stable/basic.ts",
+ "dev:beta": "genkit start -- npx tsx --watch src/beta/basic.ts",
+ "dev:stable:text-plain": "genkit start -- npx tsx --watch src/stable/text-plain.ts",
+ "dev:stable:webp": "genkit start -- npx tsx --watch src/stable/webp.ts",
+ "dev:stable:pdf": "genkit start -- npx tsx --watch src/stable/pdf.ts",
+ "genkit:dev": "cross-env GENKIT_ENV=dev npm run dev:stable",
+ "genkit:start": "cross-env GENKIT_ENV=dev genkit start -- tsx --watch src/stable/basic.ts",
+ "dev": "export GENKIT_RUNTIME_ID=$(openssl rand -hex 8) && node lib/stable/basic.js 2>&1"
+ },
+ "keywords": [
+ "genkit",
+ "anthropic",
+ "sample"
+ ],
+ "author": "",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "genkit": "workspace:*",
+ "@genkit-ai/anthropic": "workspace:*"
+ },
+ "devDependencies": {
+ "cross-env": "^10.1.0",
+ "tsx": "^4.19.2",
+ "typescript": "^5.6.2"
+ }
+}
diff --git a/js/testapps/anthropic/src/beta/basic.ts b/js/testapps/anthropic/src/beta/basic.ts
new file mode 100644
index 0000000000..d1309b3400
--- /dev/null
+++ b/js/testapps/anthropic/src/beta/basic.ts
@@ -0,0 +1,76 @@
+/**
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { anthropic } from '@genkit-ai/anthropic';
+import { genkit } from 'genkit';
+
+const ai = genkit({
+ plugins: [
+ // Default all flows in this sample to the beta surface
+ anthropic({ apiVersion: 'beta', cacheSystemPrompt: true }),
+ ],
+});
+
+const betaHaiku = anthropic.model('claude-3-5-haiku', { apiVersion: 'beta' });
+const betaSonnet = anthropic.model('claude-sonnet-4-5', { apiVersion: 'beta' });
+const betaOpus41 = anthropic.model('claude-opus-4-1', { apiVersion: 'beta' });
+
+ai.defineFlow('anthropic-beta-hello', async () => {
+ const { text } = await ai.generate({
+ model: betaHaiku,
+ prompt:
+ 'You are Claude on the beta API. Provide a concise greeting that mentions that you are using the beta API.',
+ config: { temperature: 0.6 },
+ });
+
+ return text;
+});
+
+ai.defineFlow('anthropic-beta-stream', async (_, { sendChunk }) => {
+ const { stream } = ai.generateStream({
+ model: betaSonnet,
+ prompt: [
+ {
+ text: 'Outline two experimental capabilities unlocked by the Anthropic beta API.',
+ },
+ ],
+ config: {
+ apiVersion: 'beta',
+ temperature: 0.4,
+ },
+ });
+
+ const collected: string[] = [];
+ for await (const chunk of stream) {
+ if (chunk.text) {
+ collected.push(chunk.text);
+ sendChunk(chunk.text);
+ }
+ }
+
+ return collected.join('');
+});
+
+ai.defineFlow('anthropic-beta-opus41', async () => {
+ const { text } = await ai.generate({
+ model: betaOpus41,
+ prompt:
+ 'You are Claude Opus 4.1 on the beta API. Provide a brief greeting that confirms you are using the beta API.',
+ config: { temperature: 0.6 },
+ });
+
+ return text;
+});
diff --git a/js/testapps/anthropic/src/stable/attention-first-page.pdf b/js/testapps/anthropic/src/stable/attention-first-page.pdf
new file mode 100644
index 0000000000..95c6625029
Binary files /dev/null and b/js/testapps/anthropic/src/stable/attention-first-page.pdf differ
diff --git a/js/testapps/anthropic/src/stable/basic.ts b/js/testapps/anthropic/src/stable/basic.ts
new file mode 100644
index 0000000000..246a42539a
--- /dev/null
+++ b/js/testapps/anthropic/src/stable/basic.ts
@@ -0,0 +1,51 @@
+/**
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { anthropic } from '@genkit-ai/anthropic';
+import { genkit } from 'genkit';
+
+const ai = genkit({
+ plugins: [
+ // Configure the plugin with environment-driven API key
+ anthropic(),
+ ],
+});
+
+ai.defineFlow('anthropic-stable-hello', async () => {
+ const { text } = await ai.generate({
+ model: anthropic.model('claude-sonnet-4-5'),
+ prompt: 'You are a friendly Claude assistant. Greet the user briefly.',
+ });
+
+ return text;
+});
+
+ai.defineFlow('anthropic-stable-stream', async (_, { sendChunk }) => {
+ const { stream } = ai.generateStream({
+ model: anthropic.model('claude-sonnet-4-5'),
+ prompt: 'Compose a short limerick about using Genkit with Anthropic.',
+ });
+
+ let response = '';
+ for await (const chunk of stream) {
+ response += chunk.text ?? '';
+ if (chunk.text) {
+ sendChunk(chunk.text);
+ }
+ }
+
+ return response;
+});
diff --git a/js/testapps/anthropic/src/stable/pdf.ts b/js/testapps/anthropic/src/stable/pdf.ts
new file mode 100644
index 0000000000..8953dff696
--- /dev/null
+++ b/js/testapps/anthropic/src/stable/pdf.ts
@@ -0,0 +1,122 @@
+/**
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { anthropic } from '@genkit-ai/anthropic';
+import * as fs from 'fs';
+import { genkit } from 'genkit';
+import * as path from 'path';
+
+const ai = genkit({
+ plugins: [anthropic()],
+});
+
+/**
+ * This flow demonstrates PDF document processing with Claude using base64 encoding.
+ * The PDF is read from the source directory and sent as a base64 data URL.
+ */
+ai.defineFlow('stable-pdf-base64', async () => {
+ // Read PDF file from the same directory as this source file
+ const pdfPath = path.join(__dirname, 'attention-first-page.pdf');
+ const pdfBuffer = fs.readFileSync(pdfPath);
+ const pdfBase64 = pdfBuffer.toString('base64');
+
+ const { text } = await ai.generate({
+ model: anthropic.model('claude-sonnet-4-5'),
+ messages: [
+ {
+ role: 'user',
+ content: [
+ {
+ text: 'What are the key findings or main points in this document?',
+ },
+ {
+ media: {
+ url: `data:application/pdf;base64,${pdfBase64}`,
+ contentType: 'application/pdf',
+ },
+ },
+ ],
+ },
+ ],
+ });
+
+ return text;
+});
+
+/**
+ * This flow demonstrates PDF document processing with a URL reference.
+ * Note: This requires the PDF to be hosted at a publicly accessible URL.
+ */
+ai.defineFlow('stable-pdf-url', async () => {
+ // Example: Using a publicly hosted PDF URL
+ // In a real application, you would use your own hosted PDF
+ const pdfUrl =
+ 'https://assets.anthropic.com/m/1cd9d098ac3e6467/original/Claude-3-Model-Card-October-Addendum.pdf';
+
+ const { text } = await ai.generate({
+ model: anthropic.model('claude-sonnet-4-5'),
+ messages: [
+ {
+ role: 'user',
+ content: [
+ {
+ text: 'Summarize the key points from this document.',
+ },
+ {
+ media: {
+ url: pdfUrl,
+ contentType: 'application/pdf',
+ },
+ },
+ ],
+ },
+ ],
+ });
+
+ return text;
+});
+
+/**
+ * This flow demonstrates analyzing specific aspects of a PDF document.
+ * Claude can understand both text and visual elements (charts, tables, images) in PDFs.
+ */
+ai.defineFlow('stable-pdf-analysis', async () => {
+ const pdfPath = path.join(__dirname, 'attention-first-page.pdf');
+ const pdfBuffer = fs.readFileSync(pdfPath);
+ const pdfBase64 = pdfBuffer.toString('base64');
+
+ const { text } = await ai.generate({
+ model: anthropic.model('claude-sonnet-4-5'),
+ messages: [
+ {
+ role: 'user',
+ content: [
+ {
+ text: 'Analyze this document and provide:\n1. The main topic or subject\n2. Any key technical concepts mentioned\n3. Any visual elements (charts, tables, diagrams) if present',
+ },
+ {
+ media: {
+ url: `data:application/pdf;base64,${pdfBase64}`,
+ contentType: 'application/pdf',
+ },
+ },
+ ],
+ },
+ ],
+ });
+
+ return text;
+});
diff --git a/js/testapps/anthropic/src/stable/text-plain.ts b/js/testapps/anthropic/src/stable/text-plain.ts
new file mode 100644
index 0000000000..0b290d53e6
--- /dev/null
+++ b/js/testapps/anthropic/src/stable/text-plain.ts
@@ -0,0 +1,83 @@
+/**
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { anthropic } from '@genkit-ai/anthropic';
+import { genkit } from 'genkit';
+
+const ai = genkit({
+ plugins: [anthropic()],
+});
+
+/**
+ * This flow demonstrates the error that occurs when trying to use text/plain
+ * files as media. The plugin will throw a helpful error message guiding users
+ * to use text content instead.
+ *
+ * Error message: "Unsupported media type: text/plain. Text files should be sent
+ * as text content in the message, not as media. For example, use { text: '...' }
+ * instead of { media: { url: '...', contentType: 'text/plain' } }"
+ */
+ai.defineFlow('stable-text-plain-error', async () => {
+ try {
+ await ai.generate({
+ model: anthropic.model('claude-sonnet-4-5'),
+ messages: [
+ {
+ role: 'user',
+ content: [
+ {
+ media: {
+ url: 'data:text/plain;base64,SGVsbG8gV29ybGQ=',
+ contentType: 'text/plain',
+ },
+ },
+ ],
+ },
+ ],
+ });
+ return 'Unexpected: Should have thrown an error';
+ } catch (error: any) {
+ return {
+ error: error.message,
+ note: 'This demonstrates the helpful error message for text/plain files',
+ };
+ }
+});
+
+/**
+ * This flow demonstrates the correct way to send text content.
+ * Instead of using media with text/plain, use the text field directly.
+ */
+ai.defineFlow('stable-text-plain-correct', async () => {
+ // Read the text content (in a real app, you'd read from a file)
+ const textContent = 'Hello World\n\nThis is a text file content.';
+
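+  // Send the file contents as a text part of the prompt rather than as media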
+ const { text } = await ai.generate({
+ model: anthropic.model('claude-sonnet-4-5'),
+ messages: [
+ {
+ role: 'user',
+ content: [
+ {
+ text: `Please summarize this text file content:\n\n${textContent}`,
+ },
+ ],
+ },
+ ],
+ });
+
+ return text;
+});
diff --git a/js/testapps/anthropic/src/stable/webp.ts b/js/testapps/anthropic/src/stable/webp.ts
new file mode 100644
index 0000000000..f8a861024b
--- /dev/null
+++ b/js/testapps/anthropic/src/stable/webp.ts
@@ -0,0 +1,95 @@
+/**
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { anthropic } from '@genkit-ai/anthropic';
+import { genkit } from 'genkit';
+
+const ai = genkit({
+ plugins: [anthropic()],
+});
+
+/**
+ * This flow demonstrates WEBP image handling with matching contentType.
+ * Both the data URL and the contentType field specify image/webp.
+ */
+ai.defineFlow('stable-webp-matching', async () => {
+  // Minimal valid WEBP image (1x1 pixel)
+ // In a real app, you'd load an actual WEBP image file
+ const webpImageData =
+ 'data:image/webp;base64,UklGRiQAAABXRUJQVlA4IBgAAAAwAQCdASoBAAEAAwA0JaQAA3AA/vuUAAA=';
+
+ const { text } = await ai.generate({
+ model: anthropic.model('claude-sonnet-4-5'),
+ messages: [
+ {
+ role: 'user',
+ content: [
+ { text: 'Describe this image:' },
+ {
+ media: {
+ url: webpImageData,
+ contentType: 'image/webp',
+ },
+ },
+ ],
+ },
+ ],
+ });
+
+ return text;
+});
+
+/**
+ * This flow demonstrates the fix for WEBP images with mismatched contentType.
+ * Even if contentType says 'image/png', the plugin will use 'image/webp' from
+ * the data URL, preventing API validation errors.
+ *
+ * This fix ensures that the media_type sent to Anthropic matches the actual
+ * image data; previously, WEBP images sent with a mismatched contentType
+ * triggered "Image does not match the provided media type" errors.
+ */
+ai.defineFlow('stable-webp-mismatched', async () => {
+  // Minimal valid WEBP image (1x1 pixel)
+ const webpImageData =
+ 'data:image/webp;base64,UklGRiQAAABXRUJQVlA4IBgAAAAwAQCdASoBAAEAAwA0JaQAA3AA/vuUAAA=';
+
+ const { text } = await ai.generate({
+ model: anthropic.model('claude-sonnet-4-5'),
+ messages: [
+ {
+ role: 'user',
+ content: [
+ {
+ text: 'Describe this image (note: contentType is wrong but data URL is correct):',
+ },
+ {
+ media: {
+ // Data URL says WEBP, but contentType says PNG
+ // The plugin will use WEBP from the data URL (the fix)
+ url: webpImageData,
+ contentType: 'image/png', // This mismatch is handled correctly
+ },
+ },
+ ],
+ },
+ ],
+ });
+
+ return {
+ result: text,
+ note: 'The plugin correctly used image/webp from the data URL, not image/png from contentType',
+ };
+});
diff --git a/js/testapps/anthropic/tsconfig.json b/js/testapps/anthropic/tsconfig.json
new file mode 100644
index 0000000000..efbb566bf7
--- /dev/null
+++ b/js/testapps/anthropic/tsconfig.json
@@ -0,0 +1,14 @@
+{
+ "compileOnSave": true,
+ "include": ["src"],
+ "compilerOptions": {
+ "module": "commonjs",
+ "noImplicitReturns": true,
+ "outDir": "lib",
+ "sourceMap": true,
+ "strict": true,
+ "target": "es2017",
+ "skipLibCheck": true,
+ "esModuleInterop": true
+ }
+}