From c427bd1d07faec643ed9c6d1eea429932946057d Mon Sep 17 00:00:00 2001 From: Ashish Shubham Date: Fri, 17 Oct 2025 13:00:22 -0700 Subject: [PATCH 1/2] Handle OpenAI API MCP bug --- package-lock.json | 468 +++++- package.json | 6 +- src/cloudflare-utils.ts | 18 + worker-configuration.d.ts | 3346 +++++++++++++++++++++++++++++++++---- 4 files changed, 3411 insertions(+), 427 deletions(-) diff --git a/package-lock.json b/package-lock.json index f43ff1b..54a8da2 100644 --- a/package-lock.json +++ b/package-lock.json @@ -12,9 +12,9 @@ "@cloudflare/workers-oauth-provider": "^0.0.5", "@hono/zod-validator": "^0.7.2", "@microlabs/otel-cf-workers": "^1.0.0-rc.52", - "@modelcontextprotocol/sdk": "^1.18.0", + "@modelcontextprotocol/sdk": "^1.20.1", "@thoughtspot/rest-api-sdk": "^2.13.1", - "agents": "^0.1.3", + "agents": "^0.2.14", "hono": "^4.9.7", "rxjs": "^7.8.2", "yaml": "^2.7.1", @@ -41,26 +41,43 @@ "typescript": "^5.8.3", "vitest": "^3.1.2", "workers-mcp": "^0.0.13", - "wrangler": "^4.20.0" + "wrangler": "^4.43.0" }, "optionalDependencies": { "express": "^5.1.0" } }, "node_modules/@ai-sdk/gateway": { - "version": "1.0.20", - "resolved": "https://registry.npmjs.org/@ai-sdk/gateway/-/gateway-1.0.20.tgz", - "integrity": "sha512-2K0kGnHyLfT1v2+3xXbLqohfWBJ/vmIh1FTWnrvZfvuUuBdOi2DMgnSQzkLvFVLyM8mhOcx+ZwU6IOOsuyOv/w==", + "version": "1.0.39", + "resolved": "https://registry.npmjs.org/@ai-sdk/gateway/-/gateway-1.0.39.tgz", + "integrity": "sha512-ijYCKG2sbn2RBVfIgaXNXvzHAf2HpFXxQODtjMI+T7Z4CLryflytchsZZ9qrGtsjiQVopKOV6m6kj4lq5fnbsg==", "license": "Apache-2.0", "dependencies": { "@ai-sdk/provider": "2.0.0", - "@ai-sdk/provider-utils": "3.0.8" + "@ai-sdk/provider-utils": "3.0.12", + "@vercel/oidc": "3.0.2" }, "engines": { "node": ">=18" }, "peerDependencies": { - "zod": "^3.25.76 || ^4" + "zod": "^3.25.76 || ^4.1.8" + } + }, + "node_modules/@ai-sdk/openai": { + "version": "2.0.48", + "resolved": "https://registry.npmjs.org/@ai-sdk/openai/-/openai-2.0.48.tgz", + "integrity": "sha512-dIGOVtHaScTNIQzxkE4I8T5PpoutFWxonR/awdRz+5sCpoO7V2kVL44+X6piJbQIMdFYUK/h+HTX3+BjTbRHmw==", + "license": "Apache-2.0", + "dependencies": { + "@ai-sdk/provider": "2.0.0", + "@ai-sdk/provider-utils": "3.0.12" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "zod": "^3.25.76 || ^4.1.8" } }, "node_modules/@ai-sdk/provider": { @@ -76,9 +93,9 @@ } }, "node_modules/@ai-sdk/provider-utils": { - "version": "3.0.8", - "resolved": "https://registry.npmjs.org/@ai-sdk/provider-utils/-/provider-utils-3.0.8.tgz", - "integrity": "sha512-cDj1iigu7MW2tgAQeBzOiLhjHOUM9vENsgh4oAVitek0d//WdgfPCsKO3euP7m7LyO/j9a1vr/So+BGNdpFXYw==", + "version": "3.0.12", + "resolved": "https://registry.npmjs.org/@ai-sdk/provider-utils/-/provider-utils-3.0.12.tgz", + "integrity": "sha512-ZtbdvYxdMoria+2SlNarEk6Hlgyf+zzcznlD55EAl+7VZvJaSg2sqPvwArY7L6TfDEDJsnCq0fdhBSkYo0Xqdg==", "license": "Apache-2.0", "dependencies": { "@ai-sdk/provider": "2.0.0", @@ -89,7 +106,7 @@ "node": ">=18" }, "peerDependencies": { - "zod": "^3.25.76 || ^4" + "zod": "^3.25.76 || ^4.1.8" } }, "node_modules/@ampproject/remapping": { @@ -117,6 +134,23 @@ "@jridgewell/sourcemap-codec": "^1.4.14" } }, + "node_modules/@apidevtools/json-schema-ref-parser": { + "version": "11.9.3", + "resolved": "https://registry.npmjs.org/@apidevtools/json-schema-ref-parser/-/json-schema-ref-parser-11.9.3.tgz", + "integrity": "sha512-60vepv88RwcJtSHrD6MjIL6Ta3SOYbgfnkHb+ppAVK+o9mXprRtulx7VlRl3lN3bbvysAfCS7WMVfhUYemB0IQ==", + "license": "MIT", + "dependencies": { + "@jsdevtools/ono": "^7.1.3", + 
"@types/json-schema": "^7.0.15", + "js-yaml": "^4.1.0" + }, + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/philsturgeon" + } + }, "node_modules/@babel/code-frame": { "version": "7.27.1", "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", @@ -645,6 +679,48 @@ "vitest": "2.0.x - 3.2.x" } }, + "node_modules/@cloudflare/vitest-pool-workers/node_modules/path-to-regexp": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.3.0.tgz", + "integrity": "sha512-Yhpw4T9C6hPpgPeA28us07OJeqZ5EzQTkbfwuhsUg0c237RomFoETJgmp2sa3F/41gfLE6G5cqcYwznmeEeOlQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@cloudflare/vitest-pool-workers/node_modules/wrangler": { + "version": "4.35.0", + "resolved": "https://registry.npmjs.org/wrangler/-/wrangler-4.35.0.tgz", + "integrity": "sha512-HbyXtbrh4Fi3mU8ussY85tVdQ74qpVS1vctUgaPc+bPrXBTqfDLkZ6VRtHAVF/eBhz4SFmhJtCQpN1caY2Ak8A==", + "dev": true, + "license": "MIT OR Apache-2.0", + "dependencies": { + "@cloudflare/kv-asset-handler": "0.4.0", + "@cloudflare/unenv-preset": "2.7.3", + "blake3-wasm": "2.1.5", + "esbuild": "0.25.4", + "miniflare": "4.20250906.0", + "path-to-regexp": "6.3.0", + "unenv": "2.0.0-rc.21", + "workerd": "1.20250906.0" + }, + "bin": { + "wrangler": "bin/wrangler.js", + "wrangler2": "bin/wrangler.js" + }, + "engines": { + "node": ">=18.0.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + }, + "peerDependencies": { + "@cloudflare/workers-types": "^4.20250906.0" + }, + "peerDependenciesMeta": { + "@cloudflare/workers-types": { + "optional": true + } + } + }, "node_modules/@cloudflare/workerd-darwin-64": { "version": "1.20250906.0", "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-64/-/workerd-darwin-64-1.20250906.0.tgz", @@ -740,9 +816,9 @@ } }, "node_modules/@cloudflare/workers-types": { - "version": "4.20250913.0", - "resolved": "https://registry.npmjs.org/@cloudflare/workers-types/-/workers-types-4.20250913.0.tgz", - "integrity": "sha512-JjrYEvRn7cyALxwoFTw3XChaQneHSJOXqz2t5iKEpNzAnC2iPQU75rtTK/gw03Jjy4SHY5aEBh/uqQePtonZlA==", + "version": "4.20251011.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workers-types/-/workers-types-4.20251011.0.tgz", + "integrity": "sha512-gQpih+pbq3sP4uXltUeCSbPgZxTNp2gQd8639SaIbQMwgA6oJNHLhIART1fWy6DQACngiRzDVULA2x0ohmkGTQ==", "license": "MIT OR Apache-2.0" }, "node_modules/@cspotcode/source-map-support": { @@ -1674,6 +1750,12 @@ "@jridgewell/sourcemap-codec": "^1.4.10" } }, + "node_modules/@jsdevtools/ono": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/@jsdevtools/ono/-/ono-7.1.3.tgz", + "integrity": "sha512-4JQNk+3mVzK3xh2rqd6RB4J46qUR19azEHBneZyTZM+c456qOrbbM/5xcR8huNCCcbVt7+UmizG6GuUvPvKUYg==", + "license": "MIT" + }, "node_modules/@jsdoc/salty": { "version": "0.2.9", "resolved": "https://registry.npmjs.org/@jsdoc/salty/-/salty-0.2.9.tgz", @@ -1705,9 +1787,9 @@ } }, "node_modules/@modelcontextprotocol/sdk": { - "version": "1.18.0", - "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.18.0.tgz", - "integrity": "sha512-JvKyB6YwS3quM+88JPR0axeRgvdDu3Pv6mdZUy+w4qVkCzGgumb9bXG/TmtDRQv+671yaofVfXSQmFLlWU5qPQ==", + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.20.1.tgz", + "integrity": "sha512-j/P+yuxXfgxb+mW7OEoRCM3G47zCTDqUPivJo/VzpjbG8I9csTXtOprCf5FfOfHK4whOJny0aHuBEON+kS7CCA==", "license": "MIT", "dependencies": { "ajv": "^6.12.6", @@ -2611,12 +2693,24 @@ 
"dev": true, "license": "MIT" }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "license": "MIT" + }, "node_modules/@types/linkify-it": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/@types/linkify-it/-/linkify-it-5.0.0.tgz", "integrity": "sha512-sVDA58zAw4eWAffKOaQH5/5j3XeayukzDk+ewSsnv3p4yJEZHCCzMDiZM8e0OUrRvmpGZ85jf4yDHkHsgBNr9Q==", "dev": true }, + "node_modules/@types/lodash": { + "version": "4.17.20", + "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.20.tgz", + "integrity": "sha512-H3MHACvFUEiujabxhaI/ImO6gUrd8oOurg7LQtS7mbwIXA/cUqWrvBsaeJ23aZEPk1TAYkurjfMbSELfoCXlGA==", + "license": "MIT" + }, "node_modules/@types/markdown-it": { "version": "14.1.2", "resolved": "https://registry.npmjs.org/@types/markdown-it/-/markdown-it-14.1.2.tgz", @@ -2703,6 +2797,15 @@ "@types/send": "*" } }, + "node_modules/@vercel/oidc": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@vercel/oidc/-/oidc-3.0.2.tgz", + "integrity": "sha512-JekxQ0RApo4gS4un/iMGsIL1/k4KUBe3HmnGcDvzHuFBdQdudEJgTqcsJC7y6Ul4Yw5CeykgvQbX2XeEJd0+DA==", + "license": "Apache-2.0", + "engines": { + "node": ">= 20" + } + }, "node_modules/@vitest/coverage-istanbul": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/@vitest/coverage-istanbul/-/coverage-istanbul-3.1.2.tgz", @@ -2910,40 +3013,54 @@ } }, "node_modules/agents": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/agents/-/agents-0.1.3.tgz", - "integrity": "sha512-n7H7GtdAvQ+VMtninCnS8uA3Uy7ypZcqf0ogKFFTSksivXVhUKfb5jg/auCfgr2wvJM6vZ8EUWX3j6aGVAbYJA==", + "version": "0.2.14", + "resolved": "https://registry.npmjs.org/agents/-/agents-0.2.14.tgz", + "integrity": "sha512-DjMZomVmB11DCBglkeVyeHhb9FpdEfdtXT6+ZPpda7HT0yQxz1zDu6scKSpyodTu1sS7O3tA7xo4F1mvW+7M+Q==", "license": "MIT", "dependencies": { - "@modelcontextprotocol/sdk": "^1.18.0", - "ai": "5.0.39", + "@ai-sdk/openai": "2.0.48", + "@modelcontextprotocol/sdk": "^1.20.0", + "ai": "5.0.68", "cron-schedule": "^5.0.4", + "json-schema": "^0.4.0", + "json-schema-to-typescript": "^15.0.4", "mimetext": "^3.0.27", - "nanoid": "^5.1.5", - "partyserver": "^0.0.73", - "partysocket": "1.1.5", - "zod": "^3.25.76" + "nanoid": "^5.1.6", + "partyserver": "^0.0.75", + "partysocket": "1.1.6", + "zod": "^3.25.76", + "zod-to-ts": "^1.2.0" }, "peerDependencies": { - "react": "*" + "react": "*", + "viem": ">=2.0.0", + "x402": "^0.6.5" + }, + "peerDependenciesMeta": { + "viem": { + "optional": true + }, + "x402": { + "optional": true + } } }, "node_modules/ai": { - "version": "5.0.39", - "resolved": "https://registry.npmjs.org/ai/-/ai-5.0.39.tgz", - "integrity": "sha512-1AOjTHY8MUY4T/X/I+otGTbvKmMQCCGWffuVDyQ21l/2Vv/QoLZcw+ZZHVvp+wvQcPOsfXjURGSFZtin7rnghA==", + "version": "5.0.68", + "resolved": "https://registry.npmjs.org/ai/-/ai-5.0.68.tgz", + "integrity": "sha512-SB6r+4TkKVlSg2ozGBSfuf6Is5hrcX/bpGBzOoyHIN3b4ILGhaly0IHEvP8+3GGIHXqtkPVEUmR6V05jKdjNlg==", "license": "Apache-2.0", "dependencies": { - "@ai-sdk/gateway": "1.0.20", + "@ai-sdk/gateway": "1.0.39", "@ai-sdk/provider": "2.0.0", - "@ai-sdk/provider-utils": "3.0.8", + "@ai-sdk/provider-utils": "3.0.12", "@opentelemetry/api": "1.9.0" }, "engines": { "node": ">=18" }, "peerDependencies": { - "zod": "^3.25.76 || ^4" + "zod": "^3.25.76 || ^4.1.8" } }, "node_modules/ajv": { @@ -2991,8 +3108,7 @@ 
"node_modules/argparse": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" }, "node_modules/array-back": { "version": "6.2.2", @@ -4254,7 +4370,6 @@ "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", - "dev": true, "engines": { "node": ">=0.10.0" } @@ -4273,7 +4388,6 @@ "version": "4.0.3", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", - "dev": true, "dependencies": { "is-extglob": "^2.1.1" }, @@ -4411,6 +4525,18 @@ "dev": true, "license": "MIT" }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, "node_modules/js2xmlparser": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/js2xmlparser/-/js2xmlparser-4.0.2.tgz", @@ -4494,6 +4620,29 @@ "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==", "license": "(AFL-2.1 OR BSD-3-Clause)" }, + "node_modules/json-schema-to-typescript": { + "version": "15.0.4", + "resolved": "https://registry.npmjs.org/json-schema-to-typescript/-/json-schema-to-typescript-15.0.4.tgz", + "integrity": "sha512-Su9oK8DR4xCmDsLlyvadkXzX6+GGXJpbhwoLtOGArAG61dvbW4YQmSEno2y66ahpIdmLMg6YUf/QHLgiwvkrHQ==", + "license": "MIT", + "dependencies": { + "@apidevtools/json-schema-ref-parser": "^11.5.5", + "@types/json-schema": "^7.0.15", + "@types/lodash": "^4.17.7", + "is-glob": "^4.0.3", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "minimist": "^1.2.8", + "prettier": "^3.2.5", + "tinyglobby": "^0.2.9" + }, + "bin": { + "json2ts": "dist/src/cli.js" + }, + "engines": { + "node": ">=16.0.0" + } + }, "node_modules/json-schema-traverse": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", @@ -4568,8 +4717,7 @@ "node_modules/lodash": { "version": "4.17.21", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "dev": true + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" }, "node_modules/long": { "version": "5.3.2", @@ -4858,6 +5006,15 @@ "url": "https://github.com/sponsors/isaacs" } }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/minipass": { "version": "7.1.2", "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", @@ -4886,9 +5043,9 @@ "integrity": 
"sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" }, "node_modules/nanoid": { - "version": "5.1.5", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-5.1.5.tgz", - "integrity": "sha512-Ir/+ZpE9fDsNH0hQ3C68uyThDXzYcim2EqcZ8zn8Chtt1iylPT9xXJB0kPCnqzgcEGikO9RxSrh63MsmVCU7Fw==", + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-5.1.6.tgz", + "integrity": "sha512-c7+7RQ+dMB5dPwwCp4ee1/iV/q2P6aK1mTZcfr1BTuVlyW9hJYiMPybJCcnBlQtuSmTIWNeazm/zqNoZSSElBg==", "funding": [ { "type": "github", @@ -5065,21 +5222,21 @@ } }, "node_modules/partyserver": { - "version": "0.0.73", - "resolved": "https://registry.npmjs.org/partyserver/-/partyserver-0.0.73.tgz", - "integrity": "sha512-SSnlX+abSbMYM4qVndqoDdBbqD8W8uDUZOS4P7dyDlf8FNaRKryNnlU3gdnUZnMVmAkXWl7OJs5jdt932dCF5Q==", + "version": "0.0.75", + "resolved": "https://registry.npmjs.org/partyserver/-/partyserver-0.0.75.tgz", + "integrity": "sha512-i/18vvdxuGjx+rpQ+fDdExlvQoRb7EfTF+6b+kA2ILEpHemtpLWV8NdgDrOPEklRNdCc/4WlzDtYn05d17aZAQ==", "license": "ISC", "dependencies": { - "nanoid": "^5.1.5" + "nanoid": "^5.1.6" }, "peerDependencies": { "@cloudflare/workers-types": "^4.20240729.0" } }, "node_modules/partysocket": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/partysocket/-/partysocket-1.1.5.tgz", - "integrity": "sha512-8uw9foq9bij4sKLCtTSHvyqMrMTQ5FJjrHc7BjoM2s95Vu7xYCN63ABpI7OZHC7ZMP5xaom/A+SsoFPXmTV6ZQ==", + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/partysocket/-/partysocket-1.1.6.tgz", + "integrity": "sha512-LkEk8N9hMDDsDT0iDK0zuwUDFVrVMUXFXCeN3850Ng8wtjPqPBeJlwdeY6ROlJSEh3tPoTTasXoSBYH76y118w==", "license": "MIT", "dependencies": { "event-target-polyfill": "^0.0.4" @@ -5208,6 +5365,21 @@ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" } }, + "node_modules/prettier": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz", + "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==", + "license": "MIT", + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, "node_modules/protobufjs": { "version": "7.5.3", "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.5.3.tgz", @@ -5897,7 +6069,6 @@ "version": "0.2.13", "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.13.tgz", "integrity": "sha512-mEwzpUgrLySlveBwEVDMKk5B57bhLPYovRfPAXD5gA/98Opn0rCDj3GtLwFvCvH5RK9uPCExUROW5NjDwvqkxw==", - "dev": true, "license": "MIT", "dependencies": { "fdir": "^6.4.4", @@ -5914,7 +6085,6 @@ "version": "6.4.4", "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.4.4.tgz", "integrity": "sha512-1NZP+GK4GfuAv3PqKvxQRDMjdSRZjnkq7KfhlNrCNNlZ0ygQFpebfrnfnq/W7fpUnAv9aGWmY1zKx7FYL3gwhg==", - "dev": true, "license": "MIT", "peerDependencies": { "picomatch": "^3 || ^4" @@ -5929,7 +6099,6 @@ "version": "4.0.2", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz", "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==", - "dev": true, "license": "MIT", "engines": { "node": ">=12" @@ -6081,7 +6250,6 @@ "version": "5.8.3", "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.3.tgz", "integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==", - "dev": true, "bin": { "tsc": "bin/tsc", "tsserver": 
"bin/tsserver" @@ -6520,20 +6688,20 @@ } }, "node_modules/wrangler": { - "version": "4.35.0", - "resolved": "https://registry.npmjs.org/wrangler/-/wrangler-4.35.0.tgz", - "integrity": "sha512-HbyXtbrh4Fi3mU8ussY85tVdQ74qpVS1vctUgaPc+bPrXBTqfDLkZ6VRtHAVF/eBhz4SFmhJtCQpN1caY2Ak8A==", + "version": "4.43.0", + "resolved": "https://registry.npmjs.org/wrangler/-/wrangler-4.43.0.tgz", + "integrity": "sha512-IBNqXlYHSUSCNNWj/tQN4hFiQy94l7fTxEnJWETXyW69+cjUyjQ7MfeoId3vIV9KBgY8y5M5uf2XulU95OikJg==", "dev": true, "license": "MIT OR Apache-2.0", "dependencies": { "@cloudflare/kv-asset-handler": "0.4.0", - "@cloudflare/unenv-preset": "2.7.3", + "@cloudflare/unenv-preset": "2.7.7", "blake3-wasm": "2.1.5", "esbuild": "0.25.4", - "miniflare": "4.20250906.0", + "miniflare": "4.20251008.0", "path-to-regexp": "6.3.0", "unenv": "2.0.0-rc.21", - "workerd": "1.20250906.0" + "workerd": "1.20251008.0" }, "bin": { "wrangler": "bin/wrangler.js", @@ -6546,7 +6714,7 @@ "fsevents": "~2.3.2" }, "peerDependencies": { - "@cloudflare/workers-types": "^4.20250906.0" + "@cloudflare/workers-types": "^4.20251008.0" }, "peerDependenciesMeta": { "@cloudflare/workers-types": { @@ -6554,6 +6722,134 @@ } } }, + "node_modules/wrangler/node_modules/@cloudflare/unenv-preset": { + "version": "2.7.7", + "resolved": "https://registry.npmjs.org/@cloudflare/unenv-preset/-/unenv-preset-2.7.7.tgz", + "integrity": "sha512-HtZuh166y0Olbj9bqqySckz0Rw9uHjggJeoGbDx5x+sgezBXlxO6tQSig2RZw5tgObF8mWI8zaPvQMkQZtAODw==", + "dev": true, + "license": "MIT OR Apache-2.0", + "peerDependencies": { + "unenv": "2.0.0-rc.21", + "workerd": "^1.20250927.0" + }, + "peerDependenciesMeta": { + "workerd": { + "optional": true + } + } + }, + "node_modules/wrangler/node_modules/@cloudflare/workerd-darwin-64": { + "version": "1.20251008.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-64/-/workerd-darwin-64-1.20251008.0.tgz", + "integrity": "sha512-yph0H+8mMOK5Z9oDwjb8rI96oTVt4no5lZ43aorcbzsWG9VUIaXSXlBBoB3von6p4YCRW+J3n36fBM9XZ6TLaA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=16" + } + }, + "node_modules/wrangler/node_modules/@cloudflare/workerd-darwin-arm64": { + "version": "1.20251008.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-arm64/-/workerd-darwin-arm64-1.20251008.0.tgz", + "integrity": "sha512-Yc4lMGSbM4AEtYRpyDpmk77MsHb6X2BSwJgMgGsLVPmckM7ZHivZkJChfcNQjZ/MGR6nkhYc4iF6TcVS+UMEVw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=16" + } + }, + "node_modules/wrangler/node_modules/@cloudflare/workerd-linux-64": { + "version": "1.20251008.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-64/-/workerd-linux-64-1.20251008.0.tgz", + "integrity": "sha512-AjoQnylw4/5G6SmfhZRsli7EuIK7ZMhmbxtU0jkpciTlVV8H01OsFOgS1d8zaTXMfkWamEfMouy8oH/L7B9YcQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=16" + } + }, + "node_modules/wrangler/node_modules/@cloudflare/workerd-linux-arm64": { + "version": "1.20251008.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-arm64/-/workerd-linux-arm64-1.20251008.0.tgz", + "integrity": "sha512-hRy9yyvzVq1HsqHZUmFkAr0C8JGjAD/PeeVEGCKL3jln3M9sNCKIrbDXiL+efe+EwajJNNlDxpO+s30uVWVaRg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "Apache-2.0", + "optional": 
true, + "os": [ + "linux" + ], + "engines": { + "node": ">=16" + } + }, + "node_modules/wrangler/node_modules/@cloudflare/workerd-windows-64": { + "version": "1.20251008.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-windows-64/-/workerd-windows-64-1.20251008.0.tgz", + "integrity": "sha512-Gm0RR+ehfNMsScn2pUcn3N9PDUpy7FyvV9ecHEyclKttvztyFOcmsF14bxEaSVv7iM4TxWEBn1rclmYHxDM4ow==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "Apache-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=16" + } + }, + "node_modules/wrangler/node_modules/miniflare": { + "version": "4.20251008.0", + "resolved": "https://registry.npmjs.org/miniflare/-/miniflare-4.20251008.0.tgz", + "integrity": "sha512-sKCNYNzXG6l8qg0Oo7y8WcDKcpbgw0qwZsxNpdZilFTR4EavRow2TlcwuPSVN99jqAjhz0M4VXvTdSGdtJ2VfQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@cspotcode/source-map-support": "0.8.1", + "acorn": "8.14.0", + "acorn-walk": "8.3.2", + "exit-hook": "2.2.1", + "glob-to-regexp": "0.4.1", + "sharp": "^0.33.5", + "stoppable": "1.1.0", + "undici": "7.14.0", + "workerd": "1.20251008.0", + "ws": "8.18.0", + "youch": "4.1.0-beta.10", + "zod": "3.22.3" + }, + "bin": { + "miniflare": "bootstrap.js" + }, + "engines": { + "node": ">=18.0.0" + } + }, "node_modules/wrangler/node_modules/path-to-regexp": { "version": "6.3.0", "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.3.0.tgz", @@ -6561,6 +6857,47 @@ "dev": true, "license": "MIT" }, + "node_modules/wrangler/node_modules/undici": { + "version": "7.14.0", + "resolved": "https://registry.npmjs.org/undici/-/undici-7.14.0.tgz", + "integrity": "sha512-Vqs8HTzjpQXZeXdpsfChQTlafcMQaaIwnGwLam1wudSSjlJeQ3bw1j+TLPePgrCnCpUXx7Ba5Pdpf5OBih62NQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=20.18.1" + } + }, + "node_modules/wrangler/node_modules/workerd": { + "version": "1.20251008.0", + "resolved": "https://registry.npmjs.org/workerd/-/workerd-1.20251008.0.tgz", + "integrity": "sha512-HwaJmXO3M1r4S8x2ea2vy8Rw/y/38HRQuK/gNDRQ7w9cJXn6xSl1sIIqKCffULSUjul3wV3I3Nd/GfbmsRReEA==", + "dev": true, + "hasInstallScript": true, + "license": "Apache-2.0", + "bin": { + "workerd": "bin/workerd" + }, + "engines": { + "node": ">=16" + }, + "optionalDependencies": { + "@cloudflare/workerd-darwin-64": "1.20251008.0", + "@cloudflare/workerd-darwin-arm64": "1.20251008.0", + "@cloudflare/workerd-linux-64": "1.20251008.0", + "@cloudflare/workerd-linux-arm64": "1.20251008.0", + "@cloudflare/workerd-windows-64": "1.20251008.0" + } + }, + "node_modules/wrangler/node_modules/zod": { + "version": "3.22.3", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.22.3.tgz", + "integrity": "sha512-EjIevzuJRiRPbVH4mGc8nApb/lVLKVpmUhAaR5R5doKGfAnGJ6Gr3CViAVjP+4FWSxCsybeWQdcgCtbX+7oZug==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + }, "node_modules/wrap-ansi": { "version": "8.1.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", @@ -6763,6 +7100,15 @@ "peerDependencies": { "zod": "^3.24.1" } + }, + "node_modules/zod-to-ts": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/zod-to-ts/-/zod-to-ts-1.2.0.tgz", + "integrity": "sha512-x30XE43V+InwGpvTySRNz9kB7qFU8DlyEy7BsSTCHPH1R0QasMmHWZDCzYm6bVXtj/9NNJAZF3jW8rzFvH5OFA==", + "peerDependencies": { + "typescript": "^4.9.4 || ^5.0.2", + "zod": "^3" + } } } } diff --git a/package.json b/package.json index fd455c4..4d03b61 100644 --- a/package.json +++ b/package.json @@ -45,15 
+45,15 @@ "typescript": "^5.8.3", "vitest": "^3.1.2", "workers-mcp": "^0.0.13", - "wrangler": "^4.20.0" + "wrangler": "^4.43.0" }, "dependencies": { "@cloudflare/workers-oauth-provider": "^0.0.5", "@hono/zod-validator": "^0.7.2", "@microlabs/otel-cf-workers": "^1.0.0-rc.52", - "@modelcontextprotocol/sdk": "^1.18.0", + "@modelcontextprotocol/sdk": "^1.20.1", "@thoughtspot/rest-api-sdk": "^2.13.1", - "agents": "^0.1.3", + "agents": "^0.2.14", "hono": "^4.9.7", "rxjs": "^7.8.2", "yaml": "^2.7.1", diff --git a/src/cloudflare-utils.ts b/src/cloudflare-utils.ts index 52c3314..bc360bc 100644 --- a/src/cloudflare-utils.ts +++ b/src/cloudflare-utils.ts @@ -18,6 +18,24 @@ export function instrumentedMCPServer(MCPServer: new (c async init() { await this.server.init(); } + + public static serve(path: string) { + const server = super.serve(path, { + corsOptions: { + headers: "Content-Type, Accept, mcp-session-id, mcp-protocol-version, Authorization, x-ts-host" + } + }); + const serverFetch = server.fetch; + server.fetch = async (request: Request, env: any, ctx: ExecutionContext) => { + // Due to https://community.openai.com/t/the-responses-api-terminates-a-session-too-early/1312539/16 + // we need to ignore DELETE requests from OpenAI MCP clients, as the DELETE terminates the session too early. + if (request.method === "DELETE" && request.headers.get("user-agent")?.includes("openai-mcp")) { + return new Response(null, { status: 403 }); + } + return serverFetch(request, env, ctx); + } + return server; + } } return instrumentDO(Agent, config) as unknown as typeof Agent; diff --git a/worker-configuration.d.ts b/worker-configuration.d.ts index 97cd150..4e97ea3 100644 --- a/worker-configuration.d.ts +++ b/worker-configuration.d.ts @@ -1,7 +1,11 @@ /* eslint-disable */ -// Generated by Wrangler by running `wrangler types` (hash: 795c945834fb09a44599f2298322698f) -// Runtime types generated with workerd@1.20250417.0 2025-04-17 nodejs_compat +// Generated by Wrangler by running `wrangler types` (hash: dd8f74b737332ef00b9f3db968b07266) +// Runtime types generated with workerd@1.20251008.0 2025-04-17 nodejs_compat declare namespace Cloudflare { + interface GlobalProps { + mainModule: typeof import("./src/index"); + durableNamespaces: "ThoughtSpotMCP" | "ThoughtSpotOpenAIDeepResearchMCP"; + } interface Env { OAUTH_KV: KVNamespace; MCP_OBJECT: DurableObjectNamespace; @@ -92,7 +96,7 @@ interface Console { clear(): void; /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/count_static) */ count(label?: string): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/countreset_static) */ + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/countReset_static) */ countReset(label?: string): void; /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/debug_static) */ debug(...data: any[]): void; /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/dir_static) */ dir(item?: any, options?: any): void; /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/error_static) */ error(...data: any[]): void; /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/group_static) */ group(...data: any[]): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/groupcollapsed_static) */ + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/groupCollapsed_static) */ groupCollapsed(...data: any[]): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/groupend_static) */ + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/groupEnd_static) */ groupEnd(): void; /* [MDN 
Reference](https://developer.mozilla.org/docs/Web/API/console/info_static) */ info(...data: any[]): void; @@ -116,9 +120,9 @@ interface Console { table(tabularData?: any, properties?: string[]): void; /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/time_static) */ time(label?: string): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/timeend_static) */ + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/timeEnd_static) */ timeEnd(label?: string): void; - /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/timelog_static) */ + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/timeLog_static) */ timeLog(label?: string, ...data: any[]): void; timeStamp(label?: string): void; /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/trace_static) */ @@ -293,25 +297,25 @@ declare function dispatchEvent(event: WorkerGlobalScopeEventMap[keyof WorkerGlob declare function btoa(data: string): string; /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/atob) */ declare function atob(data: string): string; -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/setTimeout) */ +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/setTimeout) */ declare function setTimeout(callback: (...args: any[]) => void, msDelay?: number): number; -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/setTimeout) */ +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/setTimeout) */ declare function setTimeout(callback: (...args: Args) => void, msDelay?: number, ...args: Args): number; -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/clearTimeout) */ +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/clearTimeout) */ declare function clearTimeout(timeoutId: number | null): void; -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/setInterval) */ +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/setInterval) */ declare function setInterval(callback: (...args: any[]) => void, msDelay?: number): number; -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/setInterval) */ +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/setInterval) */ declare function setInterval(callback: (...args: Args) => void, msDelay?: number, ...args: Args): number; -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/clearInterval) */ +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/clearInterval) */ declare function clearInterval(timeoutId: number | null): void; -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/queueMicrotask) */ +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/queueMicrotask) */ declare function queueMicrotask(task: Function): void; -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/structuredClone) */ +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/structuredClone) */ declare function structuredClone(value: T, options?: StructuredSerializeOptions): T; -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/reportError) */ +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/reportError) */ declare function reportError(error: any): void; -/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/fetch) */ +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/fetch) */ declare function fetch(input: RequestInfo | 
URL, init?: RequestInit): Promise; declare const self: ServiceWorkerGlobalScope; /** @@ -342,15 +346,15 @@ declare const origin: string; declare const navigator: Navigator; interface TestController { } -interface ExecutionContext { +interface ExecutionContext { waitUntil(promise: Promise): void; passThroughOnException(): void; - props: any; + readonly props: Props; } type ExportedHandlerFetchHandler = (request: Request>, env: Env, ctx: ExecutionContext) => Response | Promise; type ExportedHandlerTailHandler = (events: TraceItem[], env: Env, ctx: ExecutionContext) => void | Promise; type ExportedHandlerTraceHandler = (traces: TraceItem[], env: Env, ctx: ExecutionContext) => void | Promise; -type ExportedHandlerTailStreamHandler = (event: TailStream.TailEvent, env: Env, ctx: ExecutionContext) => TailStream.TailEventHandlerType | Promise; +type ExportedHandlerTailStreamHandler = (event: TailStream.TailEvent, env: Env, ctx: ExecutionContext) => TailStream.TailEventHandlerType | Promise; type ExportedHandlerScheduledHandler = (controller: ScheduledController, env: Env, ctx: ExecutionContext) => void | Promise; type ExportedHandlerQueueHandler = (batch: MessageBatch, env: Env, ctx: ExecutionContext) => void | Promise; type ExportedHandlerTestHandler = (controller: TestController, env: Env, ctx: ExecutionContext) => void | Promise; @@ -379,18 +383,6 @@ declare abstract class Navigator { readonly userAgent: string; readonly hardwareConcurrency: number; } -/** -* The Workers runtime supports a subset of the Performance API, used to measure timing and performance, -* as well as timing of subrequests and other operations. -* -* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/) -*/ -interface Performance { - /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/#performancetimeorigin) */ - readonly timeOrigin: number; - /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/#performancenow) */ - now(): number; -} interface AlarmInvocationInfo { readonly isRetry: boolean; readonly retryCount: number; @@ -414,14 +406,15 @@ interface DurableObjectId { equals(other: DurableObjectId): boolean; readonly name?: string; } -interface DurableObjectNamespace { +declare abstract class DurableObjectNamespace { newUniqueId(options?: DurableObjectNamespaceNewUniqueIdOptions): DurableObjectId; idFromName(name: string): DurableObjectId; idFromString(id: string): DurableObjectId; get(id: DurableObjectId, options?: DurableObjectNamespaceGetDurableObjectOptions): DurableObjectStub; + getByName(name: string, options?: DurableObjectNamespaceGetDurableObjectOptions): DurableObjectStub; jurisdiction(jurisdiction: DurableObjectJurisdiction): DurableObjectNamespace; } -type DurableObjectJurisdiction = "eu" | "fedramp"; +type DurableObjectJurisdiction = "eu" | "fedramp" | "fedramp-high"; interface DurableObjectNamespaceNewUniqueIdOptions { jurisdiction?: DurableObjectJurisdiction; } @@ -429,8 +422,11 @@ type DurableObjectLocationHint = "wnam" | "enam" | "sam" | "weur" | "eeur" | "ap interface DurableObjectNamespaceGetDurableObjectOptions { locationHint?: DurableObjectLocationHint; } -interface DurableObjectState { +interface DurableObjectClass<_T extends Rpc.DurableObjectBranded | undefined = undefined> { +} +interface DurableObjectState { waitUntil(promise: Promise): void; + readonly props: Props; readonly id: DurableObjectId; readonly storage: DurableObjectStorage; container?: Container; @@ 
-473,6 +469,7 @@ interface DurableObjectStorage { deleteAlarm(options?: DurableObjectSetAlarmOptions): Promise; sync(): Promise; sql: SqlStorage; + kv: SyncKvStorage; transactionSync(closure: () => T): T; getCurrentBookmark(): Promise; getBookmarkForTime(timestamp: number | Date): Promise; @@ -790,6 +787,7 @@ declare class Blob { slice(start?: number, end?: number, type?: string): Blob; /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/arrayBuffer) */ arrayBuffer(): Promise; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/bytes) */ bytes(): Promise; /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/text) */ text(): Promise; @@ -1084,10 +1082,15 @@ interface TextEncoderEncodeIntoResult { */ declare class ErrorEvent extends Event { constructor(type: string, init?: ErrorEventErrorEventInit); + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/filename) */ get filename(): string; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/message) */ get message(): string; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/lineno) */ get lineno(): number; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/colno) */ get colno(): number; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/error) */ get error(): any; } interface ErrorEventErrorEventInit { @@ -1097,6 +1100,47 @@ interface ErrorEventErrorEventInit { colno?: number; error?: any; } +/** + * A message received by a target object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent) + */ +declare class MessageEvent extends Event { + constructor(type: string, initializer: MessageEventInit); + /** + * Returns the data of the message. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/data) + */ + readonly data: any; + /** + * Returns the origin of the message, for server-sent events and cross-document messaging. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/origin) + */ + readonly origin: string | null; + /** + * Returns the last event ID string, for server-sent events. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/lastEventId) + */ + readonly lastEventId: string; + /** + * Returns the WindowProxy of the source window, for cross-document messaging, and the MessagePort being attached, in the connect event fired at SharedWorkerGlobalScope objects. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/source) + */ + readonly source: MessagePort | null; + /** + * Returns the MessagePort array sent with the message, for cross-document messaging and channel messaging. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/ports) + */ + readonly ports: MessagePort[]; +} +interface MessageEventInit { + data: ArrayBuffer | string; +} /** * Provides a way to easily construct a set of key/value pairs representing form fields and their values, which can then be easily sent using the XMLHttpRequest.send() method. It uses the same format a form would use if the encoding type were set to "multipart/form-data". 
* @@ -1261,6 +1305,7 @@ declare abstract class Body { get bodyUsed(): boolean; /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/arrayBuffer) */ arrayBuffer(): Promise; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/bytes) */ bytes(): Promise; /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/text) */ text(): Promise; @@ -1372,7 +1417,11 @@ interface Request> e * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/integrity) */ integrity: string; - /* Returns a boolean indicating whether or not request can outlive the global in which it was created. */ + /** + * Returns a boolean indicating whether or not request can outlive the global in which it was created. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/keepalive) + */ keepalive: boolean; /** * Returns the cache mode associated with request, which is a string indicating how the request will interact with the browser's cache when fetching. @@ -1400,7 +1449,7 @@ interface RequestInit { signal?: (AbortSignal | null); encodeResponseBody?: "automatic" | "manual"; } -type Service = Fetcher; +type Service Rpc.WorkerEntrypointBranded) | Rpc.WorkerEntrypointBranded | ExportedHandler | undefined = undefined> = T extends new (...args: any[]) => Rpc.WorkerEntrypointBranded ? Fetcher> : T extends Rpc.WorkerEntrypointBranded ? Fetcher : T extends Exclude ? never : Fetcher; type Fetcher = (T extends Rpc.EntrypointBranded ? Rpc.Provider : unknown) & { fetch(input: RequestInfo | URL, init?: RequestInit): Promise; connect(address: SocketAddress | string, options?: SocketOptions): Socket; @@ -1572,6 +1621,7 @@ interface R2ObjectBody extends R2Object { get body(): ReadableStream; get bodyUsed(): boolean; arrayBuffer(): Promise; + bytes(): Promise; text(): Promise; json(): Promise; blob(): Promise; @@ -1991,6 +2041,7 @@ interface TraceItem { readonly scriptVersion?: ScriptVersion; readonly dispatchNamespace?: string; readonly scriptTags?: string[]; + readonly durableObjectId?: string; readonly outcome: string; readonly executionModel: string; readonly truncated: boolean; @@ -2272,23 +2323,6 @@ interface CloseEventInit { reason?: string; wasClean?: boolean; } -/** - * A message received by a target object. - * - * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent) - */ -declare class MessageEvent extends Event { - constructor(type: string, initializer: MessageEventInit); - /** - * Returns the data of the message. - * - * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/data) - */ - readonly data: ArrayBuffer | string; -} -interface MessageEventInit { - data: ArrayBuffer | string; -} type WebSocketEventMap = { close: CloseEvent; message: MessageEvent; @@ -2476,6 +2510,106 @@ interface ContainerStartupOptions { enableInternet: boolean; env?: Record; } +/** + * This Channel Messaging API interface represents one of the two ports of a MessageChannel, allowing messages to be sent from one port and listening out for them arriving at the other. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessagePort) + */ +interface MessagePort extends EventTarget { + /** + * Posts a message through the channel. Objects listed in transfer are transferred, not just cloned, meaning that they are no longer usable on the sending side. + * + * Throws a "DataCloneError" DOMException if transfer contains duplicate objects or port, or if message could not be cloned. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessagePort/postMessage) + */ + postMessage(data?: any, options?: (any[] | MessagePortPostMessageOptions)): void; + /** + * Disconnects the port, so that it is no longer active. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessagePort/close) + */ + close(): void; + /** + * Begins dispatching messages received on the port. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessagePort/start) + */ + start(): void; + get onmessage(): any | null; + set onmessage(value: any | null); +} +interface MessagePortPostMessageOptions { + transfer?: any[]; +} +type LoopbackForExport Rpc.EntrypointBranded) | ExportedHandler | undefined = undefined> = T extends new (...args: any[]) => Rpc.WorkerEntrypointBranded ? LoopbackServiceStub> : T extends new (...args: any[]) => Rpc.DurableObjectBranded ? LoopbackDurableObjectClass> : T extends ExportedHandler ? LoopbackServiceStub : undefined; +type LoopbackServiceStub = Fetcher & (T extends CloudflareWorkersModule.WorkerEntrypoint ? (opts: { + props?: Props; +}) => Fetcher : (opts: { + props?: any; +}) => Fetcher); +type LoopbackDurableObjectClass = DurableObjectClass & (T extends CloudflareWorkersModule.DurableObject ? (opts: { + props?: Props; +}) => DurableObjectClass : (opts: { + props?: any; +}) => DurableObjectClass); +interface SyncKvStorage { + get(key: string): T | undefined; + list(options?: SyncKvListOptions): Iterable<[ + string, + T + ]>; + put(key: string, value: T): void; + delete(key: string): boolean; +} +interface SyncKvListOptions { + start?: string; + startAfter?: string; + end?: string; + prefix?: string; + reverse?: boolean; + limit?: number; +} +interface WorkerStub { + getEntrypoint(name?: string, options?: WorkerStubEntrypointOptions): Fetcher; +} +interface WorkerStubEntrypointOptions { + props?: any; +} +interface WorkerLoader { + get(name: string, getCode: () => WorkerLoaderWorkerCode | Promise): WorkerStub; +} +interface WorkerLoaderModule { + js?: string; + cjs?: string; + text?: string; + data?: ArrayBuffer; + json?: any; + py?: string; +} +interface WorkerLoaderWorkerCode { + compatibilityDate: string; + compatibilityFlags?: string[]; + allowExperimental?: boolean; + mainModule: string; + modules: Record; + env?: any; + globalOutbound?: (Fetcher | null); + tails?: Fetcher[]; + streamingTails?: Fetcher[]; +} +/** +* The Workers runtime supports a subset of the Performance API, used to measure timing and performance, +* as well as timing of subrequests and other operations. 
+* +* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/) +*/ +declare abstract class Performance { + /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/#performancetimeorigin) */ + get timeOrigin(): number; + /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/#performancenow) */ + now(): number; +} type AiImageClassificationInput = { image: number[]; }; @@ -2530,6 +2664,18 @@ declare abstract class BaseAiImageTextToText { inputs: AiImageTextToTextInput; postProcessedOutputs: AiImageTextToTextOutput; } +type AiMultimodalEmbeddingsInput = { + image: string; + text: string[]; +}; +type AiIMultimodalEmbeddingsOutput = { + data: number[][]; + shape: number[]; +}; +declare abstract class BaseAiMultimodalEmbeddings { + inputs: AiImageTextToTextInput; + postProcessedOutputs: AiImageTextToTextOutput; +} type AiObjectDetectionInput = { image: number[]; }; @@ -2660,13 +2806,28 @@ type AiTextGenerationInput = { tools?: AiTextGenerationToolInput[] | AiTextGenerationToolLegacyInput[] | (object & NonNullable); functions?: AiTextGenerationFunctionsInput[]; }; +type AiTextGenerationToolLegacyOutput = { + name: string; + arguments: unknown; +}; +type AiTextGenerationToolOutput = { + id: string; + type: "function"; + function: { + name: string; + arguments: string; + }; +}; +type UsageTags = { + prompt_tokens: number; + completion_tokens: number; + total_tokens: number; +}; type AiTextGenerationOutput = { response?: string; - tool_calls?: { - name: string; - arguments: unknown; - }[]; -} | ReadableStream; + tool_calls?: AiTextGenerationToolLegacyOutput[] & AiTextGenerationToolOutput[]; + usage?: UsageTags; +}; declare abstract class BaseAiTextGeneration { inputs: AiTextGenerationInput; postProcessedOutputs: AiTextGenerationOutput; @@ -2712,6 +2873,45 @@ declare abstract class BaseAiTranslation { inputs: AiTranslationInput; postProcessedOutputs: AiTranslationOutput; } +type Ai_Cf_Baai_Bge_Base_En_V1_5_Input = { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + */ + pooling?: "mean" | "cls"; +} | { + /** + * Batch of the embeddings requests to run using async-queue + */ + requests: { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + */ + pooling?: "mean" | "cls"; + }[]; +}; +type Ai_Cf_Baai_Bge_Base_En_V1_5_Output = { + shape?: number[]; + /** + * Embeddings of the requested text values + */ + data?: number[][]; + /** + * The pooling method used in the embedding process. + */ + pooling?: "mean" | "cls"; +} | AsyncResponse; +interface AsyncResponse { + /** + * The async request id that can be used to obtain the results. 
+ */ + request_id?: string; +} +declare abstract class Base_Ai_Cf_Baai_Bge_Base_En_V1_5 { + inputs: Ai_Cf_Baai_Bge_Base_En_V1_5_Input; + postProcessedOutputs: Ai_Cf_Baai_Bge_Base_En_V1_5_Output; +} type Ai_Cf_Openai_Whisper_Input = string | { /** * An array of integers that represent the audio data constrained to 8-bit unsigned integer values @@ -2741,6 +2941,114 @@ declare abstract class Base_Ai_Cf_Openai_Whisper { inputs: Ai_Cf_Openai_Whisper_Input; postProcessedOutputs: Ai_Cf_Openai_Whisper_Output; } +type Ai_Cf_Meta_M2M100_1_2B_Input = { + /** + * The text to be translated + */ + text: string; + /** + * The language code of the source text (e.g., 'en' for English). Defaults to 'en' if not specified + */ + source_lang?: string; + /** + * The language code to translate the text into (e.g., 'es' for Spanish) + */ + target_lang: string; +} | { + /** + * Batch of the embeddings requests to run using async-queue + */ + requests: { + /** + * The text to be translated + */ + text: string; + /** + * The language code of the source text (e.g., 'en' for English). Defaults to 'en' if not specified + */ + source_lang?: string; + /** + * The language code to translate the text into (e.g., 'es' for Spanish) + */ + target_lang: string; + }[]; +}; +type Ai_Cf_Meta_M2M100_1_2B_Output = { + /** + * The translated text in the target language + */ + translated_text?: string; +} | AsyncResponse; +declare abstract class Base_Ai_Cf_Meta_M2M100_1_2B { + inputs: Ai_Cf_Meta_M2M100_1_2B_Input; + postProcessedOutputs: Ai_Cf_Meta_M2M100_1_2B_Output; +} +type Ai_Cf_Baai_Bge_Small_En_V1_5_Input = { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + */ + pooling?: "mean" | "cls"; +} | { + /** + * Batch of the embeddings requests to run using async-queue + */ + requests: { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + */ + pooling?: "mean" | "cls"; + }[]; +}; +type Ai_Cf_Baai_Bge_Small_En_V1_5_Output = { + shape?: number[]; + /** + * Embeddings of the requested text values + */ + data?: number[][]; + /** + * The pooling method used in the embedding process. + */ + pooling?: "mean" | "cls"; +} | AsyncResponse; +declare abstract class Base_Ai_Cf_Baai_Bge_Small_En_V1_5 { + inputs: Ai_Cf_Baai_Bge_Small_En_V1_5_Input; + postProcessedOutputs: Ai_Cf_Baai_Bge_Small_En_V1_5_Output; +} +type Ai_Cf_Baai_Bge_Large_En_V1_5_Input = { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. 
+ */ + pooling?: "mean" | "cls"; +} | { + /** + * Batch of the embeddings requests to run using async-queue + */ + requests: { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + */ + pooling?: "mean" | "cls"; + }[]; +}; +type Ai_Cf_Baai_Bge_Large_En_V1_5_Output = { + shape?: number[]; + /** + * Embeddings of the requested text values + */ + data?: number[][]; + /** + * The pooling method used in the embedding process. + */ + pooling?: "mean" | "cls"; +} | AsyncResponse; +declare abstract class Base_Ai_Cf_Baai_Bge_Large_En_V1_5 { + inputs: Ai_Cf_Baai_Bge_Large_En_V1_5_Input; + postProcessedOutputs: Ai_Cf_Baai_Bge_Large_En_V1_5_Output; +} type Ai_Cf_Unum_Uform_Gen2_Qwen_500M_Input = string | { /** * The input text prompt for the model to generate a response. @@ -2832,7 +3140,7 @@ interface Ai_Cf_Openai_Whisper_Large_V3_Turbo_Input { /** * Preprocess the audio with a voice activity detection model. */ - vad_filter?: string; + vad_filter?: boolean; /** * A text prompt to help provide context to the model on the contents of the audio. */ @@ -2922,7 +3230,12 @@ declare abstract class Base_Ai_Cf_Openai_Whisper_Large_V3_Turbo { inputs: Ai_Cf_Openai_Whisper_Large_V3_Turbo_Input; postProcessedOutputs: Ai_Cf_Openai_Whisper_Large_V3_Turbo_Output; } -type Ai_Cf_Baai_Bge_M3_Input = BGEM3InputQueryAndContexts | BGEM3InputEmbedding; +type Ai_Cf_Baai_Bge_M3_Input = BGEM3InputQueryAndContexts | BGEM3InputEmbedding | { + /** + * Batch of the embeddings requests to run using async-queue + */ + requests: (BGEM3InputQueryAndContexts1 | BGEM3InputEmbedding1)[]; +}; interface BGEM3InputQueryAndContexts { /** * A query you wish to perform against the provided contexts. If no query is provided the model with respond with embeddings for contexts @@ -2949,7 +3262,33 @@ interface BGEM3InputEmbedding { */ truncate_inputs?: boolean; } -type Ai_Cf_Baai_Bge_M3_Output = BGEM3OuputQuery | BGEM3OutputEmbeddingForContexts | BGEM3OuputEmbedding; +interface BGEM3InputQueryAndContexts1 { + /** + * A query you wish to perform against the provided contexts. If no query is provided the model with respond with embeddings for contexts + */ + query?: string; + /** + * List of provided contexts. Note that the index in this array is important, as the response will refer to it. + */ + contexts: { + /** + * One of the provided context content + */ + text?: string; + }[]; + /** + * When provided with too long context should the model error out or truncate the context to fit? + */ + truncate_inputs?: boolean; +} +interface BGEM3InputEmbedding1 { + text: string | string[]; + /** + * When provided with too long context should the model error out or truncate the context to fit? + */ + truncate_inputs?: boolean; +} +type Ai_Cf_Baai_Bge_M3_Output = BGEM3OuputQuery | BGEM3OutputEmbeddingForContexts | BGEM3OuputEmbedding | AsyncResponse; interface BGEM3OuputQuery { response?: { /** @@ -3065,23 +3404,48 @@ interface Messages { /** * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). */ - role: string; + role?: string; /** - * The content of the message as a string. + * The tool call id. Must be supplied for tool calls for Mistral-3. 
If you don't know what to put here you can fall back to 000000001 */ - content: string; - }[]; - image?: number[] | string; - functions?: { - name: string; - code: string; - }[]; - /** - * A list of tools available for the assistant to use. - */ - tools?: ({ - /** - * The name of the tool. More descriptive the better. + tool_call_id?: string; + content?: string | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }[] | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }; + }[]; + image?: number[] | (string & NonNullable); + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ + /** + * The name of the tool. More descriptive the better. */ name: string; /** @@ -3218,120 +3582,22 @@ type Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Output = { */ name?: string; }[]; -} | ReadableStream; +}; declare abstract class Base_Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct { inputs: Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Input; postProcessedOutputs: Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Output; } -interface Ai_Cf_Meta_Llama_Guard_3_8B_Input { - /** - * An array of message objects representing the conversation history. - */ - messages: { - /** - * The role of the message sender must alternate between 'user' and 'assistant'. - */ - role: "user" | "assistant"; - /** - * The content of the message as a string. - */ - content: string; - }[]; - /** - * The maximum number of tokens to generate in the response. - */ - max_tokens?: number; - /** - * Controls the randomness of the output; higher values produce more random results. - */ - temperature?: number; - /** - * Dictate the output format of the generated response. - */ - response_format?: { - /** - * Set to json_object to process and output generated text as JSON. - */ - type?: string; - }; -} -interface Ai_Cf_Meta_Llama_Guard_3_8B_Output { - response?: string | { - /** - * Whether the conversation is safe or not. - */ - safe?: boolean; - /** - * A list of what hazard categories predicted for the conversation, if the conversation is deemed unsafe. - */ - categories?: string[]; - }; - /** - * Usage statistics for the inference request - */ - usage?: { - /** - * Total number of tokens in input - */ - prompt_tokens?: number; - /** - * Total number of tokens in output - */ - completion_tokens?: number; - /** - * Total number of input and output tokens - */ - total_tokens?: number; - }; -} -declare abstract class Base_Ai_Cf_Meta_Llama_Guard_3_8B { - inputs: Ai_Cf_Meta_Llama_Guard_3_8B_Input; - postProcessedOutputs: Ai_Cf_Meta_Llama_Guard_3_8B_Output; -} -interface Ai_Cf_Baai_Bge_Reranker_Base_Input { - /** - * A query you wish to perform against the provided contexts. - */ - /** - * Number of returned results starting with the best score. - */ - top_k?: number; - /** - * List of provided contexts. Note that the index in this array is important, as the response will refer to it. - */ - contexts: { - /** - * One of the provided context content - */ - text?: string; - }[]; -} -interface Ai_Cf_Baai_Bge_Reranker_Base_Output { - response?: { - /** - * Index of the context in the request - */ - id?: number; - /** - * Score of the context under the index. 
- */ - score?: number; - }[]; -} -declare abstract class Base_Ai_Cf_Baai_Bge_Reranker_Base { - inputs: Ai_Cf_Baai_Bge_Reranker_Base_Input; - postProcessedOutputs: Ai_Cf_Baai_Bge_Reranker_Base_Output; -} -type Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Input = Ai_Cf_Meta_Llama_4_Prompt | Ai_Cf_Meta_Llama_4_Messages; -interface Ai_Cf_Meta_Llama_4_Prompt { +type Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Input = Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Prompt | Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Messages | AsyncBatch; +interface Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Prompt { /** * The input text prompt for the model to generate a response. */ prompt: string; /** - * JSON schema that should be fulfilled for the response. + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. */ - guided_json?: object; + lora?: string; + response_format?: JSONMode; /** * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. */ @@ -3373,7 +3639,11 @@ interface Ai_Cf_Meta_Llama_4_Prompt { */ presence_penalty?: number; } -interface Ai_Cf_Meta_Llama_4_Messages { +interface JSONMode { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +interface Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Messages { /** * An array of message objects representing the conversation history. */ @@ -3381,36 +3651,11 @@ interface Ai_Cf_Meta_Llama_4_Messages { /** * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). */ - role?: string; + role: string; /** - * The tool call id. Must be supplied for tool calls for Mistral-3. If you don't know what to put here you can fall back to 000000001 + * The content of the message as a string. */ - tool_call_id?: string; - content?: string | { - /** - * Type of the content provided - */ - type?: string; - text?: string; - image_url?: { - /** - * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted - */ - url?: string; - }; - }[] | { - /** - * Type of the content provided - */ - type?: string; - text?: string; - image_url?: { - /** - * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted - */ - url?: string; - }; - }; + content: string; }[]; functions?: { name: string; @@ -3458,135 +3703,2231 @@ interface Ai_Cf_Meta_Llama_4_Messages { }; } | { /** - * Specifies the type of tool (e.g., 'function'). + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + response_format?: JSONMode; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. 
+ */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface AsyncBatch { + requests?: { + /** + * User-supplied reference. This field will be present in the response as well it can be used to reference the request and response. It's NOT validated to be unique. + */ + external_reference?: string; + /** + * Prompt for the text generation model + */ + prompt?: string; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; + response_format?: JSONMode; + }[]; +} +type Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Output = { + /** + * The generated text response from the model + */ + response: string; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + /** + * The name of the tool to be called + */ + name?: string; + }[]; +} | string | AsyncResponse; +declare abstract class Base_Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast { + inputs: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Input; + postProcessedOutputs: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Output; +} +interface Ai_Cf_Meta_Llama_Guard_3_8B_Input { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender must alternate between 'user' and 'assistant'. 
+ */ + role: "user" | "assistant"; + /** + * The content of the message as a string. + */ + content: string; + }[]; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Dictate the output format of the generated response. + */ + response_format?: { + /** + * Set to json_object to process and output generated text as JSON. + */ + type?: string; + }; +} +interface Ai_Cf_Meta_Llama_Guard_3_8B_Output { + response?: string | { + /** + * Whether the conversation is safe or not. + */ + safe?: boolean; + /** + * A list of what hazard categories predicted for the conversation, if the conversation is deemed unsafe. + */ + categories?: string[]; + }; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; +} +declare abstract class Base_Ai_Cf_Meta_Llama_Guard_3_8B { + inputs: Ai_Cf_Meta_Llama_Guard_3_8B_Input; + postProcessedOutputs: Ai_Cf_Meta_Llama_Guard_3_8B_Output; +} +interface Ai_Cf_Baai_Bge_Reranker_Base_Input { + /** + * A query you wish to perform against the provided contexts. + */ + /** + * Number of returned results starting with the best score. + */ + top_k?: number; + /** + * List of provided contexts. Note that the index in this array is important, as the response will refer to it. + */ + contexts: { + /** + * One of the provided context content + */ + text?: string; + }[]; +} +interface Ai_Cf_Baai_Bge_Reranker_Base_Output { + response?: { + /** + * Index of the context in the request + */ + id?: number; + /** + * Score of the context under the index. + */ + score?: number; + }[]; +} +declare abstract class Base_Ai_Cf_Baai_Bge_Reranker_Base { + inputs: Ai_Cf_Baai_Bge_Reranker_Base_Input; + postProcessedOutputs: Ai_Cf_Baai_Bge_Reranker_Base_Output; +} +type Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Input = Qwen2_5_Coder_32B_Instruct_Prompt | Qwen2_5_Coder_32B_Instruct_Messages; +interface Qwen2_5_Coder_32B_Instruct_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. + */ + lora?: string; + response_format?: JSONMode; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. 
+ */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Qwen2_5_Coder_32B_Instruct_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role: string; + /** + * The content of the message as a string. + */ + content: string; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + response_format?: JSONMode; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. 
+ */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +type Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Output = { + /** + * The generated text response from the model + */ + response: string; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + /** + * The name of the tool to be called + */ + name?: string; + }[]; +}; +declare abstract class Base_Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct { + inputs: Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Input; + postProcessedOutputs: Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Output; +} +type Ai_Cf_Qwen_Qwq_32B_Input = Qwen_Qwq_32B_Prompt | Qwen_Qwq_32B_Messages; +interface Qwen_Qwq_32B_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * JSON schema that should be fulfilled for the response. + */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Qwen_Qwq_32B_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role?: string; + /** + * The tool call id. Must be supplied for tool calls for Mistral-3. If you don't know what to put here you can fall back to 000000001 + */ + tool_call_id?: string; + content?: string | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }[] | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). 
HTTP URL will not be accepted + */ + url?: string; + }; + }; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + /** + * JSON schema that should be fufilled for the response. + */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. 
+ */ + presence_penalty?: number; +} +type Ai_Cf_Qwen_Qwq_32B_Output = { + /** + * The generated text response from the model + */ + response: string; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + /** + * The name of the tool to be called + */ + name?: string; + }[]; +}; +declare abstract class Base_Ai_Cf_Qwen_Qwq_32B { + inputs: Ai_Cf_Qwen_Qwq_32B_Input; + postProcessedOutputs: Ai_Cf_Qwen_Qwq_32B_Output; +} +type Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Input = Mistral_Small_3_1_24B_Instruct_Prompt | Mistral_Small_3_1_24B_Instruct_Messages; +interface Mistral_Small_3_1_24B_Instruct_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * JSON schema that should be fulfilled for the response. + */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Mistral_Small_3_1_24B_Instruct_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role?: string; + /** + * The tool call id. Must be supplied for tool calls for Mistral-3. If you don't know what to put here you can fall back to 000000001 + */ + tool_call_id?: string; + content?: string | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }[] | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). 
HTTP URL will not be accepted + */ + url?: string; + }; + }; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + /** + * JSON schema that should be fufilled for the response. + */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. 
+ */ + presence_penalty?: number; +} +type Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Output = { + /** + * The generated text response from the model + */ + response: string; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + /** + * The name of the tool to be called + */ + name?: string; + }[]; +}; +declare abstract class Base_Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct { + inputs: Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Input; + postProcessedOutputs: Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Output; +} +type Ai_Cf_Google_Gemma_3_12B_It_Input = Google_Gemma_3_12B_It_Prompt | Google_Gemma_3_12B_It_Messages; +interface Google_Gemma_3_12B_It_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * JSON schema that should be fufilled for the response. + */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Google_Gemma_3_12B_It_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role?: string; + content?: string | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }[] | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ + /** + * The name of the tool. 
More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + /** + * JSON schema that should be fufilled for the response. + */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. 
+ */ + presence_penalty?: number; +} +type Ai_Cf_Google_Gemma_3_12B_It_Output = { + /** + * The generated text response from the model + */ + response: string; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + /** + * The name of the tool to be called + */ + name?: string; + }[]; +}; +declare abstract class Base_Ai_Cf_Google_Gemma_3_12B_It { + inputs: Ai_Cf_Google_Gemma_3_12B_It_Input; + postProcessedOutputs: Ai_Cf_Google_Gemma_3_12B_It_Output; +} +type Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Input = Ai_Cf_Meta_Llama_4_Prompt | Ai_Cf_Meta_Llama_4_Messages | Ai_Cf_Meta_Llama_4_Async_Batch; +interface Ai_Cf_Meta_Llama_4_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * JSON schema that should be fulfilled for the response. + */ + guided_json?: object; + response_format?: JSONMode; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Meta_Llama_4_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role?: string; + /** + * The tool call id. If you don't know what to put here you can fall back to 000000001 + */ + tool_call_id?: string; + content?: string | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }[] | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). 
HTTP URL will not be accepted + */ + url?: string; + }; + }; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + response_format?: JSONMode; + /** + * JSON schema that should be fufilled for the response. + */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Meta_Llama_4_Async_Batch { + requests: (Ai_Cf_Meta_Llama_4_Prompt_Inner | Ai_Cf_Meta_Llama_4_Messages_Inner)[]; +} +interface Ai_Cf_Meta_Llama_4_Prompt_Inner { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * JSON schema that should be fulfilled for the response. 
+ */ + guided_json?: object; + response_format?: JSONMode; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Meta_Llama_4_Messages_Inner { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role?: string; + /** + * The tool call id. If you don't know what to put here you can fall back to 000000001 + */ + tool_call_id?: string; + content?: string | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }[] | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. 
+ */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + response_format?: JSONMode; + /** + * JSON schema that should be fufilled for the response. + */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +type Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Output = { + /** + * The generated text response from the model + */ + response: string; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The tool call id. + */ + id?: string; + /** + * Specifies the type of tool (e.g., 'function'). + */ + type?: string; + /** + * Details of the function tool. + */ + function?: { + /** + * The name of the tool to be called + */ + name?: string; + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + }; + }[]; +}; +declare abstract class Base_Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct { + inputs: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Input; + postProcessedOutputs: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Output; +} +interface Ai_Cf_Deepgram_Nova_3_Input { + audio: { + body: object; + contentType: string; + }; + /** + * Sets how the model will interpret strings submitted to the custom_topic param. When strict, the model will only return topics submitted using the custom_topic param. When extended, the model will return its own detected topics in addition to those submitted using the custom_topic param. + */ + custom_topic_mode?: "extended" | "strict"; + /** + * Custom topics you want the model to detect within your input audio or text if present Submit up to 100 + */ + custom_topic?: string; + /** + * Sets how the model will interpret intents submitted to the custom_intent param. 
When strict, the model will only return intents submitted using the custom_intent param. When extended, the model will return its own detected intents in addition those submitted using the custom_intents param + */ + custom_intent_mode?: "extended" | "strict"; + /** + * Custom intents you want the model to detect within your input audio if present + */ + custom_intent?: string; + /** + * Identifies and extracts key entities from content in submitted audio + */ + detect_entities?: boolean; + /** + * Identifies the dominant language spoken in submitted audio + */ + detect_language?: boolean; + /** + * Recognize speaker changes. Each word in the transcript will be assigned a speaker number starting at 0 + */ + diarize?: boolean; + /** + * Identify and extract key entities from content in submitted audio + */ + dictation?: boolean; + /** + * Specify the expected encoding of your submitted audio + */ + encoding?: "linear16" | "flac" | "mulaw" | "amr-nb" | "amr-wb" | "opus" | "speex" | "g729"; + /** + * Arbitrary key-value pairs that are attached to the API response for usage in downstream processing + */ + extra?: string; + /** + * Filler Words can help transcribe interruptions in your audio, like 'uh' and 'um' + */ + filler_words?: boolean; + /** + * Key term prompting can boost or suppress specialized terminology and brands. + */ + keyterm?: string; + /** + * Keywords can boost or suppress specialized terminology and brands. + */ + keywords?: string; + /** + * The BCP-47 language tag that hints at the primary spoken language. Depending on the Model and API endpoint you choose only certain languages are available. + */ + language?: string; + /** + * Spoken measurements will be converted to their corresponding abbreviations. + */ + measurements?: boolean; + /** + * Opts out requests from the Deepgram Model Improvement Program. Refer to our Docs for pricing impacts before setting this to true. https://dpgr.am/deepgram-mip. + */ + mip_opt_out?: boolean; + /** + * Mode of operation for the model representing broad area of topic that will be talked about in the supplied audio + */ + mode?: "general" | "medical" | "finance"; + /** + * Transcribe each audio channel independently. + */ + multichannel?: boolean; + /** + * Numerals converts numbers from written format to numerical format. + */ + numerals?: boolean; + /** + * Splits audio into paragraphs to improve transcript readability. + */ + paragraphs?: boolean; + /** + * Profanity Filter looks for recognized profanity and converts it to the nearest recognized non-profane word or removes it from the transcript completely. + */ + profanity_filter?: boolean; + /** + * Add punctuation and capitalization to the transcript. + */ + punctuate?: boolean; + /** + * Redaction removes sensitive information from your transcripts. + */ + redact?: string; + /** + * Search for terms or phrases in submitted audio and replaces them. + */ + replace?: string; + /** + * Search for terms or phrases in submitted audio. + */ + search?: string; + /** + * Recognizes the sentiment throughout a transcript or text. + */ + sentiment?: boolean; + /** + * Apply formatting to transcript output. When set to true, additional formatting will be applied to transcripts to improve readability. + */ + smart_format?: boolean; + /** + * Detect topics throughout a transcript or text. + */ + topics?: boolean; + /** + * Segments speech into meaningful semantic units. + */ + utterances?: boolean; + /** + * Seconds to wait before detecting a pause between words in submitted audio. 
+ */ + utt_split?: number; + /** + * The number of channels in the submitted audio + */ + channels?: number; + /** + * Specifies whether the streaming endpoint should provide ongoing transcription updates as more audio is received. When set to true, the endpoint sends continuous updates, meaning transcription results may evolve over time. Note: Supported only for webosockets. + */ + interim_results?: boolean; + /** + * Indicates how long model will wait to detect whether a speaker has finished speaking or pauses for a significant period of time. When set to a value, the streaming endpoint immediately finalizes the transcription for the processed time range and returns the transcript with a speech_final parameter set to true. Can also be set to false to disable endpointing + */ + endpointing?: string; + /** + * Indicates that speech has started. You'll begin receiving Speech Started messages upon speech starting. Note: Supported only for webosockets. + */ + vad_events?: boolean; + /** + * Indicates how long model will wait to send an UtteranceEnd message after a word has been transcribed. Use with interim_results. Note: Supported only for webosockets. + */ + utterance_end_ms?: boolean; +} +interface Ai_Cf_Deepgram_Nova_3_Output { + results?: { + channels?: { + alternatives?: { + confidence?: number; + transcript?: string; + words?: { + confidence?: number; + end?: number; + start?: number; + word?: string; + }[]; + }[]; + }[]; + summary?: { + result?: string; + short?: string; + }; + sentiments?: { + segments?: { + text?: string; + start_word?: number; + end_word?: number; + sentiment?: string; + sentiment_score?: number; + }[]; + average?: { + sentiment?: string; + sentiment_score?: number; + }; + }; + }; +} +declare abstract class Base_Ai_Cf_Deepgram_Nova_3 { + inputs: Ai_Cf_Deepgram_Nova_3_Input; + postProcessedOutputs: Ai_Cf_Deepgram_Nova_3_Output; +} +type Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Input = { + /** + * readable stream with audio data and content-type specified for that data + */ + audio: { + body: object; + contentType: string; + }; + /** + * type of data PCM data that's sent to the inference server as raw array + */ + dtype?: "uint8" | "float32" | "float64"; +} | { + /** + * base64 encoded audio data + */ + audio: string; + /** + * type of data PCM data that's sent to the inference server as raw array + */ + dtype?: "uint8" | "float32" | "float64"; +}; +interface Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Output { + /** + * if true, end-of-turn was detected + */ + is_complete?: boolean; + /** + * probability of the end-of-turn detection + */ + probability?: number; +} +declare abstract class Base_Ai_Cf_Pipecat_Ai_Smart_Turn_V2 { + inputs: Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Input; + postProcessedOutputs: Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Output; +} +type Ai_Cf_Openai_Gpt_Oss_120B_Input = GPT_OSS_120B_Responses | GPT_OSS_120B_Responses_Async; +interface GPT_OSS_120B_Responses { + /** + * Responses API Input messages. Refer to OpenAI Responses API docs to learn more about supported content types + */ + input: string | unknown[]; + reasoning?: { + /** + * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. + */ + effort?: "low" | "medium" | "high"; + /** + * A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed. 
+ */ + summary?: "auto" | "concise" | "detailed"; + }; +} +interface GPT_OSS_120B_Responses_Async { + requests: { + /** + * Responses API Input messages. Refer to OpenAI Responses API docs to learn more about supported content types + */ + input: string | unknown[]; + reasoning?: { + /** + * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. + */ + effort?: "low" | "medium" | "high"; + /** + * A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed. + */ + summary?: "auto" | "concise" | "detailed"; + }; + }[]; +} +type Ai_Cf_Openai_Gpt_Oss_120B_Output = {} | (string & NonNullable); +declare abstract class Base_Ai_Cf_Openai_Gpt_Oss_120B { + inputs: Ai_Cf_Openai_Gpt_Oss_120B_Input; + postProcessedOutputs: Ai_Cf_Openai_Gpt_Oss_120B_Output; +} +type Ai_Cf_Openai_Gpt_Oss_20B_Input = GPT_OSS_20B_Responses | GPT_OSS_20B_Responses_Async; +interface GPT_OSS_20B_Responses { + /** + * Responses API Input messages. Refer to OpenAI Responses API docs to learn more about supported content types + */ + input: string | unknown[]; + reasoning?: { + /** + * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. */ - type: string; + effort?: "low" | "medium" | "high"; /** - * Details of the function tool. + * A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed. */ - function: { - /** - * The name of the function. - */ - name: string; + summary?: "auto" | "concise" | "detailed"; + }; +} +interface GPT_OSS_20B_Responses_Async { + requests: { + /** + * Responses API Input messages. Refer to OpenAI Responses API docs to learn more about supported content types + */ + input: string | unknown[]; + reasoning?: { /** - * A brief description of what the function does. + * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. */ - description: string; + effort?: "low" | "medium" | "high"; /** - * Schema defining the parameters accepted by the function. + * A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed. */ - parameters: { - /** - * The type of the parameters object (usually 'object'). - */ - type: string; - /** - * List of required parameter names. - */ - required?: string[]; - /** - * Definitions of each parameter. - */ - properties: { - [k: string]: { - /** - * The data type of the parameter. - */ - type: string; - /** - * A description of the expected parameter. - */ - description: string; - }; - }; - }; + summary?: "auto" | "concise" | "detailed"; }; - })[]; + }[]; +} +type Ai_Cf_Openai_Gpt_Oss_20B_Output = {} | (string & NonNullable); +declare abstract class Base_Ai_Cf_Openai_Gpt_Oss_20B { + inputs: Ai_Cf_Openai_Gpt_Oss_20B_Input; + postProcessedOutputs: Ai_Cf_Openai_Gpt_Oss_20B_Output; +} +interface Ai_Cf_Leonardo_Phoenix_1_0_Input { /** - * JSON schema that should be fufilled for the response. 
+ * A text description of the image you want to generate. */ - guided_json?: object; + prompt: string; /** - * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + * Controls how closely the generated image should adhere to the prompt; higher values make the image more aligned with the prompt */ - raw?: boolean; + guidance?: number; /** - * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + * Random seed for reproducibility of the image generation */ - stream?: boolean; + seed?: number; /** - * The maximum number of tokens to generate in the response. + * The height of the generated image in pixels */ - max_tokens?: number; + height?: number; /** - * Controls the randomness of the output; higher values produce more random results. + * The width of the generated image in pixels */ - temperature?: number; + width?: number; /** - * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + * The number of diffusion steps; higher values can improve quality but take longer */ - top_p?: number; + num_steps?: number; /** - * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + * Specify what to exclude from the generated images */ - top_k?: number; + negative_prompt?: string; +} +/** + * The generated image in JPEG format + */ +type Ai_Cf_Leonardo_Phoenix_1_0_Output = string; +declare abstract class Base_Ai_Cf_Leonardo_Phoenix_1_0 { + inputs: Ai_Cf_Leonardo_Phoenix_1_0_Input; + postProcessedOutputs: Ai_Cf_Leonardo_Phoenix_1_0_Output; +} +interface Ai_Cf_Leonardo_Lucid_Origin_Input { /** - * Random seed for reproducibility of the generation. + * A text description of the image you want to generate. + */ + prompt: string; + /** + * Controls how closely the generated image should adhere to the prompt; higher values make the image more aligned with the prompt + */ + guidance?: number; + /** + * Random seed for reproducibility of the image generation */ seed?: number; /** - * Penalty for repeated tokens; higher values discourage repetition. + * The height of the generated image in pixels */ - repetition_penalty?: number; + height?: number; /** - * Decreases the likelihood of the model repeating the same lines verbatim. + * The width of the generated image in pixels */ - frequency_penalty?: number; + width?: number; /** - * Increases the likelihood of the model introducing new topics. + * The number of diffusion steps; higher values can improve quality but take longer */ - presence_penalty?: number; + num_steps?: number; + /** + * The number of diffusion steps; higher values can improve quality but take longer + */ + steps?: number; } -type Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Output = { +interface Ai_Cf_Leonardo_Lucid_Origin_Output { /** - * The generated text response from the model + * The generated image in Base64 format. */ - response: string; + image?: string; +} +declare abstract class Base_Ai_Cf_Leonardo_Lucid_Origin { + inputs: Ai_Cf_Leonardo_Lucid_Origin_Input; + postProcessedOutputs: Ai_Cf_Leonardo_Lucid_Origin_Output; +} +interface Ai_Cf_Deepgram_Aura_1_Input { /** - * Usage statistics for the inference request + * Speaker used to produce the audio. 
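
The Leonardo inputs above plug straight into `env.AI.run()`. A minimal sketch for `@cf/leonardo/lucid-origin`, assuming a Workers AI binding named `AI`; the prompt and dimensions are illustrative:

    const result = await env.AI.run("@cf/leonardo/lucid-origin", {
      prompt: "Isometric illustration of a solar-powered data center",
      width: 1024,
      height: 1024,
      num_steps: 25,
    });
    // `result.image` is a Base64-encoded image string per Ai_Cf_Leonardo_Lucid_Origin_Output
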
*/ - usage?: { - /** - * Total number of tokens in input - */ - prompt_tokens?: number; - /** - * Total number of tokens in output - */ - completion_tokens?: number; - /** - * Total number of input and output tokens - */ - total_tokens?: number; - }; + speaker?: "angus" | "asteria" | "arcas" | "orion" | "orpheus" | "athena" | "luna" | "zeus" | "perseus" | "helios" | "hera" | "stella"; /** - * An array of tool calls requests made during the response generation + * Encoding of the output audio. */ - tool_calls?: { - /** - * The arguments passed to be passed to the tool call request - */ - arguments?: object; - /** - * The name of the tool to be called - */ - name?: string; - }[]; -} | string; -declare abstract class Base_Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct { - inputs: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Input; - postProcessedOutputs: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Output; + encoding?: "linear16" | "flac" | "mulaw" | "alaw" | "mp3" | "opus" | "aac"; + /** + * Container specifies the file format wrapper for the output audio. The available options depend on the encoding type.. + */ + container?: "none" | "wav" | "ogg"; + /** + * The text content to be converted to speech + */ + text: string; + /** + * Sample Rate specifies the sample rate for the output audio. Based on the encoding, different sample rates are supported. For some encodings, the sample rate is not configurable + */ + sample_rate?: number; + /** + * The bitrate of the audio in bits per second. Choose from predefined ranges or specific values based on the encoding type. + */ + bit_rate?: number; +} +/** + * The generated audio in MP3 format + */ +type Ai_Cf_Deepgram_Aura_1_Output = string; +declare abstract class Base_Ai_Cf_Deepgram_Aura_1 { + inputs: Ai_Cf_Deepgram_Aura_1_Input; + postProcessedOutputs: Ai_Cf_Deepgram_Aura_1_Output; } interface AiModels { "@cf/huggingface/distilbert-sst-2-int8": BaseAiTextClassification; @@ -3596,11 +5937,8 @@ interface AiModels { "@cf/lykon/dreamshaper-8-lcm": BaseAiTextToImage; "@cf/bytedance/stable-diffusion-xl-lightning": BaseAiTextToImage; "@cf/myshell-ai/melotts": BaseAiTextToSpeech; - "@cf/baai/bge-base-en-v1.5": BaseAiTextEmbeddings; - "@cf/baai/bge-small-en-v1.5": BaseAiTextEmbeddings; - "@cf/baai/bge-large-en-v1.5": BaseAiTextEmbeddings; + "@cf/google/embeddinggemma-300m": BaseAiTextEmbeddings; "@cf/microsoft/resnet-50": BaseAiImageClassification; - "@cf/facebook/detr-resnet-50": BaseAiObjectDetection; "@cf/meta/llama-2-7b-chat-int8": BaseAiTextGeneration; "@cf/mistral/mistral-7b-instruct-v0.1": BaseAiTextGeneration; "@cf/meta/llama-2-7b-chat-fp16": BaseAiTextGeneration; @@ -3635,28 +5973,50 @@ interface AiModels { "@cf/fblgit/una-cybertron-7b-v2-bf16": BaseAiTextGeneration; "@cf/meta/llama-3-8b-instruct-awq": BaseAiTextGeneration; "@hf/meta-llama/meta-llama-3-8b-instruct": BaseAiTextGeneration; - "@cf/meta/llama-3.1-8b-instruct": BaseAiTextGeneration; "@cf/meta/llama-3.1-8b-instruct-fp8": BaseAiTextGeneration; "@cf/meta/llama-3.1-8b-instruct-awq": BaseAiTextGeneration; "@cf/meta/llama-3.2-3b-instruct": BaseAiTextGeneration; "@cf/meta/llama-3.2-1b-instruct": BaseAiTextGeneration; - "@cf/meta/llama-3.3-70b-instruct-fp8-fast": BaseAiTextGeneration; "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b": BaseAiTextGeneration; - "@cf/meta/m2m100-1.2b": BaseAiTranslation; "@cf/facebook/bart-large-cnn": BaseAiSummarization; "@cf/llava-hf/llava-1.5-7b-hf": BaseAiImageToText; + "@cf/baai/bge-base-en-v1.5": Base_Ai_Cf_Baai_Bge_Base_En_V1_5; "@cf/openai/whisper": 
Base_Ai_Cf_Openai_Whisper; + "@cf/meta/m2m100-1.2b": Base_Ai_Cf_Meta_M2M100_1_2B; + "@cf/baai/bge-small-en-v1.5": Base_Ai_Cf_Baai_Bge_Small_En_V1_5; + "@cf/baai/bge-large-en-v1.5": Base_Ai_Cf_Baai_Bge_Large_En_V1_5; "@cf/unum/uform-gen2-qwen-500m": Base_Ai_Cf_Unum_Uform_Gen2_Qwen_500M; "@cf/openai/whisper-tiny-en": Base_Ai_Cf_Openai_Whisper_Tiny_En; "@cf/openai/whisper-large-v3-turbo": Base_Ai_Cf_Openai_Whisper_Large_V3_Turbo; "@cf/baai/bge-m3": Base_Ai_Cf_Baai_Bge_M3; "@cf/black-forest-labs/flux-1-schnell": Base_Ai_Cf_Black_Forest_Labs_Flux_1_Schnell; "@cf/meta/llama-3.2-11b-vision-instruct": Base_Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct; + "@cf/meta/llama-3.3-70b-instruct-fp8-fast": Base_Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast; "@cf/meta/llama-guard-3-8b": Base_Ai_Cf_Meta_Llama_Guard_3_8B; "@cf/baai/bge-reranker-base": Base_Ai_Cf_Baai_Bge_Reranker_Base; + "@cf/qwen/qwen2.5-coder-32b-instruct": Base_Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct; + "@cf/qwen/qwq-32b": Base_Ai_Cf_Qwen_Qwq_32B; + "@cf/mistralai/mistral-small-3.1-24b-instruct": Base_Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct; + "@cf/google/gemma-3-12b-it": Base_Ai_Cf_Google_Gemma_3_12B_It; "@cf/meta/llama-4-scout-17b-16e-instruct": Base_Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct; + "@cf/deepgram/nova-3": Base_Ai_Cf_Deepgram_Nova_3; + "@cf/pipecat-ai/smart-turn-v2": Base_Ai_Cf_Pipecat_Ai_Smart_Turn_V2; + "@cf/openai/gpt-oss-120b": Base_Ai_Cf_Openai_Gpt_Oss_120B; + "@cf/openai/gpt-oss-20b": Base_Ai_Cf_Openai_Gpt_Oss_20B; + "@cf/leonardo/phoenix-1.0": Base_Ai_Cf_Leonardo_Phoenix_1_0; + "@cf/leonardo/lucid-origin": Base_Ai_Cf_Leonardo_Lucid_Origin; + "@cf/deepgram/aura-1": Base_Ai_Cf_Deepgram_Aura_1; } type AiOptions = { + /** + * Send requests as an asynchronous batch job, only works for supported models + * https://developers.cloudflare.com/workers-ai/features/batch-api + */ + queueRequest?: boolean; + /** + * Establish websocket connections, only works for supported models + */ + websocket?: boolean; gateway?: GatewayOptions; returnRawResponse?: boolean; prefix?: string; @@ -3703,9 +6063,13 @@ declare abstract class Ai { aiGatewayLogId: string | null; gateway(gatewayId: string): AiGateway; autorag(autoragId: string): AutoRAG; - run(model: Name, inputs: AiModelList[Name]["inputs"], options?: Options): Promise(model: Name, inputs: InputOptions, options?: Options): Promise; + } | { + websocket: true; + } ? Response : InputOptions extends { + stream: true; + } ? 
ReadableStream : AiModelList[Name]["postProcessedOutputs"]>; models(params?: AiModelsSearchParams): Promise; toMarkdown(files: { name: string; @@ -3738,6 +6102,12 @@ type GatewayOptions = { requestTimeoutMs?: number; retries?: GatewayRetries; }; +type UniversalGatewayOptions = Exclude & { + /** + ** @deprecated + */ + id?: string; +}; type AiGatewayPatchLog = { score?: number | null; feedback?: -1 | 1 | null; @@ -3806,7 +6176,7 @@ declare abstract class AiGateway { patchLog(logId: string, data: AiGatewayPatchLog): Promise; getLog(logId: string): Promise; run(data: AIGatewayUniversalRequest | AIGatewayUniversalRequest[], options?: { - gateway?: GatewayOptions; + gateway?: UniversalGatewayOptions; extraHeaders?: object; }): Promise; getUrl(provider?: AIGatewayProviders | string): Promise; // eslint-disable-line @@ -3817,8 +6187,20 @@ interface AutoRAGNotFoundError extends Error { } interface AutoRAGUnauthorizedError extends Error { } +interface AutoRAGNameNotSetError extends Error { +} +type ComparisonFilter = { + key: string; + type: 'eq' | 'ne' | 'gt' | 'gte' | 'lt' | 'lte'; + value: string | number | boolean; +}; +type CompoundFilter = { + type: 'and' | 'or'; + filters: ComparisonFilter[]; +}; type AutoRagSearchRequest = { query: string; + filters?: CompoundFilter | ComparisonFilter; max_num_results?: number; ranking_options?: { ranker?: string; @@ -3828,6 +6210,7 @@ type AutoRagSearchRequest = { }; type AutoRagAiSearchRequest = AutoRagSearchRequest & { stream?: boolean; + system_prompt?: string; }; type AutoRagAiSearchRequestStreaming = Omit & { stream: true; @@ -3848,10 +6231,20 @@ type AutoRagSearchResponse = { has_more: boolean; next_page: string | null; }; +type AutoRagListResponse = { + id: string; + enable: boolean; + type: string; + source: string; + vectorize_name: string; + paused: boolean; + status: string; +}[]; type AutoRagAiSearchResponse = AutoRagSearchResponse & { response: string; }; declare abstract class AutoRAG { + list(): Promise; search(params: AutoRagSearchRequest): Promise; aiSearch(params: AutoRagAiSearchRequestStreaming): Promise; aiSearch(params: AutoRagAiSearchRequest): Promise; @@ -3893,6 +6286,12 @@ interface BasicImageTransformations { * breaks aspect ratio */ fit?: "scale-down" | "contain" | "cover" | "crop" | "pad" | "squeeze"; + /** + * Image segmentation using artificial intelligence models. Sets pixels not + * within selected segment area to transparent e.g "foreground" sets every + * background pixel as transparent. + */ + segment?: "foreground"; /** * When cropping with fit: "cover", this defines the side or point that should * be left uncropped. The value is either a string @@ -3905,7 +6304,7 @@ interface BasicImageTransformations { * preserve as much as possible around a point at 20% of the height of the * source image. */ - gravity?: 'left' | 'right' | 'top' | 'bottom' | 'center' | 'auto' | 'entropy' | BasicImageTransformationsGravityCoordinates; + gravity?: 'face' | 'left' | 'right' | 'top' | 'bottom' | 'center' | 'auto' | 'entropy' | BasicImageTransformationsGravityCoordinates; /** * Background color to add underneath the image. Applies only to images with * transparency (such as PNG). Accepts any CSS color (#RRGGBB, rgba(…), @@ -4192,13 +6591,13 @@ interface IncomingRequestCfPropertiesBase extends Record { * * @example 395747 */ - asn: number; + asn?: number; /** * The organization which owns the ASN of the incoming request. 
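
The AutoRAG additions above (metadata `filters`, a per-request `system_prompt`, and `list()`) extend the existing search calls. A minimal sketch, assuming a Workers AI binding named `AI`; the instance name, filter key, and prompts are illustrative:

    const answer = await env.AI.autorag("docs-rag").aiSearch({
      query: "How do I rotate the signing key?",
      system_prompt: "Answer only from the indexed runbooks.",
      filters: { type: "eq", key: "folder", value: "runbooks/" },
      max_num_results: 5,
    });
    console.log(answer.response); // AutoRagAiSearchResponse adds `response` to the search results
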
* * @example "Google Cloud" */ - asOrganization: string; + asOrganization?: string; /** * The original value of the `Accept-Encoding` header if Cloudflare modified it. * @@ -4322,7 +6721,7 @@ interface IncomingRequestCfPropertiesCloudflareForSaaSEnterprise { * This field is only present if you have Cloudflare for SaaS enabled on your account * and you have followed the [required steps to enable it]((https://developers.cloudflare.com/cloudflare-for-platforms/cloudflare-for-saas/domain-support/custom-metadata/)). */ - hostMetadata: HostMetadata; + hostMetadata?: HostMetadata; } interface IncomingRequestCfPropertiesCloudflareAccessOrApiShield { /** @@ -4609,6 +7008,11 @@ interface D1Meta { */ sql_duration_ms: number; }; + /** + * Number of total attempts to execute the query, due to automatic retries. + * Note: All other fields in the response like `timings` only apply to the last attempt. + */ + total_attempts?: number; } interface D1Response { success: true; @@ -4626,11 +7030,11 @@ type D1SessionConstraint = // Indicates that the first query should go to the primary, and the rest queries // using the same D1DatabaseSession will go to any replica that is consistent with // the bookmark maintained by the session (returned by the first query). -"first-primary" +'first-primary' // Indicates that the first query can go anywhere (primary or replica), and the rest queries // using the same D1DatabaseSession will go to any replica that is consistent with // the bookmark maintained by the session (returned by the first query). - | "first-unconstrained"; + | 'first-unconstrained'; type D1SessionBookmark = string; declare abstract class D1Database { prepare(query: string): D1PreparedStatement; @@ -4748,6 +7152,22 @@ declare module "cloudflare:email" { }; export { _EmailMessage as EmailMessage }; } +/** + * Hello World binding to serve as an explanatory example. DO NOT USE + */ +interface HelloWorldBinding { + /** + * Retrieve the current stored value + */ + get(): Promise<{ + value: string; + ms?: number; + }>; + /** + * Set a new stored value + */ + set(value: string): Promise; +} interface Hyperdrive { /** * Connect directly to Hyperdrive as if it's your database, returning a TCP socket. 
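
The new `total_attempts` field on `D1Meta` counts automatic retries and pairs with the session constraints above. A minimal sketch, assuming a D1 binding named `DB` and the Sessions API's `withSession()` method, which this hunk does not show:

    const session = env.DB.withSession("first-primary");
    const { results, meta } = await session.prepare("SELECT id FROM users LIMIT 10").all();
    // timings in `meta` describe only the last attempt; `total_attempts` also counts retries
    console.log(results.length, meta.total_attempts);
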
@@ -4825,7 +7245,8 @@ type ImageTransform = { fit?: 'scale-down' | 'contain' | 'pad' | 'squeeze' | 'cover' | 'crop'; flip?: 'h' | 'v' | 'hv'; gamma?: number; - gravity?: 'left' | 'right' | 'top' | 'bottom' | 'center' | 'auto' | 'entropy' | { + segment?: 'foreground'; + gravity?: 'face' | 'left' | 'right' | 'top' | 'bottom' | 'center' | 'auto' | 'entropy' | { x?: number; y?: number; mode: 'remainder' | 'box-center'; @@ -4833,7 +7254,7 @@ type ImageTransform = { rotate?: 0 | 90 | 180 | 270; saturation?: number; sharpen?: number; - trim?: "border" | { + trim?: 'border' | { top?: number; bottom?: number; left?: number; @@ -4855,10 +7276,14 @@ type ImageDrawOptions = { bottom?: number; right?: number; }; +type ImageInputOptions = { + encoding?: 'base64'; +}; type ImageOutputOptions = { format: 'image/jpeg' | 'image/png' | 'image/gif' | 'image/webp' | 'image/avif' | 'rgb' | 'rgba'; quality?: number; background?: string; + anim?: boolean; }; interface ImagesBinding { /** @@ -4866,13 +7291,13 @@ interface ImagesBinding { * @throws {@link ImagesError} with code 9412 if input is not an image * @param stream The image bytes */ - info(stream: ReadableStream): Promise; + info(stream: ReadableStream, options?: ImageInputOptions): Promise; /** * Begin applying a series of transformations to an image * @param stream The image bytes * @returns A transform handle */ - input(stream: ReadableStream): ImageTransformer; + input(stream: ReadableStream, options?: ImageInputOptions): ImageTransformer; } interface ImageTransformer { /** @@ -4895,6 +7320,9 @@ interface ImageTransformer { */ output(options: ImageOutputOptions): Promise; } +type ImageTransformationOutputOptions = { + encoding?: 'base64'; +}; interface ImageTransformationResult { /** * The image as a response, ready to store in cache or return to users @@ -4907,13 +7335,132 @@ interface ImageTransformationResult { /** * The bytes of the response */ - image(): ReadableStream; + image(options?: ImageTransformationOutputOptions): ReadableStream; } interface ImagesError extends Error { readonly code: number; readonly message: string; readonly stack?: string; } +/** + * Media binding for transforming media streams. + * Provides the entry point for media transformation operations. + */ +interface MediaBinding { + /** + * Creates a media transformer from an input stream. + * @param media - The input media bytes + * @returns A MediaTransformer instance for applying transformations + */ + input(media: ReadableStream): MediaTransformer; +} +/** + * Media transformer for applying transformation operations to media content. + * Handles sizing, fitting, and other input transformation parameters. + */ +interface MediaTransformer { + /** + * Applies transformation options to the media content. + * @param transform - Configuration for how the media should be transformed + * @returns A generator for producing the transformed media output + */ + transform(transform: MediaTransformationInputOptions): MediaTransformationGenerator; +} +/** + * Generator for producing media transformation results. + * Configures the output format and parameters for the transformed media. + */ +interface MediaTransformationGenerator { + /** + * Generates the final media output with specified options. + * @param output - Configuration for the output format and parameters + * @returns The final transformation result containing the transformed media + */ + output(output: MediaTransformationOutputOptions): MediaTransformationResult; +} +/** + * Result of a media transformation operation. 
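
The Media binding above chains input(), transform(), and output(). A minimal sketch that extracts a single frame, assuming a binding named `MEDIA` and an incoming `videoStream` (both illustrative); the output options are defined just below:

    const frame = env.MEDIA.input(videoStream)
      .transform({ width: 640, fit: "contain" })
      .output({ mode: "frame", time: "2s", format: "jpg" });
    // MediaTransformationResult: return it directly or read the bytes
    return frame.response();
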
+ * Provides multiple ways to access the transformed media content. + */ +interface MediaTransformationResult { + /** + * Returns the transformed media as a readable stream of bytes. + * @returns A stream containing the transformed media data + */ + media(): ReadableStream; + /** + * Returns the transformed media as an HTTP response object. + * @returns The transformed media as a Response, ready to store in cache or return to users + */ + response(): Response; + /** + * Returns the MIME type of the transformed media. + * @returns The content type string (e.g., 'image/jpeg', 'video/mp4') + */ + contentType(): string; +} +/** + * Configuration options for transforming media input. + * Controls how the media should be resized and fitted. + */ +type MediaTransformationInputOptions = { + /** How the media should be resized to fit the specified dimensions */ + fit?: 'contain' | 'cover' | 'scale-down'; + /** Target width in pixels */ + width?: number; + /** Target height in pixels */ + height?: number; +}; +/** + * Configuration options for Media Transformations output. + * Controls the format, timing, and type of the generated output. + */ +type MediaTransformationOutputOptions = { + /** + * Output mode determining the type of media to generate + */ + mode?: 'video' | 'spritesheet' | 'frame' | 'audio'; + /** Whether to include audio in the output */ + audio?: boolean; + /** + * Starting timestamp for frame extraction or start time for clips. (e.g. '2s'). + */ + time?: string; + /** + * Duration for video clips, audio extraction, and spritesheet generation (e.g. '5s'). + */ + duration?: string; + /** + * Number of frames in the spritesheet. + */ + imageCount?: number; + /** + * Output format for the generated media. + */ + format?: 'jpg' | 'png' | 'm4a'; +}; +/** + * Error object for media transformation operations. + * Extends the standard Error interface with additional media-specific information. + */ +interface MediaError extends Error { + readonly code: number; + readonly message: string; + readonly stack?: string; +} +declare module 'cloudflare:node' { + interface NodeStyleServer { + listen(...args: unknown[]): this; + address(): { + port?: number | null | undefined; + }; + } + export function httpServerHandler(port: number): ExportedHandler; + export function httpServerHandler(options: { + port: number; + }): ExportedHandler; + export function httpServerHandler(server: NodeStyleServer): ExportedHandler; +} type Params

= Record; type EventContext = {
     request: Request>;
@@ -5129,10 +7676,49 @@ declare namespace Rpc {
     };
 }
 declare namespace Cloudflare {
+    // Type of `env`.
+    //
+    // The specific project can extend `Env` by redeclaring it in project-specific files. TypeScript
+    // will merge all declarations.
+    //
+    // You can use `wrangler types` to generate the `Env` type automatically.
     interface Env {
     }
+    // Project-specific parameters used to inform types.
+    //
+    // This interface is, again, intended to be declared in project-specific files, and then that
+    // declaration will be merged with this one.
+    //
+    // A project should have a declaration like this:
+    //
+    //   interface GlobalProps {
+    //     // Declares the main module's exports. Used to populate Cloudflare.Exports aka the type
+    //     // of `ctx.exports`.
+    //     mainModule: typeof import("my-main-module");
+    //
+    //     // Declares which of the main module's exports are configured with durable storage, and
+    //     // thus should behave as Durable Object namespace bindings.
+    //     durableNamespaces: "MyDurableObject" | "AnotherDurableObject";
+    //   }
+    //
+    // You can use `wrangler types` to generate `GlobalProps` automatically.
+    interface GlobalProps {
+    }
+    // Evaluates to the type of a property in GlobalProps, defaulting to `Default` if it is not
+    // present.
+    type GlobalProp = K extends keyof GlobalProps ? GlobalProps[K] : Default;
+    // The type of the program's main module exports, if known. Requires `GlobalProps` to declare the
+    // `mainModule` property.
+    type MainModule = GlobalProp<"mainModule", {}>;
+    // The type of ctx.exports, which contains loopback bindings for all top-level exports.
+    type Exports = {
+        [K in keyof MainModule]: LoopbackForExport
+        // If the export is listed in `durableNamespaces`, then it is also a
+        // DurableObjectNamespace.
+        & (K extends GlobalProp<"durableNamespaces", never> ? MainModule[K] extends new (...args: any[]) => infer DoInstance ? DoInstance extends Rpc.DurableObjectBranded ? 
DurableObjectNamespace : DurableObjectNamespace : DurableObjectNamespace : {}); + }; } -declare module 'cloudflare:workers' { +declare namespace CloudflareWorkersModule { export type RpcStub = Rpc.Stub; export const RpcStub: { new (value: T): Rpc.Stub; @@ -5141,9 +7727,9 @@ declare module 'cloudflare:workers' { [Rpc.__RPC_TARGET_BRAND]: never; } // `protected` fields don't appear in `keyof`s, so can't be accessed over RPC - export abstract class WorkerEntrypoint implements Rpc.WorkerEntrypointBranded { + export abstract class WorkerEntrypoint implements Rpc.WorkerEntrypointBranded { [Rpc.__WORKER_ENTRYPOINT_BRAND]: never; - protected ctx: ExecutionContext; + protected ctx: ExecutionContext; protected env: Env; constructor(ctx: ExecutionContext, env: Env); fetch?(request: Request): Response | Promise; @@ -5153,9 +7739,9 @@ declare module 'cloudflare:workers' { queue?(batch: MessageBatch): void | Promise; test?(controller: TestController): void | Promise; } - export abstract class DurableObject implements Rpc.DurableObjectBranded { + export abstract class DurableObject implements Rpc.DurableObjectBranded { [Rpc.__DURABLE_OBJECT_BRAND]: never; - protected ctx: DurableObjectState; + protected ctx: DurableObjectState; protected env: Env; constructor(ctx: DurableObjectState, env: Env); fetch?(request: Request): Response | Promise; @@ -5168,6 +7754,7 @@ declare module 'cloudflare:workers' { export type WorkflowSleepDuration = `${number} ${WorkflowDurationLabel}${'s' | ''}` | number; export type WorkflowDelayDuration = WorkflowSleepDuration; export type WorkflowTimeoutDuration = WorkflowSleepDuration; + export type WorkflowRetentionDuration = WorkflowSleepDuration; export type WorkflowBackoff = 'constant' | 'linear' | 'exponential'; export type WorkflowStepConfig = { retries?: { @@ -5204,8 +7791,12 @@ declare module 'cloudflare:workers' { constructor(ctx: ExecutionContext, env: Env); run(event: Readonly>, step: WorkflowStep): Promise; } + export function waitUntil(promise: Promise): void; export const env: Cloudflare.Env; } +declare module 'cloudflare:workers' { + export = CloudflareWorkersModule; +} interface SecretsStoreSecret { /** * Get a secret from the Secrets Store, returning a string of the secret value @@ -5226,12 +7817,11 @@ declare namespace TailStream { readonly type: "fetch"; readonly method: string; readonly url: string; - readonly cfJson: string; + readonly cfJson?: object; readonly headers: Header[]; } interface JsRpcEventInfo { readonly type: "jsrpc"; - readonly methodName: string; } interface ScheduledEventInfo { readonly type: "scheduled"; @@ -5272,10 +7862,6 @@ declare namespace TailStream { readonly type: "hibernatableWebSocket"; readonly info: HibernatableWebSocketEventInfoClose | HibernatableWebSocketEventInfoError | HibernatableWebSocketEventInfoMessage; } - interface Resume { - readonly type: "resume"; - readonly attachment?: any; - } interface CustomEventInfo { readonly type: "custom"; } @@ -5289,20 +7875,18 @@ declare namespace TailStream { readonly tag?: string; readonly message?: string; } - interface Trigger { - readonly traceId: string; - readonly invocationId: string; - readonly spanId: string; - } interface Onset { readonly type: "onset"; + readonly attributes: Attribute[]; + // id for the span being opened by this Onset event. 
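
Among the `cloudflare:workers` additions above is a module-level `waitUntil()`, usable where no `ExecutionContext` is in scope. A minimal sketch; the URL and helper name are illustrative:

    import { waitUntil } from "cloudflare:workers";

    export function recordMetric(payload: string): void {
      // keeps the invocation alive until the fire-and-forget request settles
      waitUntil(fetch("https://metrics.example.com/ingest", { method: "POST", body: payload }));
    }
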
+        readonly spanId: string;
         readonly dispatchNamespace?: string;
         readonly entrypoint?: string;
+        readonly executionModel: string;
         readonly scriptName?: string;
         readonly scriptTags?: string[];
         readonly scriptVersion?: ScriptVersion;
-        readonly trigger?: Trigger;
-        readonly info: FetchEventInfo | JsRpcEventInfo | ScheduledEventInfo | AlarmEventInfo | QueueEventInfo | EmailEventInfo | TraceEventInfo | HibernatableWebSocketEventInfo | Resume | CustomEventInfo;
+        readonly info: FetchEventInfo | JsRpcEventInfo | ScheduledEventInfo | AlarmEventInfo | QueueEventInfo | EmailEventInfo | TraceEventInfo | HibernatableWebSocketEventInfo | CustomEventInfo;
     }
     interface Outcome {
         readonly type: "outcome";
@@ -5310,13 +7894,12 @@
         readonly cpuTime: number;
         readonly wallTime: number;
     }
-    interface Hibernate {
-        readonly type: "hibernate";
-    }
     interface SpanOpen {
         readonly type: "spanOpen";
-        readonly op?: string;
-        readonly info?: FetchEventInfo | JsRpcEventInfo | Attribute[];
+        readonly name: string;
+        // id for the span being opened by this SpanOpen event.
+        readonly spanId: string;
+        readonly info?: FetchEventInfo | JsRpcEventInfo | Attributes;
     }
     interface SpanClose {
         readonly type: "spanClose";
@@ -5336,36 +7919,62 @@ declare namespace TailStream {
     interface Log {
         readonly type: "log";
         readonly level: "debug" | "error" | "info" | "log" | "warn";
-        readonly message: string;
+        readonly message: object;
     }
+    // This marks the worker handler return information.
+    // This is separate from Outcome because the worker invocation can live for a long time after
+    // returning. For example - Websockets that return an http upgrade response but then continue
+    // streaming information or SSE http connections.
     interface Return {
         readonly type: "return";
-        readonly info?: FetchResponseInfo | Attribute[];
-    }
-    interface Link {
-        readonly type: "link";
-        readonly label?: string;
-        readonly traceId: string;
-        readonly invocationId: string;
-        readonly spanId: string;
+        readonly info?: FetchResponseInfo;
     }
     interface Attribute {
-        readonly type: "attribute";
         readonly name: string;
-        readonly value: string | string[] | boolean | boolean[] | number | number[];
+        readonly value: string | string[] | boolean | boolean[] | number | number[] | bigint | bigint[];
+    }
+    interface Attributes {
+        readonly type: "attributes";
+        readonly info: Attribute[];
     }
-    type Mark = DiagnosticChannelEvent | Exception | Log | Return | Link | Attribute[];
-    interface TailEvent {
+    type EventType = Onset | Outcome | SpanOpen | SpanClose | DiagnosticChannelEvent | Exception | Log | Return | Attributes;
+    // Context in which this trace event lives.
+    interface SpanContext {
+        // Single id for the entire top-level invocation
+        // This should be a new traceId for the first worker stage invoked in the eyeball request and then
+        // same-account service-bindings should reuse the same traceId but cross-account service-bindings
+        // should use a new traceId.
         readonly traceId: string;
+        // spanId in which this event is handled
+        // for Onset and SpanOpen events this would be the parent span id
+        // for Outcome and SpanClose this would be the span id of the opening Onset and SpanOpen events
+        // For Hibernate and Mark this would be the span under which they were emitted.
+        // spanId is not set ONLY if:
+        // 1. This is an Onset event
+        // 2. We are not inheriting any SpanContext. (e.g. 
this is a cross-account service binding or a new top-level invocation) + readonly spanId?: string; + } + interface TailEvent { + // invocation id of the currently invoked worker stage. + // invocation id will always be unique to every Onset event and will be the same until the Outcome event. readonly invocationId: string; - readonly spanId: string; + // Inherited spanContext for this event. + readonly spanContext: SpanContext; readonly timestamp: Date; readonly sequence: number; - readonly event: Onset | Outcome | Hibernate | SpanOpen | SpanClose | Mark; + readonly event: Event; } - type TailEventHandler = (event: TailEvent) => void | Promise; - type TailEventHandlerName = "onset" | "outcome" | "hibernate" | "spanOpen" | "spanClose" | "diagnosticChannel" | "exception" | "log" | "return" | "link" | "attribute"; - type TailEventHandlerObject = Record; + type TailEventHandler = (event: TailEvent) => void | Promise; + type TailEventHandlerObject = { + outcome?: TailEventHandler; + spanOpen?: TailEventHandler; + spanClose?: TailEventHandler; + diagnosticChannel?: TailEventHandler; + exception?: TailEventHandler; + log?: TailEventHandler; + return?: TailEventHandler; + attributes?: TailEventHandler; + }; type TailEventHandlerType = TailEventHandler | TailEventHandlerObject; } // Copyright (c) 2022-2023 Cloudflare, Inc. @@ -5678,6 +8287,9 @@ declare abstract class Workflow { */ public createBatch(batch: WorkflowInstanceCreateOptions[]): Promise; } +type WorkflowDurationLabel = 'second' | 'minute' | 'hour' | 'day' | 'week' | 'month' | 'year'; +type WorkflowSleepDuration = `${number} ${WorkflowDurationLabel}${'s' | ''}` | number; +type WorkflowRetentionDuration = WorkflowSleepDuration; interface WorkflowInstanceCreateOptions { /** * An id for your Workflow instance. Must be unique within the Workflow. @@ -5687,6 +8299,14 @@ interface WorkflowInstanceCreateOptions { * The event payload the Workflow instance is triggered with */ params?: PARAMS; + /** + * The retention policy for Workflow instance. + * Defaults to the maximum retention period available for the owner's account. + */ + retention?: { + successRetention?: WorkflowRetentionDuration; + errorRetention?: WorkflowRetentionDuration; + }; } type InstanceStatus = { status: 'queued' // means that instance is waiting to be started (see concurrency limits) From e975e0826f20356c4ec79ee3e426dac54842e8d1 Mon Sep 17 00:00:00 2001 From: Ashish Shubham Date: Fri, 17 Oct 2025 15:10:20 -0700 Subject: [PATCH 2/2] lint fix --- src/cloudflare-utils.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cloudflare-utils.ts b/src/cloudflare-utils.ts index bc360bc..7f7750e 100644 --- a/src/cloudflare-utils.ts +++ b/src/cloudflare-utils.ts @@ -20,7 +20,7 @@ export function instrumentedMCPServer(MCPServer: new (c } public static serve(path: string) { - const server = super.serve(path, { + const server = McpAgent.serve(path, { corsOptions: { headers: "Content-Type, Accept, mcp-session-id, mcp-protocol-version, Authorization, x-ts-host" }
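
For context on the second commit, which swaps `super.serve` for an explicit `McpAgent.serve` inside the static `serve` override: a rough sketch of how the wrapper might be consumed. The name `MyMCP`, the `/mcp` route, and the assumption that `serve()` returns the fetch-capable handler produced by `McpAgent.serve` are illustrative and not part of this diff:

    // hypothetical usage of the wrapper exported from src/cloudflare-utils.ts
    const InstrumentedMCP = instrumentedMCPServer(MyMCP);
    const handler = InstrumentedMCP.serve("/mcp");

    export default {
      fetch: (req: Request, env: unknown, ctx: ExecutionContext) => handler.fetch(req, env, ctx),
    };
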