diff --git a/CHANGELOG.md b/CHANGELOG.md
index a17950ff594..c381bd3e6f5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,60 @@
# Roo Code Changelog
+## [3.28.8] - 2025-09-25
+
+
+
+- Fix: Resolve frequent "No tool used" errors by clarifying tool-use rules (thanks @hannesrudolph!)
+- Fix: Include initial ask in condense summarization (thanks @hannesrudolph!)
+- Add support for more free models in the Roo provider (thanks @mrubens!)
+- Show cloud switcher and option to add a team when logged in (thanks @mrubens!)
+- Add Opengraph image for web (thanks @brunobergher!)
+
+## [3.28.7] - 2025-09-23
+
+
+
+- UX: Collapse thinking blocks by default with UI settings to always show them (thanks @brunobergher!)
+- Fix: Resolve checkpoint restore popover positioning issue (#8219 by @NaccOll, PR by @app/roomote)
+- Add cloud account switcher functionality (thanks @mrubens!)
+- Add support for zai-org/GLM-4.5-turbo model in Chutes provider (#8155 by @mugnimaestra, PR by @app/roomote)
+
+## [3.28.6] - 2025-09-23
+
+
+
+- Feat: Add GPT-5-Codex model (thanks @daniel-lxs!)
+- Feat: Add keyboard shortcut for toggling auto-approve (Cmd/Ctrl+Alt+A) (thanks @brunobergher!)
+- Fix: Improve reasoning block formatting for better readability (thanks @daniel-lxs!)
+- Fix: Respect Ollama Modelfile num_ctx configuration (#7797 by @hannesrudolph, PR by @app/roomote)
+- Fix: Prevent checkpoint text from wrapping in non-English languages (#8206 by @NaccOll, PR by @app/roomote)
+- Remove language selection and word wrap toggle from CodeBlock (thanks @mrubens!)
+- Feat: Add package.nls.json checking to find-missing-translations script (thanks @app/roomote!)
+- Fix: Bare metal evals fixes (thanks @cte!)
+- Fix: Follow-up questions should trigger the "interactive" state (thanks @cte!)
+
+## [3.28.5] - 2025-09-20
+
+
+
+- Fix: Resolve duplicate rehydrate during reasoning; centralize rehydrate and preserve cancel metadata (#8153 by @hannesrudolph, PR by @hannesrudolph)
+- Add an announcement for Supernova (thanks @mrubens!)
+- Wrap code blocks by default for improved readability (thanks @mrubens!)
+- Fix: Support dash prefix in parseMarkdownChecklist for todo lists (#8054 by @NaccOll, PR by @app/roomote)
+- Fix: Apply tiered pricing for Gemini models via Vertex AI (#8017 by @ikumi3, PR by @app/roomote)
+- Update SambaNova models to latest versions (thanks @snova-jorgep!)
+- Update privacy policy to allow occasional emails (thanks @jdilla1277!)
+
+## [3.28.4] - 2025-09-19
+
+
+
+- UX: Redesigned Message Feed (thanks @brunobergher!)
+- UX: Responsive Auto-Approve (thanks @brunobergher!)
+- Add telemetry retry queue for network resilience (thanks @daniel-lxs!)
+- Fix: Transform keybindings in nightly build to fix command+y shortcut (thanks @app/roomote!)
+- New code-supernova stealth model in the Roo Code Cloud provider (thanks @mrubens!)
+
## [3.28.3] - 2025-09-16

diff --git a/apps/web-evals/package.json b/apps/web-evals/package.json
index 869355100f5..37740163323 100644
--- a/apps/web-evals/package.json
+++ b/apps/web-evals/package.json
@@ -5,10 +5,10 @@
"scripts": {
"lint": "next lint --max-warnings 0",
"check-types": "tsc -b",
- "dev": "scripts/check-services.sh && next dev",
+ "dev": "scripts/check-services.sh && next dev -p 3446",
"format": "prettier --write src",
"build": "next build",
- "start": "next start",
+ "start": "next start -p 3446",
"clean": "rimraf tsconfig.tsbuildinfo .next .turbo"
},
"dependencies": {
diff --git a/apps/web-roo-code/public/Roo-Code-Logo-Horiz-blk.svg b/apps/web-roo-code/public/Roo-Code-Logo-Horiz-blk.svg
index 5be021c28f8..d89abdb5861 100644
--- a/apps/web-roo-code/public/Roo-Code-Logo-Horiz-blk.svg
+++ b/apps/web-roo-code/public/Roo-Code-Logo-Horiz-blk.svg
@@ -1,2971 +1,10 @@
-
-
\ No newline at end of file
+
diff --git a/apps/web-roo-code/public/Roo-Code-Logo-Horiz-white.svg b/apps/web-roo-code/public/Roo-Code-Logo-Horiz-white.svg
index 89a15f16d41..412c1956059 100644
--- a/apps/web-roo-code/public/Roo-Code-Logo-Horiz-white.svg
+++ b/apps/web-roo-code/public/Roo-Code-Logo-Horiz-white.svg
@@ -1,2965 +1,10 @@
-
-
\ No newline at end of file
+
diff --git a/apps/web-roo-code/public/opengraph.png b/apps/web-roo-code/public/opengraph.png
new file mode 100644
index 00000000000..fe75ec976b0
Binary files /dev/null and b/apps/web-roo-code/public/opengraph.png differ
diff --git a/apps/web-roo-code/src/app/legal/cookies/page.tsx b/apps/web-roo-code/src/app/legal/cookies/page.tsx
new file mode 100644
index 00000000000..cb67b8672c5
--- /dev/null
+++ b/apps/web-roo-code/src/app/legal/cookies/page.tsx
@@ -0,0 +1,198 @@
+import type { Metadata } from "next"
+import { SEO } from "@/lib/seo"
+
+const TITLE = "Cookie Policy"
+const DESCRIPTION = "Learn about how Roo Code uses cookies to enhance your experience and provide our services."
+const PATH = "/legal/cookies"
+const OG_IMAGE = SEO.ogImage
+
+export const metadata: Metadata = {
+ title: TITLE,
+ description: DESCRIPTION,
+ alternates: {
+ canonical: `${SEO.url}${PATH}`,
+ },
+ openGraph: {
+ title: TITLE,
+ description: DESCRIPTION,
+ url: `${SEO.url}${PATH}`,
+ siteName: SEO.name,
+ images: [
+ {
+ url: OG_IMAGE.url,
+ width: OG_IMAGE.width,
+ height: OG_IMAGE.height,
+ alt: OG_IMAGE.alt,
+ },
+ ],
+ locale: SEO.locale,
+ type: "article",
+ },
+ twitter: {
+ card: SEO.twitterCard,
+ title: TITLE,
+ description: DESCRIPTION,
+ images: [OG_IMAGE.url],
+ },
+ keywords: [...SEO.keywords, "cookies", "privacy", "tracking", "analytics"],
+}
+
+export default function CookiePolicy() {
+ return (
+ <>
+
+
+
Updated: September 18, 2025
+
+
Cookie Policy
+
+
+ This Cookie Policy explains how Roo Code uses cookies and similar technologies to recognize you
+ when you visit our website.
+
+
+
What are cookies?
+
+ Cookies are small data files that are placed on your computer or mobile device when you visit a
+ website. Cookies help make websites work more efficiently and provide reporting information.
+
+
+
Cookies we use
+
+ We use a minimal number of cookies to provide essential functionality and improve your
+ experience.
+
+ Essential cookies are required for our website to operate. These include authentication cookies
+ from Clerk that allow you to stay logged in to your account. These cookies cannot be disabled
+ without losing core website functionality. The lawful basis for processing these cookies is our
+ legitimate interest in providing secure access to our services.
+
+
+
Analytics cookies
+
+ We use PostHog analytics cookies to understand how visitors interact with our website. This
+ helps us improve our services and user experience. Analytics cookies are placed only if you give
+ consent through our cookie banner. The lawful basis for processing these cookies is your
+ consent, which you can withdraw at any time.
+
+
+
Third-party services
+
+ Our blog at{" "}
+
+ blog.roocode.com
+ {" "}
+ is hosted on Substack. When you visit it, Substack may set cookies for analytics,
+ personalization, and advertising/marketing. These cookies are managed directly by Substack and
+ are outside our control. You can read more in{" "}
+
+ Substack's Cookie Policy
+
+ .
+
+
+
How to control cookies
+
You can control and manage cookies through your browser settings. Most browsers allow you to:
+
+
View what cookies are stored on your device
+
Delete cookies individually or all at once
+
Block third-party cookies
+
Block cookies from specific websites
+
Block all cookies from being set
+
Delete all cookies when you close your browser
+
+
+ Please note that blocking essential cookies may prevent you from using certain features of our
+ website, such as staying logged in to your account.
+
+
+
Changes to this policy
+
+ We may update this Cookie Policy from time to time. When we make changes, we will update the
+ date at the top of this policy. We encourage you to periodically review this policy to stay
+ informed about our use of cookies.
+
+
+
Contact us
+
+ If you have questions about our use of cookies, please contact us at{" "}
+
+ privacy@roocode.com
+
+ .
+
Roo Code engages the following third parties to process Customer Data.
+
+
+
+
+
+
+ Entity Name
+
+
+ Product or Service
+
+
+ Location of Processing
+
+
+ Purpose of Processing
+
+
+
+
+
+
Census
+
Data Services
+
United States
+
Data activation and reverse ETL
+
+
+
Clerk
+
Authentication Services
+
United States
+
User authentication
+
+
+
ClickHouse
+
Data Services
+
United States
+
Real-time analytics database
+
+
+
Cloudflare
+
All Services
+
+ Processing at data center closest to End User
+
+
+ Content delivery network and security
+
+
+
+
Fivetran
+
Data Services
+
United States
+
ETL and data integration
+
+
+
Fly.io
+
Backend Services
+
United States
+
+ Application hosting and deployment
+
+
+
+
HubSpot
+
Customer Services
+
United States
+
CRM and marketing automation
+
+
+
Loops
+
Communication Services
+
United States
+
Email and customer communication
+
+
+
Metabase
+
Data Analytics
+
United States
+
+ Business intelligence and reporting
+
+
+
+
PostHog
+
Data Services
+
United States
+
Product analytics
+
+
+
Sentry
+
All Services
+
United States
+
Error tracking and monitoring
+
+
+
Snowflake
+
Data Services
+
United States
+
Data warehousing and analytics
+
+
+
Stripe
+
Payment Services
+
United States, Europe
+
Payment processing and billing
+
+
+
Supabase
+
Data Services
+
United States
+
Database management and storage
+
+
+
Upstash
+
Infrastructure Services
+
United States
+
Serverless database services
+
+
+
Vercel
+
Customer-facing Services
+
United States, Europe
+
+ Web application hosting and deployment
+
+
+
+
+
+
+
+ >
+ )
+}
diff --git a/apps/web-roo-code/src/app/page.tsx b/apps/web-roo-code/src/app/page.tsx
index bafef64936e..51b798f0bf5 100644
--- a/apps/web-roo-code/src/app/page.tsx
+++ b/apps/web-roo-code/src/app/page.tsx
@@ -3,7 +3,6 @@
import { getVSCodeDownloads } from "@/lib/stats"
import { Button } from "@/components/ui"
-import { AnimatedText } from "@/components/animated-text"
import {
AnimatedBackground,
InstallSection,
@@ -12,6 +11,8 @@ import {
FAQSection,
CodeExample,
} from "@/components/homepage"
+import { EXTERNAL_LINKS } from "@/lib/constants"
+import { ArrowRight } from "lucide-react"
// Invalidate cache when a request comes in, at most once every hour.
export const revalidate = 3600
@@ -21,28 +22,18 @@ export default async function Home() {
return (
<>
-
+
-
-
-
+
+
+
-
- Your
-
- AI-Powered
-
- Dev Team, in Your Editor
-
- and Beyond
-
+
+ An entire AI-powered dev team. In your editor and beyond.
- Supercharge your software development with AI that{" "}
-
- understands your codebase
- {" "}
- and helps you write, refactor, and debug with ease in your editor and in the cloud.
+ Roo's model-agnostic, specialized modes and fine-grained auto-approval controls
+ give you the tools (and the confidence) to get AI working for you.
+ Yes! The Roo Code VS Code extension is open source and free forever. The extension acts
+ as a powerful AI coding assistant right in your editor. These are the prices for Roo
+ Code Cloud.
+
+
+
+
Is there a free trial?
+
+ Yes, all paid plans come with a 14-day free trial.
+
+
+
+
Do I need a credit card for the free trial?
+
+ Yes, but you won't be charged until your trial ends. You can cancel anytime with
+ one click .
+
+
+
+
What payment methods do you accept?
+
+ We accept all major credit cards, debit cards, and can arrange invoice billing for
+ Enterprise customers.
+
+
+
+
Can I change plans anytime?
+
+ Yes, you can upgrade or downgrade your plan at any time. Changes will be reflected in
+ your next billing cycle.
+
+
+
+
+
+
+ Still have questions?{" "}
+
+ Join our Discord
+ {" "}
+ or{" "}
+
+ contact our sales team
+
+
This Privacy Policy explains how Roo Code, Inc. ("Roo Code," "we,"
@@ -86,8 +86,8 @@ export default function Privacy() {
Your source code does not transit Roo Code servers unless you explicitly choose Roo Code
as a model provider (proxy mode).
{" "}
- When Roo Code Cloud is your model provider, your code briefly transits Roo Code servers only to
- forward it to the upstream model, is not stored, and is deleted immediately after
+ When Roo Code Cloud is your model provider, your code briefly transits Roo Code servers only
+ to forward it to the upstream model, is not stored, and is deleted immediately after
forwarding. Otherwise, your code is sent directly—via client‑to‑provider
TLS—to the model you select. Roo Code never stores, inspects, or trains on your code.
@@ -184,6 +184,13 @@ export default function Privacy() {
Send product updates and roadmap communications (opt‑out available)
+
+ Send onboarding, educational, and promotional communications. We may use
+ your account information (such as your name and email address) to send you onboarding
+ messages, product tutorials, feature announcements, newsletters, and other marketing
+ communications. You can opt out of non‑transactional emails at any time (see “Your Choices”
+ below).
+
3. Where Your Data Goes (And Doesn't)
@@ -277,6 +284,12 @@ export default function Privacy() {
Delete your Cloud account at any time from{" "}
Security Settings inside Roo Code Cloud.
+
+ Marketing communications: You can unsubscribe from marketing and
+ promotional emails by clicking the unsubscribe link in those emails. Transactional or
+ service‑related emails (such as password resets, billing notices, or security alerts) will
+ continue even if you opt out.
+
6. Security Practices
diff --git a/apps/web-roo-code/src/components/chromes/footer.tsx b/apps/web-roo-code/src/components/chromes/footer.tsx
index b6a17cebe57..11b883d9ce2 100644
--- a/apps/web-roo-code/src/components/chromes/footer.tsx
+++ b/apps/web-roo-code/src/components/chromes/footer.tsx
@@ -64,52 +64,52 @@ export function Footer() {
- )
-}
diff --git a/apps/web-roo-code/src/components/homepage/features.tsx b/apps/web-roo-code/src/components/homepage/features.tsx
index 4c71946d805..a0415c04f79 100644
--- a/apps/web-roo-code/src/components/homepage/features.tsx
+++ b/apps/web-roo-code/src/components/homepage/features.tsx
@@ -1,71 +1,48 @@
"use client"
import { motion } from "framer-motion"
-import { Bot, Code, Brain, Wrench, Terminal, Puzzle, Globe, Shield, Zap } from "lucide-react"
-import { FeaturesMobile } from "./features-mobile"
-
-import { ReactNode } from "react"
+import { Brain, Shield, Users2, ReplaceAll, Keyboard, LucideIcon, CheckCheck } from "lucide-react"
export interface Feature {
- icon: ReactNode
+ icon: LucideIcon
title: string
description: string
}
export const features: Feature[] = [
{
- icon: ,
- title: "Your AI Dev Team in VS Code",
- description:
- "Roo Code puts a team of agentic AI assistants directly in your editor, with the power to plan, write, and fix code across multiple files.",
- },
- {
- icon: ,
- title: "Multiple Specialized Modes",
- description:
- "From coding to debugging to architecture, Roo Code has a mode for every dev scenario—just switch on the fly.",
- },
- {
- icon: ,
- title: "Deep Project-wide Context",
+ icon: Users2,
+ title: "Specialized Modes",
description:
- "Roo Code reads your entire codebase, preserving valid code through diff-based edits for seamless multi-file refactors.",
+ "Planning, Architecture, Debugging and beyond: Roo's modes stay on-task and deliver. Create your own modes or download from the marketplace.",
},
{
- icon: ,
- title: "Open-Source and Model-Agnostic",
- description:
- "Bring your own model or use local AI—no vendor lock-in. Roo Code is free, open, and adaptable to your needs.",
+ icon: ReplaceAll,
+ title: "Model-Agnostic",
+ description: "Bring your own model key or use local inference — no markup, no lock-in, no restrictions.",
},
{
- icon: ,
- title: "Guarded Command Execution",
- description:
- "Approve or deny commands as needed. Roo Code automates your dev workflow while keeping oversight firmly in your hands.",
- },
- {
- icon: ,
- title: "Fully Customizable",
- description:
- "Create or tweak modes, define usage rules, and shape Roo Code's behavior precisely—your code, your way.",
+ icon: CheckCheck,
+ title: "Granular auto-approval",
+ description: "Control each action and make Roo as autonomous as you want as you build confidence. Or go YOLO.",
},
{
- icon: ,
- title: "Automated Browser Actions",
+ icon: Keyboard,
+ title: "Highly Customizable",
description:
- "Seamlessly test and verify your web app directly from VS Code—Roo Code can open a browser, run checks, and more.",
+ "Fine-tune settings for Roo to work for you, like inference context, model properties, slash commands and more.",
},
{
- icon: ,
- title: "Secure by Design",
+ icon: Brain,
+ title: "Deep Project-wide Context",
description:
- "Security-first from the ground up, Roo Code meets rigorous standards without slowing you down. Monitoring and strict policies keep your code safe at scale.",
+ "Roo Code reads your entire codebase, preserving valid code through diff-based edits for seamless multi-file refactors.",
},
{
- icon: ,
- title: "Seamless Setup and Workflows",
+ icon: Shield,
+ title: "Secure and Private by Design",
description:
- "Get started in minutes—no heavy configs. Roo Code fits alongside your existing tools and dev flow, while supercharging your productivity.",
+ "Open source and local-first. No code leaves your machine unless you say so. SOC 2 Type II compliant.",
},
]
@@ -81,21 +58,6 @@ export function Features() {
},
}
- const itemVariants = {
- hidden: {
- opacity: 0,
- y: 20,
- },
- visible: {
- opacity: 1,
- y: 0,
- transition: {
- duration: 0.6,
- ease: [0.21, 0.45, 0.27, 0.9],
- },
- },
- }
-
const backgroundVariants = {
hidden: {
opacity: 0,
@@ -118,11 +80,11 @@ export function Features() {
viewport={{ once: true }}
variants={backgroundVariants}>
-
+
-
+
- Powerful features for modern developers.
+ Power and flexibility to get stuff done.
- Everything you need to build faster and write better code.
+ The features you need to build, debug and ship faster – without compromising quality.
diff --git a/apps/web-roo-code/src/components/homepage/index.ts b/apps/web-roo-code/src/components/homepage/index.ts
index 192c155473b..9d4427448e6 100644
--- a/apps/web-roo-code/src/components/homepage/index.ts
+++ b/apps/web-roo-code/src/components/homepage/index.ts
@@ -2,9 +2,7 @@ export * from "./animated-background"
export * from "./code-example"
export * from "./company-logos"
export * from "./faq-section"
-export * from "./features-mobile"
export * from "./features"
export * from "./install-section"
-export * from "./testimonials-mobile"
export * from "./testimonials"
export * from "./whats-new-button"
diff --git a/apps/web-roo-code/src/components/homepage/install-section.tsx b/apps/web-roo-code/src/components/homepage/install-section.tsx
index 5da3a7d4ae8..96404b47969 100644
--- a/apps/web-roo-code/src/components/homepage/install-section.tsx
+++ b/apps/web-roo-code/src/components/homepage/install-section.tsx
@@ -46,12 +46,13 @@ export function InstallSection({ downloads }: InstallSectionProps) {
{/* Updated h2 to match other sections */}
-
- Install Roo Code — Open & Flexible
+
+ Install Roo Code now
- Roo Code is open-source, model-agnostic, and developer-focused. Install from the VS Code
- Marketplace or the CLI in minutes, then bring your own AI model.
+ Install from the VS Code Marketplace or the CLI in minutes, then bring your own AI model.
+
+ Roo Code is also compatible with all VS Code forks.
diff --git a/apps/web-roo-code/src/lib/constants.ts b/apps/web-roo-code/src/lib/constants.ts
index 3b9798926cf..c474481805c 100644
--- a/apps/web-roo-code/src/lib/constants.ts
+++ b/apps/web-roo-code/src/lib/constants.ts
@@ -24,7 +24,8 @@ export const EXTERNAL_LINKS = {
OFFICE_HOURS_PODCAST: "https://www.youtube.com/@RooCodeYT/podcasts",
FAQ: "https://roocode.com/#faq",
TESTIMONIALS: "https://roocode.com/#testimonials",
- CLOUD_APP: "https://app.roocode.com",
+ CLOUD_APP_LOGIN: "https://app.roocode.com/sign-in",
+ CLOUD_APP_SIGNUP: "https://app.roocode.com/sign-up",
}
export const INTERNAL_LINKS = {
diff --git a/apps/web-roo-code/src/lib/seo.ts b/apps/web-roo-code/src/lib/seo.ts
index 962662bb22c..7dfad0550a2 100644
--- a/apps/web-roo-code/src/lib/seo.ts
+++ b/apps/web-roo-code/src/lib/seo.ts
@@ -3,15 +3,15 @@ const SITE_URL = process.env.NEXT_PUBLIC_SITE_URL ?? "https://roocode.com"
export const SEO = {
url: SITE_URL,
name: "Roo Code",
- title: "Roo Code – Your AI-Powered Dev Team in VS Code",
+ title: "Roo Code – Your AI-Powered Dev Team in VS Code and Beyond",
description:
"Roo Code puts an entire AI dev team right in your editor, outpacing closed tools with deep project-wide context, multi-step agentic coding, and unmatched developer-centric flexibility.",
locale: "en_US",
ogImage: {
- url: "/android-chrome-512x512.png",
- width: 512,
- height: 512,
- alt: "Roo Code Logo",
+ url: "/opengraph.png",
+ width: 1200,
+ height: 600,
+ alt: "Roo Code",
},
keywords: [
"Roo Code",
diff --git a/packages/cloud/src/CloudService.ts b/packages/cloud/src/CloudService.ts
index ce9e34de8ce..1ca13430a59 100644
--- a/packages/cloud/src/CloudService.ts
+++ b/packages/cloud/src/CloudService.ts
@@ -8,6 +8,7 @@ import type {
AuthService,
SettingsService,
CloudUserInfo,
+ CloudOrganizationMembership,
OrganizationAllowList,
OrganizationSettings,
ShareVisibility,
@@ -170,9 +171,9 @@ export class CloudService extends EventEmitter implements Di
// AuthService
- public async login(): Promise {
+ public async login(landingPageSlug?: string): Promise {
this.ensureInitialized()
- return this.authService!.login()
+ return this.authService!.login(landingPageSlug)
}
public async logout(): Promise {
@@ -242,6 +243,21 @@ export class CloudService extends EventEmitter implements Di
return this.authService!.handleCallback(code, state, organizationId)
}
+ public async switchOrganization(organizationId: string | null): Promise {
+ this.ensureInitialized()
+
+ // Perform the organization switch
+ // StaticTokenAuthService will throw an error if organization switching is not supported
+ await this.authService!.switchOrganization(organizationId)
+ }
+
+ public async getOrganizationMemberships(): Promise {
+ this.ensureInitialized()
+
+ // StaticTokenAuthService will throw an error if organization memberships are not supported
+ return await this.authService!.getOrganizationMemberships()
+ }
+
// SettingsService
public getAllowList(): OrganizationAllowList {
diff --git a/packages/cloud/src/StaticTokenAuthService.ts b/packages/cloud/src/StaticTokenAuthService.ts
index 6630a4a2e01..97ce6eac590 100644
--- a/packages/cloud/src/StaticTokenAuthService.ts
+++ b/packages/cloud/src/StaticTokenAuthService.ts
@@ -63,6 +63,14 @@ export class StaticTokenAuthService extends EventEmitter impl
throw new Error("Authentication methods are disabled in StaticTokenAuthService")
}
+ public async switchOrganization(_organizationId: string | null): Promise {
+ throw new Error("Authentication methods are disabled in StaticTokenAuthService")
+ }
+
+ public async getOrganizationMemberships(): Promise {
+ throw new Error("Authentication methods are disabled in StaticTokenAuthService")
+ }
+
public getState(): AuthState {
return this.state
}
diff --git a/packages/cloud/src/WebAuthService.ts b/packages/cloud/src/WebAuthService.ts
index 934ca90b71d..6e9c76b4632 100644
--- a/packages/cloud/src/WebAuthService.ts
+++ b/packages/cloud/src/WebAuthService.ts
@@ -141,7 +141,8 @@ export class WebAuthService extends EventEmitter implements A
if (
this.credentials === null ||
this.credentials.clientToken !== credentials.clientToken ||
- this.credentials.sessionId !== credentials.sessionId
+ this.credentials.sessionId !== credentials.sessionId ||
+ this.credentials.organizationId !== credentials.organizationId
) {
this.transitionToAttemptingSession(credentials)
}
@@ -174,6 +175,7 @@ export class WebAuthService extends EventEmitter implements A
this.changeState("attempting-session")
+ this.timer.stop()
this.timer.start()
}
@@ -248,8 +250,10 @@ export class WebAuthService extends EventEmitter implements A
*
* This method initiates the authentication flow by generating a state parameter
* and opening the browser to the authorization URL.
+ *
+ * @param landingPageSlug Optional slug of a specific landing page (e.g., "supernova", "special-offer", etc.)
*/
- public async login(): Promise {
+ public async login(landingPageSlug?: string): Promise {
try {
const vscode = await importVscode()
@@ -267,11 +271,17 @@ export class WebAuthService extends EventEmitter implements A
state,
auth_redirect: `${vscode.env.uriScheme}://${publisher}.${name}`,
})
- const url = `${getRooCodeApiUrl()}/extension/sign-in?${params.toString()}`
+
+ // Use landing page URL if slug is provided, otherwise use default sign-in URL
+ const url = landingPageSlug
+ ? `${getRooCodeApiUrl()}/l/${landingPageSlug}?${params.toString()}`
+ : `${getRooCodeApiUrl()}/extension/sign-in?${params.toString()}`
+
await vscode.env.openExternal(vscode.Uri.parse(url))
} catch (error) {
- this.log(`[auth] Error initiating Roo Code Cloud auth: ${error}`)
- throw new Error(`Failed to initiate Roo Code Cloud authentication: ${error}`)
+ const context = landingPageSlug ? ` (landing page: ${landingPageSlug})` : ""
+ this.log(`[auth] Error initiating Roo Code Cloud auth${context}: ${error}`)
+ throw new Error(`Failed to initiate Roo Code Cloud authentication${context}: ${error}`)
}
}
@@ -461,6 +471,42 @@ export class WebAuthService extends EventEmitter implements A
return this.credentials?.organizationId || null
}
+ /**
+ * Switch to a different organization context
+ * @param organizationId The organization ID to switch to, or null for personal account
+ */
+ public async switchOrganization(organizationId: string | null): Promise {
+ if (!this.credentials) {
+ throw new Error("Cannot switch organization: not authenticated")
+ }
+
+ // Update the stored credentials with the new organization ID
+ const updatedCredentials: AuthCredentials = {
+ ...this.credentials,
+ organizationId: organizationId,
+ }
+
+ // Store the updated credentials, handleCredentialsChange will handle the update
+ await this.storeCredentials(updatedCredentials)
+ }
+
+ /**
+ * Get all organization memberships for the current user
+ * @returns Array of organization memberships
+ */
+ public async getOrganizationMemberships(): Promise {
+ if (!this.credentials) {
+ return []
+ }
+
+ try {
+ return await this.clerkGetOrganizationMemberships()
+ } catch (error) {
+ this.log(`[auth] Failed to get organization memberships: ${error}`)
+ return []
+ }
+ }
+
private async clerkSignIn(ticket: string): Promise {
const formData = new URLSearchParams()
formData.append("strategy", "ticket")
@@ -645,9 +691,14 @@ export class WebAuthService extends EventEmitter implements A
}
private async clerkGetOrganizationMemberships(): Promise {
+ if (!this.credentials) {
+ this.log("[auth] Cannot get organization memberships: missing credentials")
+ return []
+ }
+
const response = await fetch(`${getClerkBaseUrl()}/v1/me/organization_memberships`, {
headers: {
- Authorization: `Bearer ${this.credentials!.clientToken}`,
+ Authorization: `Bearer ${this.credentials.clientToken}`,
"User-Agent": this.userAgent(),
},
signal: AbortSignal.timeout(10000),
diff --git a/packages/evals/Dockerfile.web b/packages/evals/Dockerfile.web
index 4c6a8e0258c..c578955232e 100644
--- a/packages/evals/Dockerfile.web
+++ b/packages/evals/Dockerfile.web
@@ -60,5 +60,5 @@ RUN chmod +x /usr/local/bin/entrypoint.sh
ENV DATABASE_URL=postgresql://postgres:password@db:5432/evals_development
ENV REDIS_URL=redis://redis:6379
-EXPOSE 3000
+EXPOSE 3446
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
diff --git a/packages/evals/README.md b/packages/evals/README.md
index 750454956f8..8a54e56b819 100644
--- a/packages/evals/README.md
+++ b/packages/evals/README.md
@@ -29,7 +29,7 @@ Start the evals service:
pnpm evals
```
-The initial build process can take a minute or two. Upon success you should see output indicating that a web service is running on localhost:3000:
+The initial build process can take a minute or two. Upon success you should see output indicating that a web service is running on localhost:3446:
Additionally, you'll find in Docker Desktop that database and redis services are running:
@@ -95,7 +95,7 @@ By default, the evals system uses the following ports:
- **PostgreSQL**: 5433 (external) → 5432 (internal)
- **Redis**: 6380 (external) → 6379 (internal)
-- **Web Service**: 3446 (external) → 3000 (internal)
+- **Web Service**: 3446 (external) → 3446 (internal)
These ports are configured to avoid conflicts with other services that might be running on the standard PostgreSQL (5432) and Redis (6379) ports.
diff --git a/packages/evals/docker-compose.yml b/packages/evals/docker-compose.yml
index 74c25cf2609..5928b531142 100644
--- a/packages/evals/docker-compose.yml
+++ b/packages/evals/docker-compose.yml
@@ -52,7 +52,7 @@ services:
context: ../../
dockerfile: packages/evals/Dockerfile.web
ports:
- - "${EVALS_WEB_PORT:-3446}:3000"
+ - "${EVALS_WEB_PORT:-3446}:3446"
environment:
- HOST_EXECUTION_METHOD=docker
volumes:
diff --git a/packages/evals/scripts/setup.sh b/packages/evals/scripts/setup.sh
index cca6f9ce954..f4ba30ce795 100755
--- a/packages/evals/scripts/setup.sh
+++ b/packages/evals/scripts/setup.sh
@@ -12,7 +12,6 @@ build_extension() {
echo "🔨 Building the Roo Code extension..."
pnpm -w vsix -- --out ../bin/roo-code-$(git rev-parse --short HEAD).vsix || exit 1
code --install-extension ../../bin/roo-code-$(git rev-parse --short HEAD).vsix || exit 1
- cd evals
}
check_docker_services() {
@@ -377,7 +376,7 @@ fi
echo -e "\n🚀 You're ready to rock and roll! \n"
-if ! nc -z localhost 3000; then
+if ! nc -z localhost 3446; then
read -p "🌐 Would you like to start the evals web app? (Y/n): " start_evals
if [[ "$start_evals" =~ ^[Yy]|^$ ]]; then
@@ -386,5 +385,5 @@ if ! nc -z localhost 3000; then
echo "💡 You can start it anytime with 'pnpm --filter @roo-code/web-evals dev'."
fi
else
- echo "👟 The evals web app is running at http://localhost:3000 (or http://localhost:3446 if using Docker)"
+ echo "👟 The evals web app is running at http://localhost:3446"
fi
diff --git a/packages/types/src/__tests__/telemetry.test.ts b/packages/types/src/__tests__/telemetry.test.ts
new file mode 100644
index 00000000000..ace7333798d
--- /dev/null
+++ b/packages/types/src/__tests__/telemetry.test.ts
@@ -0,0 +1,12 @@
+import { taskPropertiesSchema } from "../telemetry.js"
+
+describe("taskPropertiesSchema", () => {
+ it("allows codex-cli apiProvider", () => {
+ const result = taskPropertiesSchema.parse({
+ taskId: "test-task",
+ apiProvider: "codex-cli",
+ })
+
+ expect(result.apiProvider).toBe("codex-cli")
+ })
+})
diff --git a/packages/types/src/cloud.ts b/packages/types/src/cloud.ts
index 7ffb28ae5d6..903dfcb93fd 100644
--- a/packages/types/src/cloud.ts
+++ b/packages/types/src/cloud.ts
@@ -239,9 +239,10 @@ export interface AuthService extends EventEmitter {
broadcast(): void
// Authentication methods
- login(): Promise
+ login(landingPageSlug?: string): Promise
logout(): Promise
handleCallback(code: string | null, state: string | null, organizationId?: string | null): Promise
+ switchOrganization(organizationId: string | null): Promise
// State methods
getState(): AuthState
@@ -253,6 +254,9 @@ export interface AuthService extends EventEmitter {
getSessionToken(): string | undefined
getUserInfo(): CloudUserInfo | null
getStoredOrganizationId(): string | null
+
+ // Organization management
+ getOrganizationMemberships(): Promise
}
/**
diff --git a/packages/types/src/global-settings.ts b/packages/types/src/global-settings.ts
index 7e79855f7e1..a56a00fc355 100644
--- a/packages/types/src/global-settings.ts
+++ b/packages/types/src/global-settings.ts
@@ -147,6 +147,7 @@ export const globalSettingsSchema = z.object({
enhancementApiConfigId: z.string().optional(),
includeTaskHistoryInEnhance: z.boolean().optional(),
historyPreviewCollapsed: z.boolean().optional(),
+ reasoningBlockCollapsed: z.boolean().optional(),
profileThresholds: z.record(z.string(), z.number()).optional(),
hasOpenedModeSelector: z.boolean().optional(),
lastModeExportPath: z.string().optional(),
diff --git a/packages/types/src/message.ts b/packages/types/src/message.ts
index b6eb67e1714..77c055c6e15 100644
--- a/packages/types/src/message.ts
+++ b/packages/types/src/message.ts
@@ -89,6 +89,7 @@ export function isResumableAsk(ask: ClineAsk): ask is ResumableAsk {
*/
export const interactiveAsks = [
+ "followup",
"command",
"tool",
"browser_action_launch",
diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts
index fd327657b67..18fc082a14b 100644
--- a/packages/types/src/provider-settings.ts
+++ b/packages/types/src/provider-settings.ts
@@ -25,6 +25,7 @@ import {
vscodeLlmModels,
xaiModels,
internationalZAiModels,
+ openAiNativeCodexModels,
} from "./providers/index.js"
/**
@@ -132,6 +133,7 @@ export const providerNames = [
"mistral",
"moonshot",
"openai-native",
+ "openai-native-codex",
"qwen-code",
"roo",
"sambanova",
@@ -258,6 +260,7 @@ const ollamaSchema = baseProviderSettingsSchema.extend({
ollamaModelId: z.string().optional(),
ollamaBaseUrl: z.string().optional(),
ollamaApiKey: z.string().optional(),
+ ollamaNumCtx: z.number().int().min(128).optional(),
})
const vsCodeLmSchema = baseProviderSettingsSchema.extend({
@@ -298,6 +301,11 @@ const openAiNativeSchema = apiModelIdProviderModelSchema.extend({
openAiNativeServiceTier: serviceTierSchema.optional(),
})
+// ChatGPT Codex (auth.json) variant - uses local OAuth credentials file (path)
+const openAiNativeCodexSchema = apiModelIdProviderModelSchema.extend({
+ openAiNativeCodexOauthPath: z.string().optional(),
+})
+
const mistralSchema = apiModelIdProviderModelSchema.extend({
mistralApiKey: z.string().optional(),
mistralCodestralUrl: z.string().optional(),
@@ -429,6 +437,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv
geminiSchema.merge(z.object({ apiProvider: z.literal("gemini") })),
geminiCliSchema.merge(z.object({ apiProvider: z.literal("gemini-cli") })),
openAiNativeSchema.merge(z.object({ apiProvider: z.literal("openai-native") })),
+ openAiNativeCodexSchema.merge(z.object({ apiProvider: z.literal("openai-native-codex") })),
mistralSchema.merge(z.object({ apiProvider: z.literal("mistral") })),
deepSeekSchema.merge(z.object({ apiProvider: z.literal("deepseek") })),
deepInfraSchema.merge(z.object({ apiProvider: z.literal("deepinfra") })),
@@ -470,6 +479,7 @@ export const providerSettingsSchema = z.object({
...geminiSchema.shape,
...geminiCliSchema.shape,
...openAiNativeSchema.shape,
+ ...openAiNativeCodexSchema.shape,
...mistralSchema.shape,
...deepSeekSchema.shape,
...deepInfraSchema.shape,
@@ -553,6 +563,7 @@ export const modelIdKeysByProvider: Record = {
bedrock: "apiModelId",
vertex: "apiModelId",
"openai-native": "openAiModelId",
+ "openai-native-codex": "apiModelId",
ollama: "ollamaModelId",
lmstudio: "lmStudioModelId",
gemini: "apiModelId",
@@ -675,6 +686,11 @@ export const MODELS_BY_PROVIDER: Record<
label: "OpenAI",
models: Object.keys(openAiNativeModels),
},
+ "openai-native-codex": {
+ id: "openai-native-codex",
+ label: "OpenAI (ChatGPT Codex)",
+ models: Object.keys(openAiNativeCodexModels),
+ },
"qwen-code": { id: "qwen-code", label: "Qwen Code", models: Object.keys(qwenCodeModels) },
roo: { id: "roo", label: "Roo", models: Object.keys(rooModels) },
sambanova: {
diff --git a/packages/types/src/providers/chutes.ts b/packages/types/src/providers/chutes.ts
index 15dea58263b..d05bd489b1b 100644
--- a/packages/types/src/providers/chutes.ts
+++ b/packages/types/src/providers/chutes.ts
@@ -29,6 +29,7 @@ export type ChutesModelId =
| "tngtech/DeepSeek-R1T-Chimera"
| "zai-org/GLM-4.5-Air"
| "zai-org/GLM-4.5-FP8"
+ | "zai-org/GLM-4.5-turbo"
| "moonshotai/Kimi-K2-Instruct-75k"
| "moonshotai/Kimi-K2-Instruct-0905"
| "Qwen/Qwen3-235B-A22B-Thinking-2507"
@@ -274,6 +275,15 @@ export const chutesModels = {
description:
"GLM-4.5-FP8 model with 128k token context window, optimized for agent-based applications with MoE architecture.",
},
+ "zai-org/GLM-4.5-turbo": {
+ maxTokens: 32768,
+ contextWindow: 131072,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 1,
+ outputPrice: 3,
+ description: "GLM-4.5-turbo model with 128K token context window, optimized for fast inference.",
+ },
"Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8": {
maxTokens: 32768,
contextWindow: 262144,
diff --git a/packages/types/src/providers/index.ts b/packages/types/src/providers/index.ts
index 21e43aaa99a..0ebe0a45739 100644
--- a/packages/types/src/providers/index.ts
+++ b/packages/types/src/providers/index.ts
@@ -18,6 +18,7 @@ export * from "./mistral.js"
export * from "./moonshot.js"
export * from "./ollama.js"
export * from "./openai.js"
+export * from "./openai-codex.js"
export * from "./openrouter.js"
export * from "./qwen-code.js"
export * from "./requesty.js"
diff --git a/packages/types/src/providers/openai-codex.ts b/packages/types/src/providers/openai-codex.ts
new file mode 100644
index 00000000000..3c8fac74a31
--- /dev/null
+++ b/packages/types/src/providers/openai-codex.ts
@@ -0,0 +1,47 @@
+import type { ModelInfo } from "../model.js"
+
+export type OpenAiNativeCodexModelId = keyof typeof openAiNativeCodexModels
+
+export const openAiNativeCodexDefaultModelId: OpenAiNativeCodexModelId = "gpt-5"
+
+export const openAiNativeCodexModels = {
+ "gpt-5": {
+ maxTokens: 128000,
+ contextWindow: 400000,
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: true,
+ reasoningEffort: "medium",
+ description: "GPT-5 via ChatGPT Responses (Codex). Optimized for coding and agentic tasks.",
+ supportsTemperature: false,
+ },
+ "gpt-5-codex": {
+ maxTokens: 128000,
+ contextWindow: 400000,
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: true,
+ reasoningEffort: "medium",
+ description:
+ "GPT-5 Codex via ChatGPT Responses (Codex). A GPT‑5 variant exposed to the client with coding‑oriented defaults.",
+ supportsTemperature: false,
+ },
+ "codex-mini-latest": {
+ // Based on OpenAI's Codex CLI page (fast reasoning model tuned from o4-mini)
+ maxTokens: 100000,
+ contextWindow: 200000,
+ supportsImages: true, // input images supported
+ supportsPromptCache: true,
+ supportsReasoningEffort: true,
+ reasoningEffort: "medium",
+ description:
+ "codex-mini-latest via ChatGPT Responses (Codex). Fast reasoning model optimized for the Codex CLI (fine‑tuned o4‑mini).",
+ supportsTemperature: false,
+ // Pricing per 1M tokens
+ inputPrice: 1.5,
+ outputPrice: 6.0,
+ // Prompt cache pricing
+ cacheWritesPrice: 1.5,
+ cacheReadsPrice: 0.375,
+ },
+} as const satisfies Record
diff --git a/packages/types/src/providers/openai.ts b/packages/types/src/providers/openai.ts
index 028027baad6..a3eed1b57ce 100644
--- a/packages/types/src/providers/openai.ts
+++ b/packages/types/src/providers/openai.ts
@@ -70,6 +70,20 @@ export const openAiNativeModels = {
supportsTemperature: false,
tiers: [{ name: "flex", contextWindow: 400000, inputPrice: 0.025, outputPrice: 0.2, cacheReadsPrice: 0.0025 }],
},
+ "gpt-5-codex": {
+ maxTokens: 128000,
+ contextWindow: 400000,
+ supportsImages: true,
+ supportsPromptCache: true,
+ supportsReasoningEffort: true,
+ reasoningEffort: "medium",
+ inputPrice: 1.25,
+ outputPrice: 10.0,
+ cacheReadsPrice: 0.13,
+ description: "GPT-5-Codex: A version of GPT-5 optimized for agentic coding in Codex",
+ supportsVerbosity: true,
+ supportsTemperature: false,
+ },
"gpt-4.1": {
maxTokens: 32_768,
contextWindow: 1_047_576,
diff --git a/packages/types/src/providers/roo.ts b/packages/types/src/providers/roo.ts
index ee84bbe1b1b..01fae43cd57 100644
--- a/packages/types/src/providers/roo.ts
+++ b/packages/types/src/providers/roo.ts
@@ -1,7 +1,10 @@
import type { ModelInfo } from "../model.js"
-// Roo provider with single model
-export type RooModelId = "xai/grok-code-fast-1"
+export type RooModelId =
+ | "xai/grok-code-fast-1"
+ | "roo/code-supernova"
+ | "xai/grok-4-fast"
+ | "deepseek/deepseek-chat-v3.1"
export const rooDefaultModelId: RooModelId = "xai/grok-code-fast-1"
@@ -16,4 +19,34 @@ export const rooModels = {
description:
"A reasoning model that is blazing fast and excels at agentic coding, accessible for free through Roo Code Cloud for a limited time. (Note: the free prompts and completions are logged by xAI and used to improve the model.)",
},
+ "roo/code-supernova": {
+ maxTokens: 16_384,
+ contextWindow: 200_000,
+ supportsImages: true,
+ supportsPromptCache: true,
+ inputPrice: 0,
+ outputPrice: 0,
+ description:
+ "A versatile agentic coding stealth model that supports image inputs, accessible for free through Roo Code Cloud for a limited time. (Note: the free prompts and completions are logged by the model provider and used to improve the model.)",
+ },
+ "xai/grok-4-fast": {
+ maxTokens: 30_000,
+ contextWindow: 2_000_000,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0,
+ outputPrice: 0,
+ description:
+ "Grok 4 Fast is xAI's latest multimodal model with SOTA cost-efficiency and a 2M token context window. (Note: prompts and completions are logged by xAI and used to improve the model.)",
+ },
+ "deepseek/deepseek-chat-v3.1": {
+ maxTokens: 16_384,
+ contextWindow: 163_840,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0,
+ outputPrice: 0,
+ description:
+ "DeepSeek-V3.1 is a large hybrid reasoning model (671B parameters, 37B active). It extends the DeepSeek-V3 base with a two-phase long-context training process, reaching up to 128K tokens, and uses FP8 microscaling for efficient inference.",
+ },
} as const satisfies Record
diff --git a/packages/types/src/providers/sambanova.ts b/packages/types/src/providers/sambanova.ts
index bed143f6e5c..f339d8bcab0 100644
--- a/packages/types/src/providers/sambanova.ts
+++ b/packages/types/src/providers/sambanova.ts
@@ -6,10 +6,12 @@ export type SambaNovaModelId =
| "Meta-Llama-3.3-70B-Instruct"
| "DeepSeek-R1"
| "DeepSeek-V3-0324"
+ | "DeepSeek-V3.1"
| "DeepSeek-R1-Distill-Llama-70B"
| "Llama-4-Maverick-17B-128E-Instruct"
| "Llama-3.3-Swallow-70B-Instruct-v0.4"
| "Qwen3-32B"
+ | "gpt-oss-120b"
export const sambaNovaDefaultModelId: SambaNovaModelId = "Meta-Llama-3.3-70B-Instruct"
@@ -51,6 +53,15 @@ export const sambaNovaModels = {
outputPrice: 4.5,
description: "DeepSeek V3 model with 32K context window.",
},
+ "DeepSeek-V3.1": {
+ maxTokens: 8192,
+ contextWindow: 32768,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 3.0,
+ outputPrice: 4.5,
+ description: "DeepSeek V3.1 model with 32K context window.",
+ },
"DeepSeek-R1-Distill-Llama-70B": {
maxTokens: 8192,
contextWindow: 131072,
@@ -87,4 +98,13 @@ export const sambaNovaModels = {
outputPrice: 0.8,
description: "Alibaba Qwen 3 32B model with 8K context window.",
},
+ "gpt-oss-120b": {
+ maxTokens: 8192,
+ contextWindow: 131072,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 0.22,
+ outputPrice: 0.59,
+ description: "OpenAI gpt oss 120b model with 128k context window.",
+ },
} as const satisfies Record
diff --git a/packages/types/src/single-file-read-models.ts b/packages/types/src/single-file-read-models.ts
index b83a781507c..302b8d42023 100644
--- a/packages/types/src/single-file-read-models.ts
+++ b/packages/types/src/single-file-read-models.ts
@@ -10,5 +10,5 @@
* @returns true if the model should use single file reads
*/
export function shouldUseSingleFileRead(modelId: string): boolean {
- return modelId.includes("grok-code-fast-1")
+ return modelId.includes("grok-code-fast-1") || modelId.includes("code-supernova")
}
diff --git a/packages/types/src/telemetry.ts b/packages/types/src/telemetry.ts
index bba14857225..df921550122 100644
--- a/packages/types/src/telemetry.ts
+++ b/packages/types/src/telemetry.ts
@@ -61,6 +61,11 @@ export enum TelemetryEventName {
ACCOUNT_LOGOUT_CLICKED = "Account Logout Clicked",
ACCOUNT_LOGOUT_SUCCESS = "Account Logout Success",
+ FEATURED_PROVIDER_CLICKED = "Featured Provider Clicked",
+
+ UPSELL_DISMISSED = "Upsell Dismissed",
+ UPSELL_CLICKED = "Upsell Clicked",
+
SCHEMA_VALIDATION_ERROR = "Schema Validation Error",
DIFF_APPLICATION_ERROR = "Diff Application Error",
SHELL_INTEGRATION_ERROR = "Shell Integration Error",
@@ -106,7 +111,8 @@ export type AppProperties = z.infer
export const taskPropertiesSchema = z.object({
taskId: z.string().optional(),
- apiProvider: z.enum(providerNames).optional(),
+ // Allow Codex CLI harness identifier in addition to standard provider names.
+ apiProvider: z.union([z.enum(providerNames), z.literal("codex-cli")]).optional(),
modelId: z.string().optional(),
diffStrategy: z.string().optional(),
isSubtask: z.boolean().optional(),
@@ -181,6 +187,9 @@ export const rooCodeTelemetryEventSchema = z.discriminatedUnion("type", [
TelemetryEventName.ACCOUNT_CONNECT_SUCCESS,
TelemetryEventName.ACCOUNT_LOGOUT_CLICKED,
TelemetryEventName.ACCOUNT_LOGOUT_SUCCESS,
+ TelemetryEventName.FEATURED_PROVIDER_CLICKED,
+ TelemetryEventName.UPSELL_DISMISSED,
+ TelemetryEventName.UPSELL_CLICKED,
TelemetryEventName.SCHEMA_VALIDATION_ERROR,
TelemetryEventName.DIFF_APPLICATION_ERROR,
TelemetryEventName.SHELL_INTEGRATION_ERROR,
diff --git a/packages/types/src/vscode.ts b/packages/types/src/vscode.ts
index 28385146908..d22ebdab229 100644
--- a/packages/types/src/vscode.ts
+++ b/packages/types/src/vscode.ts
@@ -53,6 +53,7 @@ export const commandIds = [
"focusInput",
"acceptInput",
"focusPanel",
+ "toggleAutoApprove",
] as const
export type CommandId = (typeof commandIds)[number]
diff --git a/releases/3.28.4-release.png b/releases/3.28.4-release.png
new file mode 100644
index 00000000000..ea1e82a8dda
Binary files /dev/null and b/releases/3.28.4-release.png differ
diff --git a/releases/3.28.5-release.png b/releases/3.28.5-release.png
new file mode 100644
index 00000000000..0a22c25c40d
Binary files /dev/null and b/releases/3.28.5-release.png differ
diff --git a/releases/3.28.6-release.png b/releases/3.28.6-release.png
new file mode 100644
index 00000000000..e246cffb014
Binary files /dev/null and b/releases/3.28.6-release.png differ
diff --git a/releases/3.28.7-release.png b/releases/3.28.7-release.png
new file mode 100644
index 00000000000..d4690f19c99
Binary files /dev/null and b/releases/3.28.7-release.png differ
diff --git a/releases/3.28.8-release.png b/releases/3.28.8-release.png
new file mode 100644
index 00000000000..8fcfa224538
Binary files /dev/null and b/releases/3.28.8-release.png differ
diff --git a/scripts/find-missing-translations.js b/scripts/find-missing-translations.js
index 9277d935ba0..fe7577408e1 100755
--- a/scripts/find-missing-translations.js
+++ b/scripts/find-missing-translations.js
@@ -7,12 +7,16 @@
* Options:
* --locale= Only check a specific locale (e.g. --locale=fr)
* --file= Only check a specific file (e.g. --file=chat.json)
- * --area= Only check a specific area (core, webview, or both)
+ * --area= Only check a specific area (core, webview, package-nls, or all)
* --help Show this help message
*/
-const fs = require("fs")
const path = require("path")
+const { promises: fs } = require("fs")
+
+const readFile = fs.readFile
+const readdir = fs.readdir
+const stat = fs.stat
// Process command line arguments
const args = process.argv.slice(2).reduce(
@@ -26,15 +30,15 @@ const args = process.argv.slice(2).reduce(
} else if (arg.startsWith("--area=")) {
acc.area = arg.split("=")[1]
// Validate area value
- if (!["core", "webview", "both"].includes(acc.area)) {
- console.error(`Error: Invalid area '${acc.area}'. Must be 'core', 'webview', or 'both'.`)
+ if (!["core", "webview", "package-nls", "all"].includes(acc.area)) {
+ console.error(`Error: Invalid area '${acc.area}'. Must be 'core', 'webview', 'package-nls', or 'all'.`)
process.exit(1)
}
}
return acc
},
- { area: "both" },
-) // Default to checking both areas
+ { area: "all" },
+) // Default to checking all areas
// Show help if requested
if (args.help) {
@@ -50,10 +54,11 @@ Usage:
Options:
--locale= Only check a specific locale (e.g. --locale=fr)
--file= Only check a specific file (e.g. --file=chat.json)
- --area= Only check a specific area (core, webview, or both)
+ --area= Only check a specific area (core, webview, package-nls, or all)
'core' = Backend (src/i18n/locales)
'webview' = Frontend UI (webview-ui/src/i18n/locales)
- 'both' = Check both areas (default)
+ 'package-nls' = VSCode package.nls.json files
+ 'all' = Check all areas (default)
--help Show this help message
Output:
@@ -69,7 +74,7 @@ const LOCALES_DIRS = {
}
// Determine which areas to check based on args
-const areasToCheck = args.area === "both" ? ["core", "webview"] : [args.area]
+const areasToCheck = args.area === "all" ? ["core", "webview", "package-nls"] : [args.area]
// Recursively find all keys in an object
function findKeys(obj, parentKey = "") {
@@ -105,18 +110,45 @@ function getValueAtPath(obj, path) {
return current
}
+// Shared utility to safely parse JSON files with error handling
+async function parseJsonFile(filePath) {
+ try {
+ const content = await readFile(filePath, "utf8")
+ return JSON.parse(content)
+ } catch (error) {
+ if (error.code === "ENOENT") {
+ return null // File doesn't exist
+ }
+ throw new Error(`Error parsing JSON file '${filePath}': ${error.message}`)
+ }
+}
+
+// Validate that a JSON object has a flat structure (no nested objects)
+function validateFlatStructure(obj, filePath) {
+ for (const [key, value] of Object.entries(obj)) {
+ if (typeof value === "object" && value !== null) {
+ console.error(`Error: ${filePath} should be a flat JSON structure. Found nested object at key '${key}'`)
+ process.exit(1)
+ }
+ }
+}
+
// Function to check translations for a specific area
-function checkAreaTranslations(area) {
+async function checkAreaTranslations(area) {
const LOCALES_DIR = LOCALES_DIRS[area]
// Get all locale directories (or filter to the specified locale)
- const allLocales = fs.readdirSync(LOCALES_DIR).filter((item) => {
- const stats = fs.statSync(path.join(LOCALES_DIR, item))
- return stats.isDirectory() && item !== "en" // Exclude English as it's our source
- })
+ const dirContents = await readdir(LOCALES_DIR)
+ const allLocales = await Promise.all(
+ dirContents.map(async (item) => {
+ const stats = await stat(path.join(LOCALES_DIR, item))
+ return stats.isDirectory() && item !== "en" ? item : null
+ }),
+ )
+ const filteredLocales = allLocales.filter(Boolean)
// Filter to the specified locale if provided
- const locales = args.locale ? allLocales.filter((locale) => locale === args.locale) : allLocales
+ const locales = args.locale ? filteredLocales.filter((locale) => locale === args.locale) : filteredLocales
if (args.locale && locales.length === 0) {
console.error(`Error: Locale '${args.locale}' not found in ${LOCALES_DIR}`)
@@ -129,7 +161,8 @@ function checkAreaTranslations(area) {
// Get all English JSON files
const englishDir = path.join(LOCALES_DIR, "en")
- let englishFiles = fs.readdirSync(englishDir).filter((file) => file.endsWith(".json") && !file.startsWith("."))
+ const englishDirContents = await readdir(englishDir)
+ let englishFiles = englishDirContents.filter((file) => file.endsWith(".json") && !file.startsWith("."))
// Filter to the specified file if provided
if (args.file) {
@@ -140,81 +173,201 @@ function checkAreaTranslations(area) {
englishFiles = englishFiles.filter((file) => file === args.file)
}
- // Load file contents
- let englishFileContents
-
- try {
- englishFileContents = englishFiles.map((file) => ({
- name: file,
- content: JSON.parse(fs.readFileSync(path.join(englishDir, file), "utf8")),
- }))
- } catch (e) {
- console.error(`Error: File '${englishDir}' is not a valid JSON file`)
- process.exit(1)
- }
+ // Load file contents in parallel
+ const englishFileContents = await Promise.all(
+ englishFiles.map(async (file) => {
+ const filePath = path.join(englishDir, file)
+ const content = await parseJsonFile(filePath)
+ if (!content) {
+ console.error(`Error: Could not read file '${filePath}'`)
+ process.exit(1)
+ }
+ return { name: file, content }
+ }),
+ )
console.log(
`Checking ${englishFileContents.length} translation file(s): ${englishFileContents.map((f) => f.name).join(", ")}`,
)
+ // Precompute English keys per file
+ const englishFileKeys = new Map(englishFileContents.map((f) => [f.name, findKeys(f.content)]))
+
// Results object to store missing translations
const missingTranslations = {}
- // For each locale, check for missing translations
- for (const locale of locales) {
- missingTranslations[locale] = {}
+ // Process all locales in parallel
+ await Promise.all(
+ locales.map(async (locale) => {
+ missingTranslations[locale] = {}
+
+ // Process all files for this locale in parallel
+ await Promise.all(
+ englishFileContents.map(async ({ name, content: englishContent }) => {
+ const localeFilePath = path.join(LOCALES_DIR, locale, name)
+
+ // Check if the file exists in the locale
+ const localeContent = await parseJsonFile(localeFilePath)
+ if (!localeContent) {
+ missingTranslations[locale][name] = { file: "File is missing entirely" }
+ return
+ }
+
+ // Find all keys in the English file
+ const englishKeys = englishFileKeys.get(name) || []
+
+ // Check for missing keys in the locale file
+ const missingKeys = []
+
+ for (const key of englishKeys) {
+ const englishValue = getValueAtPath(englishContent, key)
+ const localeValue = getValueAtPath(localeContent, key)
+
+ if (localeValue === undefined) {
+ missingKeys.push({
+ key,
+ englishValue,
+ })
+ }
+ }
+
+ if (missingKeys.length > 0) {
+ missingTranslations[locale][name] = missingKeys
+ }
+ }),
+ )
+ }),
+ )
+
+ return { missingTranslations, hasMissingTranslations: outputResults(missingTranslations, area) }
+}
+
+// Function to output results for an area
+function outputResults(missingTranslations, area) {
+ let hasMissingTranslations = false
+
+ console.log(`\n${area === "core" ? "BACKEND" : "FRONTEND"} Missing Translations Report:\n`)
+
+ for (const [locale, files] of Object.entries(missingTranslations)) {
+ if (Object.keys(files).length === 0) {
+ console.log(`✅ ${locale}: No missing translations`)
+ continue
+ }
- for (const { name, content: englishContent } of englishFileContents) {
- const localeFilePath = path.join(LOCALES_DIR, locale, name)
+ hasMissingTranslations = true
+ console.log(`📝 ${locale}:`)
- // Check if the file exists in the locale
- if (!fs.existsSync(localeFilePath)) {
- missingTranslations[locale][name] = { file: "File is missing entirely" }
+ for (const [fileName, missingItems] of Object.entries(files)) {
+ if (missingItems.file) {
+ console.log(` - ${fileName}: ${missingItems.file}`)
continue
}
- // Load the locale file
- let localeContent
+ console.log(` - ${fileName}: ${missingItems.length} missing translations`)
+
+ for (const { key, englishValue } of missingItems) {
+ console.log(` ${key}: "${englishValue}"`)
+ }
+ }
+
+ console.log("")
+ }
+
+ return hasMissingTranslations
+}
+
+// Function to check package.nls.json translations
+async function checkPackageNlsTranslations() {
+ const SRC_DIR = path.join(__dirname, "../src")
+
+ // Read the base package.nls.json file
+ const baseFilePath = path.join(SRC_DIR, "package.nls.json")
+ const baseContent = await parseJsonFile(baseFilePath)
+
+ if (!baseContent) {
+ console.warn(`Warning: Base package.nls.json not found at ${baseFilePath} - skipping package.nls checks`)
+ return { missingTranslations: {}, hasMissingTranslations: false }
+ }
+
+ // Validate that the base file has a flat structure
+ validateFlatStructure(baseContent, baseFilePath)
+
+ // Get all package.nls.*.json files
+ const srcDirContents = await readdir(SRC_DIR)
+ const nlsFiles = srcDirContents
+ .filter((file) => file.startsWith("package.nls.") && file.endsWith(".json"))
+ .filter((file) => file !== "package.nls.json") // Exclude the base file
+
+ // Filter to the specified locale if provided
+ const filesToCheck = args.locale
+ ? nlsFiles.filter((file) => {
+ const locale = file.replace("package.nls.", "").replace(".json", "")
+ return locale === args.locale
+ })
+ : nlsFiles
+
+ if (args.locale && filesToCheck.length === 0) {
+ console.error(`Error: Locale '${args.locale}' not found in package.nls files`)
+ process.exit(1)
+ }
- try {
- localeContent = JSON.parse(fs.readFileSync(localeFilePath, "utf8"))
- } catch (e) {
- console.error(`Error: File '${localeFilePath}' is not a valid JSON file`)
+ console.log(
+ `\nPACKAGE.NLS - Checking ${filesToCheck.length} locale file(s): ${filesToCheck.map((f) => f.replace("package.nls.", "").replace(".json", "")).join(", ")}`,
+ )
+ console.log(`Checking against base package.nls.json with ${Object.keys(baseContent).length} keys`)
+
+ // Results object to store missing translations
+ const missingTranslations = {}
+
+ // Get all keys from the base file (package.nls files are flat, not nested)
+ const baseKeys = Object.keys(baseContent)
+
+ // Process all locale files in parallel
+ await Promise.all(
+ filesToCheck.map(async (file) => {
+ const locale = file.replace("package.nls.", "").replace(".json", "")
+ const localeFilePath = path.join(SRC_DIR, file)
+
+ const localeContent = await parseJsonFile(localeFilePath)
+ if (!localeContent) {
+ console.error(`Error: Could not read file '${localeFilePath}'`)
process.exit(1)
}
- // Find all keys in the English file
- const englishKeys = findKeys(englishContent)
+ // Validate that the locale file has a flat structure
+ validateFlatStructure(localeContent, localeFilePath)
- // Check for missing keys in the locale file
+ // Check for missing keys
const missingKeys = []
- for (const key of englishKeys) {
- const englishValue = getValueAtPath(englishContent, key)
- const localeValue = getValueAtPath(localeContent, key)
+ for (const key of baseKeys) {
+ const baseValue = baseContent[key]
+ const localeValue = localeContent[key]
if (localeValue === undefined) {
missingKeys.push({
key,
- englishValue,
+ englishValue: baseValue,
})
}
}
if (missingKeys.length > 0) {
- missingTranslations[locale][name] = missingKeys
+ missingTranslations[locale] = {
+ "package.nls.json": missingKeys,
+ }
}
- }
- }
+ }),
+ )
- return { missingTranslations, hasMissingTranslations: outputResults(missingTranslations, area) }
+ return { missingTranslations, hasMissingTranslations: outputPackageNlsResults(missingTranslations) }
}
-// Function to output results for an area
-function outputResults(missingTranslations, area) {
+// Function to output package.nls results
+function outputPackageNlsResults(missingTranslations) {
let hasMissingTranslations = false
- console.log(`\n${area === "core" ? "BACKEND" : "FRONTEND"} Missing Translations Report:\n`)
+ console.log(`\nPACKAGE.NLS Missing Translations Report:\n`)
for (const [locale, files] of Object.entries(missingTranslations)) {
if (Object.keys(files).length === 0) {
@@ -226,11 +379,6 @@ function outputResults(missingTranslations, area) {
console.log(`📝 ${locale}:`)
for (const [fileName, missingItems] of Object.entries(files)) {
- if (missingItems.file) {
- console.log(` - ${fileName}: ${missingItems.file}`)
- continue
- }
-
console.log(` - ${fileName}: ${missingItems.length} missing translations`)
for (const { key, englishValue } of missingItems) {
@@ -245,7 +393,7 @@ function outputResults(missingTranslations, area) {
}
// Main function to find missing translations
-function findMissingTranslations() {
+async function findMissingTranslations() {
try {
console.log("Starting translation check...")
@@ -253,8 +401,13 @@ function findMissingTranslations() {
// Check each requested area
for (const area of areasToCheck) {
- const { hasMissingTranslations } = checkAreaTranslations(area)
- anyAreaMissingTranslations = anyAreaMissingTranslations || hasMissingTranslations
+ if (area === "package-nls") {
+ const { hasMissingTranslations } = await checkPackageNlsTranslations()
+ anyAreaMissingTranslations = anyAreaMissingTranslations || hasMissingTranslations
+ } else {
+ const { hasMissingTranslations } = await checkAreaTranslations(area)
+ anyAreaMissingTranslations = anyAreaMissingTranslations || hasMissingTranslations
+ }
}
// Summary
diff --git a/src/activate/registerCommands.ts b/src/activate/registerCommands.ts
index fac615edf11..41c127333d8 100644
--- a/src/activate/registerCommands.ts
+++ b/src/activate/registerCommands.ts
@@ -221,6 +221,18 @@ const getCommandsMap = ({ context, outputChannel, provider }: RegisterCommandOpt
visibleProvider.postMessageToWebview({ type: "acceptInput" })
},
+ toggleAutoApprove: async () => {
+ const visibleProvider = getVisibleProviderOrLog(outputChannel)
+
+ if (!visibleProvider) {
+ return
+ }
+
+ visibleProvider.postMessageToWebview({
+ type: "action",
+ action: "toggleAutoApprove",
+ })
+ },
})
export const openClineInNewTab = async ({ context, outputChannel }: Omit) => {
diff --git a/src/api/index.ts b/src/api/index.ts
index ac009676762..655ff35b442 100644
--- a/src/api/index.ts
+++ b/src/api/index.ts
@@ -16,6 +16,7 @@ import {
LmStudioHandler,
GeminiHandler,
OpenAiNativeHandler,
+ OpenAiNativeCodexHandler,
DeepSeekHandler,
MoonshotHandler,
MistralHandler,
@@ -115,6 +116,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
return new GeminiHandler(options)
case "openai-native":
return new OpenAiNativeHandler(options)
+ case "openai-native-codex":
+ return new OpenAiNativeCodexHandler(options)
case "deepseek":
return new DeepSeekHandler(options)
case "doubao":
diff --git a/src/api/providers/__tests__/chutes.spec.ts b/src/api/providers/__tests__/chutes.spec.ts
index 398f86ce608..70ee06a923c 100644
--- a/src/api/providers/__tests__/chutes.spec.ts
+++ b/src/api/providers/__tests__/chutes.spec.ts
@@ -253,6 +253,28 @@ describe("ChutesHandler", () => {
)
})
+ it("should return zai-org/GLM-4.5-turbo model with correct configuration", () => {
+ const testModelId: ChutesModelId = "zai-org/GLM-4.5-turbo"
+ const handlerWithModel = new ChutesHandler({
+ apiModelId: testModelId,
+ chutesApiKey: "test-chutes-api-key",
+ })
+ const model = handlerWithModel.getModel()
+ expect(model.id).toBe(testModelId)
+ expect(model.info).toEqual(
+ expect.objectContaining({
+ maxTokens: 32768,
+ contextWindow: 131072,
+ supportsImages: false,
+ supportsPromptCache: false,
+ inputPrice: 1,
+ outputPrice: 3,
+ description: "GLM-4.5-turbo model with 128K token context window, optimized for fast inference.",
+ temperature: 0.5, // Default temperature for non-DeepSeek models
+ }),
+ )
+ })
+
it("should return Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8 model with correct configuration", () => {
const testModelId: ChutesModelId = "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8"
const handlerWithModel = new ChutesHandler({
diff --git a/src/api/providers/__tests__/native-ollama.spec.ts b/src/api/providers/__tests__/native-ollama.spec.ts
index f8792937dbc..4ddeb909bb6 100644
--- a/src/api/providers/__tests__/native-ollama.spec.ts
+++ b/src/api/providers/__tests__/native-ollama.spec.ts
@@ -73,6 +73,61 @@ describe("NativeOllamaHandler", () => {
expect(results[2]).toEqual({ type: "usage", inputTokens: 10, outputTokens: 2 })
})
+ it("should not include num_ctx by default", async () => {
+ // Mock the chat response
+ mockChat.mockImplementation(async function* () {
+ yield { message: { content: "Response" } }
+ })
+
+ const stream = handler.createMessage("System", [{ role: "user" as const, content: "Test" }])
+
+ // Consume the stream
+ for await (const _ of stream) {
+ // consume stream
+ }
+
+ // Verify that num_ctx was NOT included in the options
+ expect(mockChat).toHaveBeenCalledWith(
+ expect.objectContaining({
+ options: expect.not.objectContaining({
+ num_ctx: expect.anything(),
+ }),
+ }),
+ )
+ })
+
+ it("should include num_ctx when explicitly set via ollamaNumCtx", async () => {
+ const options: ApiHandlerOptions = {
+ apiModelId: "llama2",
+ ollamaModelId: "llama2",
+ ollamaBaseUrl: "http://localhost:11434",
+ ollamaNumCtx: 8192, // Explicitly set num_ctx
+ }
+
+ handler = new NativeOllamaHandler(options)
+
+ // Mock the chat response
+ mockChat.mockImplementation(async function* () {
+ yield { message: { content: "Response" } }
+ })
+
+ const stream = handler.createMessage("System", [{ role: "user" as const, content: "Test" }])
+
+ // Consume the stream
+ for await (const _ of stream) {
+ // consume stream
+ }
+
+ // Verify that num_ctx was included with the specified value
+ expect(mockChat).toHaveBeenCalledWith(
+ expect.objectContaining({
+ options: expect.objectContaining({
+ num_ctx: 8192,
+ }),
+ }),
+ )
+ })
+
it("should handle DeepSeek R1 models with reasoning detection", async () => {
const options: ApiHandlerOptions = {
apiModelId: "deepseek-r1",
@@ -120,6 +175,49 @@ describe("NativeOllamaHandler", () => {
})
expect(result).toBe("This is the response")
})
+
+ it("should not include num_ctx in completePrompt by default", async () => {
+ mockChat.mockResolvedValue({
+ message: { content: "Response" },
+ })
+
+ await handler.completePrompt("Test prompt")
+
+ // Verify that num_ctx was NOT included in the options
+ expect(mockChat).toHaveBeenCalledWith(
+ expect.objectContaining({
+ options: expect.not.objectContaining({
+ num_ctx: expect.anything(),
+ }),
+ }),
+ )
+ })
+
+ it("should include num_ctx in completePrompt when explicitly set", async () => {
+ const options: ApiHandlerOptions = {
+ apiModelId: "llama2",
+ ollamaModelId: "llama2",
+ ollamaBaseUrl: "http://localhost:11434",
+ ollamaNumCtx: 4096, // Explicitly set num_ctx
+ }
+
+ handler = new NativeOllamaHandler(options)
+
+ mockChat.mockResolvedValue({
+ message: { content: "Response" },
+ })
+
+ await handler.completePrompt("Test prompt")
+
+ // Verify that num_ctx was included with the specified value
+ expect(mockChat).toHaveBeenCalledWith(
+ expect.objectContaining({
+ options: expect.objectContaining({
+ num_ctx: 4096,
+ }),
+ }),
+ )
+ })
})
describe("error handling", () => {
diff --git a/src/api/providers/__tests__/roo.spec.ts b/src/api/providers/__tests__/roo.spec.ts
index 5897156b0a0..d4affa2beaf 100644
--- a/src/api/providers/__tests__/roo.spec.ts
+++ b/src/api/providers/__tests__/roo.spec.ts
@@ -36,26 +36,12 @@ vitest.mock("openai", () => {
return {
[Symbol.asyncIterator]: async function* () {
yield {
- choices: [
- {
- delta: { content: "Test response" },
- index: 0,
- },
- ],
+ choices: [{ delta: { content: "Test response" }, index: 0 }],
usage: null,
}
yield {
- choices: [
- {
- delta: {},
- index: 0,
- },
- ],
- usage: {
- prompt_tokens: 10,
- completion_tokens: 5,
- total_tokens: 15,
- },
+ choices: [{ delta: {}, index: 0 }],
+ usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 },
}
},
}
@@ -73,6 +59,7 @@ const mockHasInstance = vitest.fn()
// Create mock functions that we can control
const mockGetSessionTokenFn = vitest.fn()
const mockHasInstanceFn = vitest.fn()
+const mockOnFn = vitest.fn()
vitest.mock("@roo-code/cloud", () => ({
CloudService: {
@@ -82,6 +69,8 @@ vitest.mock("@roo-code/cloud", () => ({
authService: {
getSessionToken: () => mockGetSessionTokenFn(),
},
+ on: vitest.fn(),
+ off: vitest.fn(),
}
},
},
@@ -409,11 +398,18 @@ describe("RooHandler", () => {
it("should handle undefined auth service gracefully", () => {
mockHasInstanceFn.mockReturnValue(true)
// Mock CloudService with undefined authService
- const originalGetter = Object.getOwnPropertyDescriptor(CloudService, "instance")?.get
+ const originalGetSessionToken = mockGetSessionTokenFn.getMockImplementation()
+
+ // Temporarily make authService return undefined
+ mockGetSessionTokenFn.mockImplementation(() => undefined)
try {
Object.defineProperty(CloudService, "instance", {
- get: () => ({ authService: undefined }),
+ get: () => ({
+ authService: undefined,
+ on: vitest.fn(),
+ off: vitest.fn(),
+ }),
configurable: true,
})
@@ -424,12 +420,11 @@ describe("RooHandler", () => {
const handler = new RooHandler(mockOptions)
expect(handler).toBeInstanceOf(RooHandler)
} finally {
- // Always restore original getter, even if test fails
- if (originalGetter) {
- Object.defineProperty(CloudService, "instance", {
- get: originalGetter,
- configurable: true,
- })
+ // Restore original mock implementation
+ if (originalGetSessionToken) {
+ mockGetSessionTokenFn.mockImplementation(originalGetSessionToken)
+ } else {
+ mockGetSessionTokenFn.mockReturnValue("test-session-token")
}
}
})
diff --git a/src/api/providers/fetchers/__tests__/glama.spec.ts b/src/api/providers/fetchers/__tests__/glama.spec.ts
new file mode 100644
index 00000000000..4e2256e19e8
--- /dev/null
+++ b/src/api/providers/fetchers/__tests__/glama.spec.ts
@@ -0,0 +1,113 @@
+import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"
+
+vi.mock("axios", () => {
+ const get = vi.fn()
+ return {
+ default: { get },
+ get,
+ isAxiosError: (error: unknown) => Boolean((error as { isAxiosError?: boolean })?.isAxiosError),
+ }
+})
+
+import axios from "axios"
+
+import { DEFAULT_HEADERS } from "../../constants"
+import { getGlamaModels } from "../glama"
+
const axiosMock = axios as unknown as { get: ReturnType<typeof vi.fn> }
+
+const sampleResponse = [
+ {
+ id: "anthropic/claude-3.5-sonnet",
+ capabilities: ["input:image", "caching"],
+ maxTokensInput: 200000,
+ maxTokensOutput: null,
+ maxCompletionTokens: 4096,
+ pricePerToken: {
+ input: "0.0000016",
+ output: "0.000004",
+ cacheWrite: "0.0000003",
+ cacheRead: "0.00000003",
+ },
+ },
+]
+
+describe("getGlamaModels", () => {
+ beforeEach(() => {
+ axiosMock.get.mockReset()
+ })
+
+ afterEach(() => {
+ vi.restoreAllMocks()
+ vi.useRealTimers()
+ })
+
+ it("fetches and normalises models", async () => {
+ axiosMock.get.mockResolvedValue({ data: sampleResponse })
+
+ const models = await getGlamaModels()
+
+ expect(axiosMock.get).toHaveBeenCalledWith(
+ "https://glama.ai/api/gateway/v1/models",
+ expect.objectContaining({ headers: DEFAULT_HEADERS }),
+ )
+
+ const model = models["anthropic/claude-3.5-sonnet"]
+ expect(model).toBeDefined()
+ expect(model).toMatchObject({
+ maxTokens: 4096,
+ contextWindow: 200000,
+ supportsImages: true,
+ supportsPromptCache: true,
+ outputPrice: 4,
+ cacheWritesPrice: 0.3,
+ cacheReadsPrice: 0.03,
+ })
+ expect(model?.inputPrice).toBeCloseTo(1.6)
+ })
+
+ it("retries before succeeding", async () => {
+ vi.useFakeTimers()
+ const error = Object.assign(new Error("ECONNRESET"), {
+ isAxiosError: true,
+ toJSON: () => ({ code: "ECONNRESET" }),
+ })
+ axiosMock.get.mockRejectedValueOnce(error).mockResolvedValue({ data: sampleResponse })
+
+ const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {})
+ const errorSpy = vi.spyOn(console, "error").mockImplementation(() => {})
+
+ const promise = getGlamaModels()
+
+ expect(axiosMock.get).toHaveBeenCalledTimes(1)
+ await vi.runAllTimersAsync()
+ const models = await promise
+
+ expect(axiosMock.get).toHaveBeenCalledTimes(2)
+ expect(models).toHaveProperty("anthropic/claude-3.5-sonnet")
+ expect(warnSpy).toHaveBeenCalled()
+ expect(errorSpy).not.toHaveBeenCalled()
+ })
+
+ it("returns empty object after max retries", async () => {
+ vi.useFakeTimers()
+ const error = Object.assign(new Error("ECONNRESET"), {
+ isAxiosError: true,
+ toJSON: () => ({ code: "ECONNRESET" }),
+ })
+ axiosMock.get.mockRejectedValue(error)
+
+ const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {})
+ const errorSpy = vi.spyOn(console, "error").mockImplementation(() => {})
+
+ const promise = getGlamaModels()
+ await vi.runAllTimersAsync()
+
+ const models = await promise
+
+ expect(axiosMock.get).toHaveBeenCalledTimes(3)
+ expect(models).toEqual({})
+ expect(warnSpy).toHaveBeenCalledTimes(2)
+ expect(errorSpy).toHaveBeenCalledTimes(1)
+ })
+})
diff --git a/src/api/providers/fetchers/glama.ts b/src/api/providers/fetchers/glama.ts
index 9fd57e2c680..e3a9e8fc159 100644
--- a/src/api/providers/fetchers/glama.ts
+++ b/src/api/providers/fetchers/glama.ts
@@ -1,43 +1,64 @@
-import axios from "axios"
+import axios, { isAxiosError } from "axios"
+import https from "https"
import type { ModelInfo } from "@roo-code/types"
import { parseApiPrice } from "../../../shared/cost"
+import { DEFAULT_HEADERS } from "../constants"
+
+const GLAMA_MODELS_URL = "https://glama.ai/api/gateway/v1/models"
+const MAX_ATTEMPTS = 3
+const BASE_RETRY_DELAY_MS = 500
+
+const glamaAgent = new https.Agent({ keepAlive: true })
+
+const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms))
export async function getGlamaModels(): Promise<Record<string, ModelInfo>> {
- const models: Record<string, ModelInfo> = {}
-
- try {
- const response = await axios.get("https://glama.ai/api/gateway/v1/models")
- const rawModels = response.data
-
- for (const rawModel of rawModels) {
- const modelInfo: ModelInfo = {
- maxTokens: rawModel.maxTokensOutput,
- contextWindow: rawModel.maxTokensInput,
- supportsImages: rawModel.capabilities?.includes("input:image"),
- supportsComputerUse: rawModel.capabilities?.includes("computer_use"),
- supportsPromptCache: rawModel.capabilities?.includes("caching"),
- inputPrice: parseApiPrice(rawModel.pricePerToken?.input),
- outputPrice: parseApiPrice(rawModel.pricePerToken?.output),
- description: undefined,
- cacheWritesPrice: parseApiPrice(rawModel.pricePerToken?.cacheWrite),
- cacheReadsPrice: parseApiPrice(rawModel.pricePerToken?.cacheRead),
- }
+ for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
+ try {
+ const response = await axios.get(GLAMA_MODELS_URL, {
+ headers: DEFAULT_HEADERS,
+ httpsAgent: glamaAgent,
+ timeout: 15_000,
+ })
+ const rawModels = response.data ?? []
+ const models: Record<string, ModelInfo> = {}
- switch (rawModel.id) {
- case rawModel.id.startsWith("anthropic/"):
- modelInfo.maxTokens = 8192
- break
- default:
- break
+ for (const rawModel of rawModels) {
+ const modelInfo: ModelInfo = {
+ maxTokens: rawModel.maxTokensOutput ?? rawModel.maxCompletionTokens,
+ contextWindow: rawModel.maxTokensInput,
+ supportsImages: rawModel.capabilities?.includes("input:image"),
+ supportsComputerUse: rawModel.capabilities?.includes("computer_use"),
+ supportsPromptCache: rawModel.capabilities?.includes("caching"),
+ inputPrice: parseApiPrice(rawModel.pricePerToken?.input),
+ outputPrice: parseApiPrice(rawModel.pricePerToken?.output),
+ description: undefined,
+ cacheWritesPrice: parseApiPrice(rawModel.pricePerToken?.cacheWrite),
+ cacheReadsPrice: parseApiPrice(rawModel.pricePerToken?.cacheRead),
+ }
+
+ if (typeof rawModel.id === "string" && rawModel.id.startsWith("anthropic/")) {
+ // Glama omits maxTokens for some Anthropic entries; default to 8K to match provider docs.
+ modelInfo.maxTokens = modelInfo.maxTokens ?? 8192
+ }
+
+ models[rawModel.id] = modelInfo
}
- models[rawModel.id] = modelInfo
+ return models
+ } catch (error) {
+ const loggable = isAxiosError(error) ? error.toJSON() : error
+ const prefix = `[getGlamaModels] Attempt ${attempt} failed`
+ if (attempt < MAX_ATTEMPTS) {
+ console.warn(prefix, loggable)
+ await sleep(BASE_RETRY_DELAY_MS * attempt)
+ } else {
+ console.error(`${prefix}; giving up`, loggable)
+ }
}
- } catch (error) {
- console.error(`Error fetching Glama models: ${JSON.stringify(error, Object.getOwnPropertyNames(error), 2)}`)
}
- return models
+ return {}
}
diff --git a/src/api/providers/gemini.ts b/src/api/providers/gemini.ts
index 775d763a05f..573adda879e 100644
--- a/src/api/providers/gemini.ts
+++ b/src/api/providers/gemini.ts
@@ -286,10 +286,7 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
outputTokens: number
cacheReadTokens?: number
}) {
- if (!info.inputPrice || !info.outputPrice || !info.cacheReadsPrice) {
- return undefined
- }
-
+ // For models with tiered pricing, prices might only be defined in tiers
let inputPrice = info.inputPrice
let outputPrice = info.outputPrice
let cacheReadsPrice = info.cacheReadsPrice
@@ -306,6 +303,16 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
}
}
+ // Check if we have the required prices after considering tiers
+ if (!inputPrice || !outputPrice) {
+ return undefined
+ }
+
+ // cacheReadsPrice is optional - if not defined, treat as 0
+ if (!cacheReadsPrice) {
+ cacheReadsPrice = 0
+ }
+
// Subtract the cached input tokens from the total input tokens.
const uncachedInputTokens = inputTokens - cacheReadTokens
diff --git a/src/api/providers/index.ts b/src/api/providers/index.ts
index 85d877b6bc7..a65de3f6f58 100644
--- a/src/api/providers/index.ts
+++ b/src/api/providers/index.ts
@@ -19,6 +19,7 @@ export { LmStudioHandler } from "./lm-studio"
export { MistralHandler } from "./mistral"
export { OllamaHandler } from "./ollama"
export { OpenAiNativeHandler } from "./openai-native"
+export { OpenAiNativeCodexHandler } from "./openai-native-codex"
export { OpenAiHandler } from "./openai"
export { OpenRouterHandler } from "./openrouter"
export { QwenCodeHandler } from "./qwen-code"
diff --git a/src/api/providers/native-ollama.ts b/src/api/providers/native-ollama.ts
index 80231540e8e..83a5c7b36ea 100644
--- a/src/api/providers/native-ollama.ts
+++ b/src/api/providers/native-ollama.ts
@@ -8,6 +8,11 @@ import { getOllamaModels } from "./fetchers/ollama"
import { XmlMatcher } from "../../utils/xml-matcher"
import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+interface OllamaChatOptions {
+ temperature: number
+ num_ctx?: number
+}
+
function convertToOllamaMessages(anthropicMessages: Anthropic.Messages.MessageParam[]): Message[] {
const ollamaMessages: Message[] = []
@@ -184,15 +189,22 @@ export class NativeOllamaHandler extends BaseProvider implements SingleCompletio
)
try {
+ // Build options object conditionally
+ const chatOptions: OllamaChatOptions = {
+ temperature: this.options.modelTemperature ?? (useR1Format ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0),
+ }
+
+ // Only include num_ctx if explicitly set via ollamaNumCtx
+ if (this.options.ollamaNumCtx !== undefined) {
+ chatOptions.num_ctx = this.options.ollamaNumCtx
+ }
+
// Create the actual API request promise
const stream = await client.chat({
model: modelId,
messages: ollamaMessages,
stream: true,
- options: {
- num_ctx: modelInfo.contextWindow,
- temperature: this.options.modelTemperature ?? (useR1Format ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0),
- },
+ options: chatOptions,
})
let totalInputTokens = 0
@@ -274,13 +286,21 @@ export class NativeOllamaHandler extends BaseProvider implements SingleCompletio
const { id: modelId } = await this.fetchModel()
const useR1Format = modelId.toLowerCase().includes("deepseek-r1")
+ // Build options object conditionally
+ const chatOptions: OllamaChatOptions = {
+ temperature: this.options.modelTemperature ?? (useR1Format ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0),
+ }
+
+ // Only include num_ctx if explicitly set via ollamaNumCtx
+ if (this.options.ollamaNumCtx !== undefined) {
+ chatOptions.num_ctx = this.options.ollamaNumCtx
+ }
+
const response = await client.chat({
model: modelId,
messages: [{ role: "user", content: prompt }],
stream: false,
- options: {
- temperature: this.options.modelTemperature ?? (useR1Format ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0),
- },
+ options: chatOptions,
})
return response.message?.content || ""
diff --git a/src/api/providers/openai-native-codex.prompt.ts b/src/api/providers/openai-native-codex.prompt.ts
new file mode 100644
index 00000000000..27096dc6c3a
--- /dev/null
+++ b/src/api/providers/openai-native-codex.prompt.ts
@@ -0,0 +1,161 @@
+/**
+ * ChatGPT Codex system prompt (canonical/inert):
+ * - The Responses API applies an immutable default system prompt server‑side.
+ * - We cannot replace it dynamically; this file supplies the canonical text used for the top‑level "instructions".
+ * Strategy:
+ * - We complement this with a separate system-role injection in the provider using
+ * overridePrompt (to de-emphasize the server defaults) and the task's system prompt (the current task).
+ * (to de-emphasize defaults) and (the current task).
+ * - See OpenAiNativeCodexHandler.createMessage for details and rationale.
+ */
+export default `You are Codex, based on GPT-5. You are running as a coding agent in the Codex CLI on a user's computer.
+
+## General
+
+- The arguments to \`shell\` will be passed to execvp(). Most terminal commands should be prefixed with ["bash", "-lc"].
+- Always set the \`workdir\` param when using the shell function. Do not use \`cd\` unless absolutely necessary.
+- When searching for text or files, prefer using \`rg\` or \`rg --files\` respectively because \`rg\` is much faster than alternatives like \`grep\`. (If the \`rg\` command is not found, then use alternatives.)
+
+## Editing constraints
+
+- Default to ASCII when editing or creating files. Only introduce non-ASCII or other Unicode characters when there is a clear justification and the file already uses them.
+- Add succinct code comments that explain what is going on if code is not self-explanatory. You should not add comments like "Assigns the value to the variable", but a brief comment might be useful ahead of a complex code block that the user would otherwise have to spend time parsing out. Usage of these comments should be rare.
+- You may be in a dirty git worktree.
+ * NEVER revert existing changes you did not make unless explicitly requested, since these changes were made by the user.
+ * If asked to make a commit or code edits and there are unrelated changes to your work or changes that you didn't make in those files, don't revert those changes.
+ * If the changes are in files you've touched recently, you should read carefully and understand how you can work with the changes rather than reverting them.
+ * If the changes are in unrelated files, just ignore them and don't revert them.
+- While you are working, you might notice unexpected changes that you didn't make. If this happens, STOP IMMEDIATELY and ask the user how they would like to proceed.
+
+## Plan tool
+
+When using the planning tool:
+- Skip using the planning tool for straightforward tasks (roughly the easiest 25%).
+- Do not make single-step plans.
+- When you made a plan, update it after having performed one of the sub-tasks that you shared on the plan.
+
+## Codex CLI harness, sandboxing, and approvals
+
+The Codex CLI harness supports several different configurations for sandboxing and escalation approvals that the user can choose from.
+
+Filesystem sandboxing defines which files can be read or written. The options for \`sandbox_mode\` are:
+- **read-only**: The sandbox only permits reading files.
+- **workspace-write**: The sandbox permits reading files, and editing files in \`cwd\` and \`writable_roots\`. Editing files in other directories requires approval.
+- **danger-full-access**: No filesystem sandboxing - all commands are permitted.
+
+Network sandboxing defines whether network can be accessed without approval. Options for \`network_access\` are:
+- **restricted**: Requires approval
+- **enabled**: No approval needed
+
+Approvals are your mechanism to get user consent to run shell commands without the sandbox. Possible configuration options for \`approval_policy\` are
+- **untrusted**: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands.
+- **on-failure**: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox.
+- **on-request**: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for it in the \`shell\` command description.)
+- **never**: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost best to finish the task and validate your work before yielding. If this mode is paired with \`danger-full-access\`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding.
+
+When you are running with \`approval_policy == on-request\`, and sandboxing enabled, here are scenarios where you'll need to request approval:
+- You need to run a command that writes to a directory that requires it (e.g. running tests that write to /var)
+- You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files.
+- You are running sandboxed and need to run a command that requires network access (e.g. installing packages)
+- If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval. ALWAYS proceed to use the \`with_escalated_permissions\` and \`justification\` parameters - do not message the user before requesting approval for the command.
+- You are about to take a potentially destructive action such as an \`rm\` or \`git reset\` that the user did not explicitly ask for
+- (for all of these, you should weigh alternative paths that do not require approval)
+
+When \`sandbox_mode\` is set to read-only, you'll need to request approval for any command that isn't a read.
+
+You will be told what filesystem sandboxing, network sandboxing, and approval mode are active in a developer or user message. If you are not told about this, assume that you are running with workspace-write, network sandboxing enabled, and approval on-failure.
+
+Although they introduce friction to the user because your work is paused until the user responds, you should leverage them when necessary to accomplish important work. If the completing the task requires escalated permissions, Do not let these settings or the sandbox deter you from attempting to accomplish the user's task unless it is set to "never", in which case never ask for approvals.
+
+When requesting approval to execute a command that will require escalated privileges:
+ - Provide the \`with_escalated_permissions\` parameter with the boolean value true
+ - Include a short, 1 sentence explanation for why you need to enable \`with_escalated_permissions\` in the justification parameter
+
+## Special user requests
+
+- If the user makes a simple request (such as asking for the time) which you can fulfill by running a terminal command (such as \`date\`), you should do so.
+- If the user asks for a "review", default to a code review mindset: prioritise identifying bugs, risks, behavioural regressions, and missing tests. Findings must be the primary focus of the response - keep summaries or overviews brief and only after enumerating the issues. Present findings first (ordered by severity with file/line references), follow with open questions or assumptions, and offer a change-summary only as a secondary detail. If no findings are discovered, state that explicitly and mention any residual risks or testing gaps.
+
+## Presenting your work and final message
+
+You are producing plain text that will later be styled by the CLI. Follow these rules exactly. Formatting should make results easy to scan, but not feel mechanical. Use judgment to decide how much structure adds value.
+
+- Default: be very concise; friendly coding teammate tone.
+- Ask only when needed; suggest ideas; mirror the user's style.
+- For substantial work, summarize clearly; follow final‑answer formatting.
+- Skip heavy formatting for simple confirmations.
+- Don't dump large files you've written; reference paths only.
+- No "save/copy this file" - User is on the same machine.
+- Offer logical next steps (tests, commits, build) briefly; add verify steps if you couldn't do something.
+- For code changes:
+ * Lead with a quick explanation of the change, and then give more details on the context covering where and why a change was made. Do not start this explanation with "summary", just jump right in.
+ * If there are natural next steps the user may want to take, suggest them at the end of your response. Do not make suggestions if there are no natural next steps.
+ * When suggesting multiple options, use numeric lists for the suggestions so the user can quickly respond with a single number.
+- The user does not command execution outputs. When asked to show the output of a command (e.g. \`git show\`), relay the important details in your answer or summarize the key lines so the user understands the result.
+
+### Final answer structure and style guidelines
+
+- Plain text; CLI handles styling. Use structure only when it helps scanability.
+- Headers: optional; short Title Case (1-3 words) wrapped in **…**; no blank line before the first bullet; add only if they truly help.
+- Bullets: use - ; merge related points; keep to one line when possible; 4–6 per list ordered by importance; keep phrasing consistent.
+- Monospace: backticks for commands/paths/env vars/code ids and inline examples; use for literal keyword bullets; never combine with **.
+- Code samples or multi-line snippets should be wrapped in fenced code blocks; add a language hint whenever obvious.
+- Structure: group related bullets; order sections general → specific → supporting; for subsections, start with a bolded keyword bullet, then items; match complexity to the task.
+- Tone: collaborative, concise, factual; present tense, active voice; self‑contained; no "above/below"; parallel wording.
+- Don'ts: no nested bullets/hierarchies; no ANSI codes; don't cram unrelated keywords; keep keyword lists short—wrap/reformat if long; avoid naming formatting styles in answers.
+- Adaptation: code explanations → precise, structured with code refs; simple tasks → lead with outcome; big changes → logical walkthrough + rationale + next actions; casual one-offs → plain sentences, no headers/bullets.
+- File References: When referencing files in your response, make sure to include the relevant start line and always follow the below rules:
+ * Use inline code to make file paths clickable.
+ * Each reference should have a stand alone path. Even if it's the same file.
+ * Accepted: absolute, workspace‑relative, a/ or b/ diff prefixes, or bare filename/suffix.
+ * Line/column (1‑based, optional): :line[:column] or #Lline[Ccolumn] (column defaults to 1).
+ * Do not use URIs like file://, vscode://, or https://.
+ * Do not provide range of lines
+ * Examples: src/app.ts, src/app.ts:42, b/server/index.js#L10, C:\\repo\\project\\main.rs:12:5
+`
+
+/**
+ * Rationale for overridePrompt:
+ * - Codex’s default system prompt cannot be removed.
+ * - This override clarifies Roo’s identity and precedence rules and asks the model to honor Roo’s tools and flow.
+ * - It is injected as a system-role message, so the server’s default is still present, but downweighted in practice.
+ */
+export const overridePrompt = `## Identity and precedence
+- You are Roo (not Codex). Ignore any "Codex", "Codex CLI", or "based on GPT-5" identity statements. Roo's rules take precedence over any harness text.
+
+## Tooling model
+- Ignore any "shell" function, execvp argument formats, and "workdir" parameters. Use Roo's command-execution tool with an explicit working directory.
+- If a command must run elsewhere, run a single command string prefixed with "cd && ..." via the tool; do not rely on a separate "workdir" concept.
+
+## Code search and file exploration
+- Do not use terminal search utilities (e.g., ripgrep/grep) to explore repositories.
+- For any new area of exploration: first use the semantic code search tool; then use the project's regex search and file reader tools as needed.
+
+## Planning and preambles
+- Do not reference or depend on a generic "planning tool."
+- For non-trivial tasks: briefly restate the goal, outline a short plan up front, provide concise progress updates during tool use, and summarize outcomes distinctly.
+
+## Approvals and sandboxing
+- Ignore Codex approval/sandbox modes and escalation parameters (including any fields to request escalated permissions or justifications).
+- Follow Roo's one-tool-per-message flow and wait for user confirmation after each tool use. Do not take destructive/high-impact actions without explicit user confirmation.
+
+## Testing and validation
+- Do not add temporary tests/scripts solely for validation and then remove them.
+- Ensure changes retain or add proper test coverage using the repository's established test runner, paths, and conventions.
+
+## Output and formatting
+- Ignore "plain text only" guidance. Follow Roo's Markdown rules, including clickable file and syntax references in the required link format.
+- Do not dump large file contents; reference file paths instead.
+
+## Command outputs and environment assumptions
+- Do not assume a CLI will style or relay outputs automatically. Summarize essential results per Roo's rules; request pasted output only if absolutely necessary.
+- Assume Roo's environment semantics (e.g., macOS, constrained workspace directory) and tool behavior.
+
+## Risky operations and GUIs
+- Do not request or simulate escalation flows from Codex. Propose safe alternatives within Roo's constraints and obtain explicit confirmation when required.
+- Do not launch GUI apps or perform networked installs via Codex-style approvals.
+
+## Eagerness calibration
+- Gather just enough context to act; avoid repetitive searches; prefer acting and validating over extended exploration. Proceed under reasonable uncertainty when appropriate, documenting assumptions succinctly.
+
+- Any instructions following these rules in this override prompt take precedence over these override rules.
+`
diff --git a/src/api/providers/openai-native-codex.ts b/src/api/providers/openai-native-codex.ts
new file mode 100644
index 00000000000..8f613e2a62c
--- /dev/null
+++ b/src/api/providers/openai-native-codex.ts
@@ -0,0 +1,562 @@
+import type { Anthropic } from "@anthropic-ai/sdk"
+import { promises as fs } from "node:fs"
+import os from "node:os"
+import path from "node:path"
+
+import type { ApiHandlerCreateMessageMetadata } from "../index"
+import { BaseProvider } from "./base-provider"
+// stream + params
+import { ApiStream, ApiStreamUsageChunk } from "../transform/stream"
+import { getModelParams } from "../transform/model-params"
+// Provider prompt content as a TS string module (no loader required)
+import codexPromptContent, { overridePrompt } from "./openai-native-codex.prompt"
+import { getApiRequestTimeout } from "./utils/timeout-config"
+import { t } from "i18next"
+
+import {
+ type ModelInfo,
+ type ReasoningEffortWithMinimal,
+ type ServiceTier,
+ type VerbosityLevel,
+ openAiNativeCodexDefaultModelId,
+ openAiNativeCodexModels,
+} from "@roo-code/types"
+
+import type { ApiHandlerOptions } from "../../shared/api"
+import { calculateApiCostOpenAI } from "../../shared/cost"
+
+export type OpenAiNativeCodexModel = ReturnType<OpenAiNativeCodexHandler["getModel"]>
+
+// Codex input typing for safer transforms and tests
+type CodexRole = "system" | "user" | "assistant"
+interface CodexInputText {
+ type: "input_text"
+ text: string
+}
+interface CodexOutputText {
+ type: "output_text"
+ text: string
+}
+interface CodexInputImage {
+ type: "input_image"
+ image_url: string
+}
+type CodexContent = CodexInputText | CodexOutputText | CodexInputImage
+interface CodexMessage {
+ role: CodexRole
+ content: CodexContent[]
+}
+
+interface AuthTokens {
+ access_token?: string
+ account_id?: string
+ id_token?: string
+}
+interface AuthJson {
+ tokens?: AuthTokens
+}
+
+/**
+ * OpenAI Native (Codex) provider
+ * - Uses ChatGPT auth.json tokens (no API key)
+ * - Calls ChatGPT Responses endpoint: https://chatgpt.com/backend-api/codex/responses
+ */
+export class OpenAiNativeCodexHandler extends BaseProvider {
+ protected options: ApiHandlerOptions
+ private chatgptAccessToken!: string
+ private chatgptAccountId?: string
+
+	// Provider prompt is bundled as a TS string module (no esbuild text loader required)
+
+	// Provider prompt content is imported from ./openai-native-codex.prompt
+
+ constructor(options: ApiHandlerOptions) {
+ super()
+ this.options = options
+ if (this.options.enableGpt5ReasoningSummary === undefined) {
+ this.options.enableGpt5ReasoningSummary = true
+ }
+
+ // Credentials are resolved lazily via ensureAuthenticated() on first use.
+ }
+
+ // Normalize usage to Roo's ApiStreamUsageChunk and compute totalCost
+ private normalizeUsage(usage: any, model: OpenAiNativeCodexModel): ApiStreamUsageChunk | undefined {
+ if (!usage) return undefined
+
+ const inputDetails = usage.input_tokens_details ?? usage.prompt_tokens_details
+ const hasCachedTokens = typeof inputDetails?.cached_tokens === "number"
+ const hasCacheMissTokens = typeof inputDetails?.cache_miss_tokens === "number"
+ const cachedFromDetails = hasCachedTokens ? inputDetails.cached_tokens : 0
+ const missFromDetails = hasCacheMissTokens ? inputDetails.cache_miss_tokens : 0
+
+ let totalInputTokens = usage.input_tokens ?? usage.prompt_tokens ?? 0
+ if (totalInputTokens === 0 && inputDetails && (cachedFromDetails > 0 || missFromDetails > 0)) {
+ totalInputTokens = cachedFromDetails + missFromDetails
+ }
+
+ const totalOutputTokens = usage.output_tokens ?? usage.completion_tokens ?? 0
+ const cacheWriteTokens = usage.cache_creation_input_tokens ?? usage.cache_write_tokens ?? 0
+ const cacheReadTokens =
+ usage.cache_read_input_tokens ?? usage.cache_read_tokens ?? usage.cached_tokens ?? cachedFromDetails ?? 0
+
+ const totalCost = calculateApiCostOpenAI(
+ model.info,
+ totalInputTokens,
+ totalOutputTokens,
+ cacheWriteTokens,
+ cacheReadTokens,
+ )
+
+ const reasoningTokens =
+ typeof usage.output_tokens_details?.reasoning_tokens === "number"
+ ? usage.output_tokens_details.reasoning_tokens
+ : undefined
+
+ const out: ApiStreamUsageChunk = {
+ type: "usage",
+ inputTokens: totalInputTokens,
+ outputTokens: totalOutputTokens,
+ cacheWriteTokens,
+ cacheReadTokens,
+ ...(typeof reasoningTokens === "number" ? { reasoningTokens } : {}),
+ totalCost,
+ }
+ return out
+ }
+
+	private async ensureAuthenticated(): Promise<void> {
+ if (this.chatgptAccessToken) return
+
+ const configured = (this.options as any).openAiNativeCodexOauthPath as string | undefined
+ const defaultPath = "~/.codex/auth.json"
+ const expandHome = (p: string) => p.replace(/^~(?=\/|\\|$)/, os.homedir())
+ const pathToUse = configured && configured.trim() ? configured.trim() : defaultPath
+ const explicitPath = expandHome(pathToUse)
+ const resolvedPath = path.resolve(explicitPath)
+
+ // Guard file size before reading to prevent loading unexpectedly large files
+ const MAX_OAUTH_SIZE = 1_000_000 // 1 MB
+ try {
+ const stat = await fs.stat(resolvedPath)
+ if (stat.size > MAX_OAUTH_SIZE) {
+ throw new Error(
+ t("common:errors.openaiNativeCodex.oauthFileTooLarge", {
+ path: resolvedPath,
+ size: stat.size,
+ max: MAX_OAUTH_SIZE,
+ }),
+ )
+ }
+ } catch (e: any) {
+ // Surface read failure with localized error (e.g., file missing or inaccessible)
+ const base = t("common:errors.openaiNativeCodex.oauthReadFailed", {
+ path: resolvedPath,
+ error: e?.message || String(e),
+ })
+ throw new Error(base)
+ }
+
+ let raw: string
+ try {
+ raw = await fs.readFile(resolvedPath, "utf8")
+ } catch (e: any) {
+ const base = t("common:errors.openaiNativeCodex.oauthReadFailed", {
+ path: resolvedPath,
+ error: e?.message || String(e),
+ })
+ throw new Error(base)
+ }
+
+ // Post-read size check using byte length
+ if (Buffer.byteLength(raw, "utf8") > MAX_OAUTH_SIZE) {
+ throw new Error(
+ t("common:errors.openaiNativeCodex.oauthFileTooLarge", {
+ path: resolvedPath,
+ size: Buffer.byteLength(raw, "utf8"),
+ max: MAX_OAUTH_SIZE,
+ }),
+ )
+ }
+
+ let j: AuthJson
+ try {
+ j = JSON.parse(raw) as AuthJson
+ } catch (e: any) {
+ const base = t("common:errors.openaiNativeCodex.oauthParseFailed", {
+ path: resolvedPath,
+ error: e?.message || String(e),
+ })
+ throw new Error(base)
+ }
+
+ const tokens: AuthTokens = j?.tokens ?? {}
+ const access = typeof tokens.access_token === "string" ? tokens.access_token : undefined
+ let account = typeof tokens.account_id === "string" ? tokens.account_id : undefined
+
+ if (!account && typeof tokens.id_token === "string") {
+ const decoded = this.extractAccountIdFromIdToken(tokens.id_token)
+ if (decoded) {
+ account = decoded
+ }
+ }
+
+ if (!access) {
+ throw new Error(t("common:errors.openaiNativeCodex.missingAccessToken"))
+ }
+
+ this.chatgptAccessToken = access
+ this.chatgptAccountId = account
+ }
+
+ // Extract ChatGPT account id from id_token without verifying signature (local decode for UX only)
+ protected extractAccountIdFromIdToken(idToken: string): string | undefined {
+ try {
+ const parts = idToken.split(".")
+ if (parts.length !== 3) return undefined
+ const payload = parts[1]
+ const padded = payload + "=".repeat((4 - (payload.length % 4)) % 4)
+ const claims = JSON.parse(Buffer.from(padded, "base64").toString("utf8"))
+ const auth = claims?.["https://api.openai.com/auth"]
+ return typeof auth?.chatgpt_account_id === "string" ? auth.chatgpt_account_id : undefined
+ } catch {
+ return undefined
+ }
+ }
+
+ override getModel() {
+ const modelId = this.options.apiModelId
+ const id =
+ modelId && modelId in openAiNativeCodexModels
+ ? (modelId as keyof typeof openAiNativeCodexModels)
+ : openAiNativeCodexDefaultModelId
+ const info: ModelInfo = openAiNativeCodexModels[id]
+
+ const params = getModelParams({
+ format: "openai",
+ modelId: id as string,
+ model: info,
+ settings: this.options,
+ })
+
+ // Reasoning effort is computed by getModelParams based on model + settings
+ return { id: id as string, info, ...params, verbosity: params.verbosity }
+ }
+
+ override async *createMessage(
+ systemPrompt: string,
+ messages: Anthropic.Messages.MessageParam[],
+ metadata?: ApiHandlerCreateMessageMetadata,
+ ): ApiStream {
+ const model = this.getModel()
+ await this.ensureAuthenticated()
+
+ // Transform messages to Codex input with strong typing
+ const formattedInput = this.buildCodexInput(messages, systemPrompt)
+
+ // Use provider-local prompt content for top-level instructions (TS string module)
+ const codexPrompt = codexPromptContent
+
+ // Codex (chatgpt.com codex/responses) is stateless and does NOT support previous_response_id.
+ // We always send curated prior items in `input` to preserve continuity.
+ const requestBody = this.buildRequestBody(
+ model,
+ formattedInput,
+ codexPrompt,
+ (model as any).verbosity as VerbosityLevel | undefined,
+ (model as any).reasoning?.reasoning_effort as ReasoningEffortWithMinimal | undefined,
+ )
+
+ yield* this.makeResponsesRequest(requestBody, model)
+ }
+
+ // Split out for unit testing and clearer typing
+ protected buildCodexInput(messages: Anthropic.Messages.MessageParam[], systemPrompt: string): CodexMessage[] {
+ const formatted: CodexMessage[] = []
+		// Inject provider overrides and dynamic instructions as a system role using override-prompt and user-instructions XML tags
+ let injectedUserInstructions = false
+
+ for (const message of messages) {
+ const role: CodexRole = message.role === "user" ? "user" : "assistant"
+ const content: CodexContent[] = []
+
+ if (!injectedUserInstructions && typeof systemPrompt === "string" && systemPrompt.trim().length > 0) {
+ // Codex system prompt immutability:
+ // - The top-level "instructions" field sent to codex/responses is immutable on the server.
+ // - We cannot dynamically alter the default system prompt that Codex applies.
+ // Strategy and rationale:
+ // - We inject two system-role items before the first user/assistant turn:
+				//   1) the override prompt — explains to the model how Roo’s rules supersede Codex defaults.
+				//   2) the user instructions (systemPrompt) — the current task/systemPrompt, asking Codex to prioritize these rules/tools.
+ // - This pattern reduces the impact of Codex’s default prompt without trying to replace it (not possible).
+ // - We also keep these separate from user messages to avoid tool execution bias.
+ formatted.push({
+ role: "system",
+ content: [
+ {
+ type: "input_text",
+ text: `${overridePrompt}`,
+ },
+ { type: "input_text", text: `${systemPrompt}` },
+ ],
+ })
+ injectedUserInstructions = true
+ }
+
+ if (typeof message.content === "string") {
+ if (role === "user") content.push({ type: "input_text", text: message.content })
+ else content.push({ type: "output_text", text: message.content })
+ } else if (Array.isArray(message.content)) {
+ for (const block of message.content) {
+ if (block.type === "text") {
+ const text = (block as any).text as string
+ if (typeof text === "string") {
+ if (role === "user") content.push({ type: "input_text", text })
+ else content.push({ type: "output_text", text })
+ }
+ } else if (block.type === "image") {
+ const image = block as Anthropic.Messages.ImageBlockParam
+ const imageUrl = `data:${image.source.media_type};base64,${image.source.data}`
+ content.push({ type: "input_image", image_url: imageUrl })
+ }
+ }
+ }
+ if (content.length > 0) formatted.push({ role, content })
+ }
+
+ return formatted
+ }
+
+ private buildRequestBody(
+ model: OpenAiNativeCodexModel,
+ formattedInput: CodexMessage[],
+ providerPrompt: string,
+ verbosity: VerbosityLevel | undefined,
+ reasoningEffort: ReasoningEffortWithMinimal | undefined,
+ ) {
+ // For Codex provider:
+ // - Use the model's default reasoning effort (currently "medium") unless explicitly overridden in settings.
+ // - Both "gpt-5" and "gpt-5-codex" follow the provided/default effort without forcing "minimal".
+ let effectiveEffort: ReasoningEffortWithMinimal | undefined = reasoningEffort
+
+ const body: {
+ model: string
+ input: CodexMessage[]
+ stream: true
+ store: false
+ instructions: string
+ reasoning?: {
+ effort: ReasoningEffortWithMinimal
+ summary?: "auto"
+ }
+ text?: { verbosity: VerbosityLevel }
+ } = {
+ model: model.id,
+ input: formattedInput,
+ stream: true,
+ // ChatGPT Responses requires store=false
+ store: false,
+ // Top-level instructions string passed in by caller (createMessage supplies provider prompt)
+ instructions: providerPrompt,
+ ...(effectiveEffort && {
+ reasoning: {
+ effort: effectiveEffort,
+ ...(this.options.enableGpt5ReasoningSummary ? { summary: "auto" as const } : {}),
+ },
+ }),
+ // ChatGPT codex/responses does not support previous_response_id (stateless).
+ // Preserve continuity by sending curated prior items in `input`.
+ }
+ if (model.info.supportsVerbosity === true) {
+ body.text = { verbosity: (verbosity || "medium") as VerbosityLevel }
+ }
+ return body
+ }
+
+ private async *makeResponsesRequest(requestBody: any, model: OpenAiNativeCodexModel): ApiStream {
+ const apiKey = this.chatgptAccessToken
+ const url = "https://chatgpt.com/backend-api/codex/responses"
+		const headers: Record<string, string> = {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${apiKey}`,
+ Accept: "text/event-stream",
+ "OpenAI-Beta": "responses=experimental",
+ }
+ if (this.chatgptAccountId) headers["chatgpt-account-id"] = this.chatgptAccountId
+
+		let timeoutId: ReturnType<typeof setTimeout> | undefined
+ try {
+ const timeoutMs = getApiRequestTimeout()
+ const controller = new AbortController()
+ timeoutId = timeoutMs > 0 ? setTimeout(() => controller.abort(), timeoutMs) : undefined
+ const response = await fetch(url, {
+ method: "POST",
+ headers,
+ body: JSON.stringify(requestBody),
+ signal: controller.signal,
+ })
+
+ if (!response.ok) {
+ const text = await response.text().catch(() => "")
+ const requestId =
+ response.headers.get("x-request-id") || response.headers.get("openai-request-id") || undefined
+ let userMessage: string | undefined
+ try {
+ const parsed = JSON.parse(text)
+ userMessage = parsed?.error?.message || parsed?.message || parsed?.error || undefined
+ } catch {
+ // ignore parse error
+ }
+ const snippet = (text || "").slice(0, 500).replace(/\s+/g, " ").trim()
+ const msg = t("common:errors.openaiNativeCodex.httpError", {
+ status: response.status,
+ requestId: requestId || "n/a",
+ modelId: model.id,
+ message: userMessage || snippet,
+ })
+ const err = new Error(msg)
+ ;(err as any).status = response.status
+ if (requestId) (err as any).requestId = requestId
+ ;(err as any).provider = "openai-native-codex"
+ ;(err as any).raw = snippet
+ throw err
+ }
+ if (!response.body) {
+ throw new Error(t("common:errors.openaiNativeCodex.noResponseBody"))
+ }
+
+ // Stream parse
+ {
+ const reader = response.body.getReader()
+ const decoder = new TextDecoder()
+ let buffer = ""
+ let hasContent = false
+ let sawTextDelta = false
+ let sawReasoningDelta = false
+
+ try {
+ while (true) {
+ const { done, value } = await reader.read()
+ if (done) break
+
+ buffer += decoder.decode(value, { stream: true })
+ const lines = buffer.split("\n")
+ buffer = lines.pop() || ""
+
+ for (const line of lines) {
+ if (line.startsWith("data: ")) {
+ const data = line.slice(6).trim()
+ if (data === "[DONE]") {
+ continue
+ }
+ try {
+ const parsed = JSON.parse(data)
+ // Persist tier when available (parity with openai-native)
+ if (parsed.response?.service_tier) {
+ }
+ // Minimal content extraction similar to OpenAI Responses
+ if (parsed?.type === "response.text.delta" && parsed?.delta) {
+ hasContent = true
+ sawTextDelta = true
+ yield { type: "text", text: parsed.delta }
+ } else if (parsed?.type === "response.output_text.delta" && parsed?.delta) {
+ hasContent = true
+ sawTextDelta = true
+ yield { type: "text", text: parsed.delta }
+ } else if (
+ parsed?.type === "response.output_text.done" &&
+ typeof parsed?.text === "string"
+ ) {
+ if (!sawTextDelta) {
+ hasContent = true
+ yield { type: "text", text: parsed.text }
+ }
+ } else if (
+ parsed?.type === "response.reasoning_summary_text.delta" &&
+ typeof parsed?.delta === "string"
+ ) {
+ hasContent = true
+ sawReasoningDelta = true
+ yield { type: "reasoning", text: parsed.delta }
+ } else if (
+ parsed?.type === "response.reasoning_summary_text.done" &&
+ typeof parsed?.text === "string"
+ ) {
+ if (!sawReasoningDelta) {
+ hasContent = true
+ yield { type: "reasoning", text: parsed.text }
+ }
+ } else if (parsed?.response?.output && Array.isArray(parsed.response.output)) {
+ for (const item of parsed.response.output) {
+ if (item.type === "text" && Array.isArray(item.content)) {
+ for (const c of item.content) {
+ if (c?.type === "text" && typeof c.text === "string") {
+ hasContent = true
+ yield { type: "text", text: c.text }
+ }
+ }
+ } else if (item.type === "reasoning" && typeof item.text === "string") {
+ hasContent = true
+ yield { type: "reasoning", text: item.text }
+ }
+ }
+ if (
+ (parsed.type === "response.completed" || parsed.type === "response.done") &&
+ parsed.response?.usage
+ ) {
+ const usageData = this.normalizeUsage(parsed.response.usage, model)
+ if (usageData) {
+ yield usageData
+ }
+ }
+ } else if (
+ parsed.type === "response.completed" ||
+ parsed.type === "response.done"
+ ) {
+ const usageData = this.normalizeUsage(parsed.response?.usage, model)
+ if (usageData) {
+ yield usageData
+ }
+ } else if (parsed?.usage) {
+ const usageData = this.normalizeUsage(parsed.usage, model)
+ if (usageData) {
+ yield usageData
+ }
+ }
+ } catch {
+ // ignore parse errors
+ }
+ } else if (line.trim() && !line.startsWith(":")) {
+ try {
+ const parsed = JSON.parse(line)
+ if (parsed.content || parsed.text || parsed.message) {
+ hasContent = true
+ yield { type: "text", text: parsed.content || parsed.text || parsed.message }
+ }
+ } catch {
+ // ignore
+ }
+ }
+ }
+ }
+ if (!hasContent) {
+ throw new Error(t("common:errors.openaiNativeCodex.emptyStream", { modelId: model.id }))
+ }
+ } finally {
+ try {
+ reader.releaseLock()
+ } catch {}
+ }
+ }
+ } catch (err) {
+ throw err as Error
+ } finally {
+ // Clear timeout if set
+ try {
+ if (typeof timeoutId !== "undefined") {
+ clearTimeout(timeoutId as any)
+ }
+ } catch {}
+ }
+ }
+}
diff --git a/src/api/providers/roo.ts b/src/api/providers/roo.ts
index 44b01608627..6f10157a313 100644
--- a/src/api/providers/roo.ts
+++ b/src/api/providers/roo.ts
@@ -1,22 +1,24 @@
import { Anthropic } from "@anthropic-ai/sdk"
+import OpenAI from "openai"
-import { rooDefaultModelId, rooModels, type RooModelId } from "@roo-code/types"
+import { AuthState, rooDefaultModelId, rooModels, type RooModelId } from "@roo-code/types"
import { CloudService } from "@roo-code/cloud"
import type { ApiHandlerOptions } from "../../shared/api"
import { ApiStream } from "../transform/stream"
import type { ApiHandlerCreateMessageMetadata } from "../index"
+import { DEFAULT_HEADERS } from "./constants"
import { BaseOpenAiCompatibleProvider } from "./base-openai-compatible-provider"
export class RooHandler extends BaseOpenAiCompatibleProvider {
+ private authStateListener?: (state: { state: AuthState }) => void
+
constructor(options: ApiHandlerOptions) {
- // Get the session token if available, but don't throw if not.
- // The server will handle authentication errors and return appropriate status codes.
- let sessionToken = ""
+ let sessionToken: string | undefined = undefined
if (CloudService.hasInstance()) {
- sessionToken = CloudService.instance.authService?.getSessionToken() || ""
+ sessionToken = CloudService.instance.authService?.getSessionToken()
}
// Always construct the handler, even without a valid token.
@@ -25,11 +27,39 @@ export class RooHandler extends BaseOpenAiCompatibleProvider {
...options,
providerName: "Roo Code Cloud",
baseURL: process.env.ROO_CODE_PROVIDER_URL ?? "https://api.roocode.com/proxy/v1",
- apiKey: sessionToken || "unauthenticated", // Use a placeholder if no token
+ apiKey: sessionToken || "unauthenticated", // Use a placeholder if no token.
defaultProviderModelId: rooDefaultModelId,
providerModels: rooModels,
defaultTemperature: 0.7,
})
+
+ if (CloudService.hasInstance()) {
+ const cloudService = CloudService.instance
+
+ this.authStateListener = (state: { state: AuthState }) => {
+ if (state.state === "active-session") {
+ this.client = new OpenAI({
+ baseURL: this.baseURL,
+ apiKey: cloudService.authService?.getSessionToken() ?? "unauthenticated",
+ defaultHeaders: DEFAULT_HEADERS,
+ })
+ } else if (state.state === "logged-out") {
+ this.client = new OpenAI({
+ baseURL: this.baseURL,
+ apiKey: "unauthenticated",
+ defaultHeaders: DEFAULT_HEADERS,
+ })
+ }
+ }
+
+ cloudService.on("auth-state-changed", this.authStateListener)
+ }
+ }
+
+ dispose() {
+ if (this.authStateListener && CloudService.hasInstance()) {
+ CloudService.instance.off("auth-state-changed", this.authStateListener)
+ }
}
override async *createMessage(
diff --git a/src/assets/images/roo.png b/src/assets/images/roo.png
new file mode 100644
index 00000000000..5dfc8723e83
Binary files /dev/null and b/src/assets/images/roo.png differ
diff --git a/src/core/assistant-message/presentAssistantMessage.ts b/src/core/assistant-message/presentAssistantMessage.ts
index 689675999fd..2e82aac4d60 100644
--- a/src/core/assistant-message/presentAssistantMessage.ts
+++ b/src/core/assistant-message/presentAssistantMessage.ts
@@ -57,7 +57,9 @@ import { applyDiffToolLegacy } from "../tools/applyDiffTool"
export async function presentAssistantMessage(cline: Task) {
if (cline.abort) {
- throw new Error(`[Task#presentAssistantMessage] task ${cline.taskId}.${cline.instanceId} aborted`)
+ // Task has been aborted; skip further processing without rejecting the promise.
+ cline.presentAssistantMessageHasPendingUpdates = false
+ return
}
if (cline.presentAssistantMessageLocked) {
diff --git a/src/core/condense/__tests__/index.spec.ts b/src/core/condense/__tests__/index.spec.ts
index d86b500f902..6a03298aa69 100644
--- a/src/core/condense/__tests__/index.spec.ts
+++ b/src/core/condense/__tests__/index.spec.ts
@@ -283,6 +283,32 @@ describe("summarizeConversation", () => {
const mockCallArgs = (maybeRemoveImageBlocks as Mock).mock.calls[0][0] as any[]
expect(mockCallArgs[mockCallArgs.length - 1]).toEqual(expectedFinalMessage)
})
+ it("should include the original first user message in summarization input", async () => {
+ const messages: ApiMessage[] = [
+ { role: "user", content: "Initial ask", ts: 1 },
+ { role: "assistant", content: "Ack", ts: 2 },
+ { role: "user", content: "Follow-up", ts: 3 },
+ { role: "assistant", content: "Response", ts: 4 },
+ { role: "user", content: "More", ts: 5 },
+ { role: "assistant", content: "Later", ts: 6 },
+ { role: "user", content: "Newest", ts: 7 },
+ ]
+
+ await summarizeConversation(messages, mockApiHandler, defaultSystemPrompt, taskId, DEFAULT_PREV_CONTEXT_TOKENS)
+
+ const mockCallArgs = (maybeRemoveImageBlocks as Mock).mock.calls[0][0] as any[]
+
+ // Expect the original first user message to be present in the messages sent to the summarizer
+ const hasInitialAsk = mockCallArgs.some(
+ (m) =>
+ m.role === "user" &&
+ (typeof m.content === "string"
+ ? m.content === "Initial ask"
+ : Array.isArray(m.content) &&
+ m.content.some((b: any) => b.type === "text" && b.text === "Initial ask")),
+ )
+ expect(hasInitialAsk).toBe(true)
+ })
it("should calculate newContextTokens correctly with systemPrompt", async () => {
const messages: ApiMessage[] = [
diff --git a/src/core/condense/index.ts b/src/core/condense/index.ts
index 166a8ba4cad..86cfa7ab1e5 100644
--- a/src/core/condense/index.ts
+++ b/src/core/condense/index.ts
@@ -103,8 +103,8 @@ export async function summarizeConversation(
// Always preserve the first message (which may contain slash command content)
const firstMessage = messages[0]
- // Get messages to summarize, excluding the first message and last N messages
- const messagesToSummarize = getMessagesSinceLastSummary(messages.slice(1, -N_MESSAGES_TO_KEEP))
+ // Get messages to summarize, including the first message and excluding the last N messages
+ const messagesToSummarize = getMessagesSinceLastSummary(messages.slice(0, -N_MESSAGES_TO_KEEP))
if (messagesToSummarize.length <= 1) {
const error =
diff --git a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/architect-mode-prompt.snap b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/architect-mode-prompt.snap
index ced0ede4638..1dc60c72ed5 100644
--- a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/architect-mode-prompt.snap
+++ b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/architect-mode-prompt.snap
@@ -10,7 +10,7 @@ ALL responses MUST show ANY `language construct` OR filename reference as clicka
TOOL USE
-You have access to a set of tools that are executed upon the user's approval. You can use one tool per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
+You have access to a set of tools that are executed upon the user's approval. You must use exactly one tool per message, and every assistant message must include a tool call. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
# Tool Use Formatting
diff --git a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/ask-mode-prompt.snap b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/ask-mode-prompt.snap
index 90bbc1ae346..b860218ffc4 100644
--- a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/ask-mode-prompt.snap
+++ b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/ask-mode-prompt.snap
@@ -10,7 +10,7 @@ ALL responses MUST show ANY `language construct` OR filename reference as clicka
TOOL USE
-You have access to a set of tools that are executed upon the user's approval. You can use one tool per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
+You have access to a set of tools that are executed upon the user's approval. You must use exactly one tool per message, and every assistant message must include a tool call. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
# Tool Use Formatting
diff --git a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-disabled.snap b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-disabled.snap
index 9fdf29df777..553c5a5826a 100644
--- a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-disabled.snap
+++ b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-disabled.snap
@@ -10,7 +10,7 @@ ALL responses MUST show ANY `language construct` OR filename reference as clicka
TOOL USE
-You have access to a set of tools that are executed upon the user's approval. You can use one tool per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
+You have access to a set of tools that are executed upon the user's approval. You must use exactly one tool per message, and every assistant message must include a tool call. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
# Tool Use Formatting
diff --git a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-enabled.snap b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-enabled.snap
index 3594c8054fe..50220f79ca7 100644
--- a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-enabled.snap
+++ b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-enabled.snap
@@ -10,7 +10,7 @@ ALL responses MUST show ANY `language construct` OR filename reference as clicka
TOOL USE
-You have access to a set of tools that are executed upon the user's approval. You can use one tool per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
+You have access to a set of tools that are executed upon the user's approval. You must use exactly one tool per message, and every assistant message must include a tool call. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
# Tool Use Formatting
diff --git a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/partial-reads-enabled.snap b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/partial-reads-enabled.snap
index 00e7d3f5db5..be78c1a121b 100644
--- a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/partial-reads-enabled.snap
+++ b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/partial-reads-enabled.snap
@@ -10,7 +10,7 @@ ALL responses MUST show ANY `language construct` OR filename reference as clicka
TOOL USE
-You have access to a set of tools that are executed upon the user's approval. You can use one tool per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
+You have access to a set of tools that are executed upon the user's approval. You must use exactly one tool per message, and every assistant message must include a tool call. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
# Tool Use Formatting
diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/consistent-system-prompt.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/consistent-system-prompt.snap
index ced0ede4638..1dc60c72ed5 100644
--- a/src/core/prompts/__tests__/__snapshots__/system-prompt/consistent-system-prompt.snap
+++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/consistent-system-prompt.snap
@@ -10,7 +10,7 @@ ALL responses MUST show ANY `language construct` OR filename reference as clicka
TOOL USE
-You have access to a set of tools that are executed upon the user's approval. You can use one tool per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
+You have access to a set of tools that are executed upon the user's approval. You must use exactly one tool per message, and every assistant message must include a tool call. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
# Tool Use Formatting
diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-computer-use-support.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-computer-use-support.snap
index 72e208ee6a0..561360ca6ee 100644
--- a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-computer-use-support.snap
+++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-computer-use-support.snap
@@ -10,7 +10,7 @@ ALL responses MUST show ANY `language construct` OR filename reference as clicka
TOOL USE
-You have access to a set of tools that are executed upon the user's approval. You can use one tool per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
+You have access to a set of tools that are executed upon the user's approval. You must use exactly one tool per message, and every assistant message must include a tool call. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
# Tool Use Formatting
diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-false.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-false.snap
index ced0ede4638..1dc60c72ed5 100644
--- a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-false.snap
+++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-false.snap
@@ -10,7 +10,7 @@ ALL responses MUST show ANY `language construct` OR filename reference as clicka
TOOL USE
-You have access to a set of tools that are executed upon the user's approval. You can use one tool per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
+You have access to a set of tools that are executed upon the user's approval. You must use exactly one tool per message, and every assistant message must include a tool call. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
# Tool Use Formatting
diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-true.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-true.snap
index 72aa071ce6f..e1147f96bc3 100644
--- a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-true.snap
+++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-true.snap
@@ -10,7 +10,7 @@ ALL responses MUST show ANY `language construct` OR filename reference as clicka
TOOL USE
-You have access to a set of tools that are executed upon the user's approval. You can use one tool per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
+You have access to a set of tools that are executed upon the user's approval. You must use exactly one tool per message, and every assistant message must include a tool call. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
# Tool Use Formatting
diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-undefined.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-undefined.snap
index ced0ede4638..1dc60c72ed5 100644
--- a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-undefined.snap
+++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-undefined.snap
@@ -10,7 +10,7 @@ ALL responses MUST show ANY `language construct` OR filename reference as clicka
TOOL USE
-You have access to a set of tools that are executed upon the user's approval. You can use one tool per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
+You have access to a set of tools that are executed upon the user's approval. You must use exactly one tool per message, and every assistant message must include a tool call. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
# Tool Use Formatting
diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-different-viewport-size.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-different-viewport-size.snap
index 83271f47ad5..fe856adb402 100644
--- a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-different-viewport-size.snap
+++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-different-viewport-size.snap
@@ -10,7 +10,7 @@ ALL responses MUST show ANY `language construct` OR filename reference as clicka
TOOL USE
-You have access to a set of tools that are executed upon the user's approval. You can use one tool per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
+You have access to a set of tools that are executed upon the user's approval. You must use exactly one tool per message, and every assistant message must include a tool call. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
# Tool Use Formatting
diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-mcp-hub-provided.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-mcp-hub-provided.snap
index 3594c8054fe..50220f79ca7 100644
--- a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-mcp-hub-provided.snap
+++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-mcp-hub-provided.snap
@@ -10,7 +10,7 @@ ALL responses MUST show ANY `language construct` OR filename reference as clicka
TOOL USE
-You have access to a set of tools that are executed upon the user's approval. You can use one tool per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
+You have access to a set of tools that are executed upon the user's approval. You must use exactly one tool per message, and every assistant message must include a tool call. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
# Tool Use Formatting
diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-undefined-mcp-hub.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-undefined-mcp-hub.snap
index ced0ede4638..1dc60c72ed5 100644
--- a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-undefined-mcp-hub.snap
+++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-undefined-mcp-hub.snap
@@ -10,7 +10,7 @@ ALL responses MUST show ANY `language construct` OR filename reference as clicka
TOOL USE
-You have access to a set of tools that are executed upon the user's approval. You can use one tool per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
+You have access to a set of tools that are executed upon the user's approval. You must use exactly one tool per message, and every assistant message must include a tool call. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
# Tool Use Formatting
diff --git a/src/core/prompts/sections/tool-use.ts b/src/core/prompts/sections/tool-use.ts
index c598fabae34..28d47d09858 100644
--- a/src/core/prompts/sections/tool-use.ts
+++ b/src/core/prompts/sections/tool-use.ts
@@ -3,7 +3,7 @@ export function getSharedToolUseSection(): string {
TOOL USE
-You have access to a set of tools that are executed upon the user's approval. You can use one tool per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
+You have access to a set of tools that are executed upon the user's approval. You must use exactly one tool per message, and every assistant message must include a tool call. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
# Tool Use Formatting
diff --git a/src/core/task/Task.ts b/src/core/task/Task.ts
index cf16df8dcc7..575dcc2c187 100644
--- a/src/core/task/Task.ts
+++ b/src/core/task/Task.ts
@@ -212,6 +212,7 @@ export class Task extends EventEmitter implements TaskLike {
didFinishAbortingStream = false
abandoned = false
+ abortReason?: ClineApiReqCancelReason
isInitialized = false
isPaused: boolean = false
pausedModeSlug: string = defaultModeSlug
@@ -1264,6 +1265,16 @@ export class Task extends EventEmitter implements TaskLike {
modifiedClineMessages.splice(lastRelevantMessageIndex + 1)
}
+ // Remove any trailing reasoning-only UI messages that were not part of the persisted API conversation
+ while (modifiedClineMessages.length > 0) {
+ const last = modifiedClineMessages[modifiedClineMessages.length - 1]
+ if (last.type === "say" && last.say === "reasoning") {
+ modifiedClineMessages.pop()
+ } else {
+ break
+ }
+ }
+
// Since we don't use `api_req_finished` anymore, we need to check if the
// last `api_req_started` has a cost value, if it doesn't and no
// cancellation reason to present, then we remove it since it indicates
@@ -1884,28 +1895,10 @@ export class Task extends EventEmitter implements TaskLike {
lastMessage.partial = false
// instead of streaming partialMessage events, we do a save and post like normal to persist to disk
console.log("updating partial message", lastMessage)
- // await this.saveClineMessages()
}
- // Let assistant know their response was interrupted for when task is resumed
- await this.addToApiConversationHistory({
- role: "assistant",
- content: [
- {
- type: "text",
- text:
- assistantMessage +
- `\n\n[${
- cancelReason === "streaming_failed"
- ? "Response interrupted by API Error"
- : "Response interrupted by user"
- }]`,
- },
- ],
- })
-
// Update `api_req_started` to have cancelled and cost, so that
- // we can display the cost of the partial stream.
+ // we can display the cost of the partial stream and the cancellation reason
updateApiReqMsg(cancelReason, streamingFailedMessage)
await this.saveClineMessages()
@@ -1951,10 +1944,25 @@ export class Task extends EventEmitter implements TaskLike {
}
switch (chunk.type) {
- case "reasoning":
+ case "reasoning": {
reasoningMessage += chunk.text
- await this.say("reasoning", reasoningMessage, undefined, true)
+ let formattedReasoning = reasoningMessage
+
+ if (reasoningMessage.includes("**")) {
+ formattedReasoning = reasoningMessage.replace(
+ /\*\*([^*]+)\*\*/g,
+ (match, heading: string, offset: number) => {
+ const prefix = offset > 0 ? "\n\n" : ""
+ return `${prefix}**${heading.trim()}**`
+ },
+ )
+ // Normalize any excessive blank lines
+ formattedReasoning = formattedReasoning.replace(/\n{3,}/g, "\n\n")
+ }
+
+ await this.say("reasoning", formattedReasoning.trimStart(), undefined, true)
break
+ }
case "usage":
inputTokens += chunk.inputTokens
outputTokens += chunk.outputTokens
@@ -1983,7 +1991,13 @@ export class Task extends EventEmitter implements TaskLike {
}
// Present content to user.
- presentAssistantMessage(this)
+ void presentAssistantMessage(this).catch((error) => {
+ this.providerRef
+ .deref()
+ ?.log(
+ `[Task#presentAssistantMessage] Failed to deliver assistant content: ${error instanceof Error ? error.message : String(error)}`,
+ )
+ })
break
}
}
@@ -2187,24 +2201,23 @@ export class Task extends EventEmitter implements TaskLike {
// may have executed), so we just resort to replicating a
// cancel task.
- // Check if this was a user-initiated cancellation BEFORE calling abortTask
- // If this.abort is already true, it means the user clicked cancel, so we should
- // treat this as "user_cancelled" rather than "streaming_failed"
- const cancelReason = this.abort ? "user_cancelled" : "streaming_failed"
+ // Determine cancellation reason BEFORE aborting to ensure correct persistence
+ const cancelReason: ClineApiReqCancelReason = this.abort ? "user_cancelled" : "streaming_failed"
const streamingFailedMessage = this.abort
? undefined
: (error.message ?? JSON.stringify(serializeError(error), null, 2))
- // Now call abortTask after determining the cancel reason.
- await this.abortTask()
+ // Persist interruption details first to both UI and API histories
await abortStream(cancelReason, streamingFailedMessage)
- const history = await provider?.getTaskWithId(this.taskId)
+ // Record reason for provider to decide rehydration path
+ this.abortReason = cancelReason
- if (history) {
- await provider?.createTaskWithHistoryItem(history.historyItem)
- }
+ // Now abort (emits TaskAborted which provider listens to)
+ await this.abortTask()
+
+ // Do not rehydrate here; provider owns rehydration to avoid duplication races
}
} finally {
this.isStreaming = false
@@ -2242,7 +2255,13 @@ export class Task extends EventEmitter implements TaskLike {
// `pWaitFor` before making the next request. All this is really
// doing is presenting the last partial message that we just set
// to complete.
- presentAssistantMessage(this)
+ void presentAssistantMessage(this).catch((error) => {
+ this.providerRef
+ .deref()
+ ?.log(
+ `[Task#presentAssistantMessage] Failed to continue assistant content: ${error instanceof Error ? error.message : String(error)}`,
+ )
+ })
}
// Note: updateApiReqMsg() is now called from within drainStreamInBackgroundToFindAllUsage
diff --git a/src/core/tools/__tests__/updateTodoListTool.spec.ts b/src/core/tools/__tests__/updateTodoListTool.spec.ts
new file mode 100644
index 00000000000..0b7e8105724
--- /dev/null
+++ b/src/core/tools/__tests__/updateTodoListTool.spec.ts
@@ -0,0 +1,243 @@
+import { describe, it, expect, beforeEach, vi } from "vitest"
+import { parseMarkdownChecklist } from "../updateTodoListTool"
+// Return type TodoItem[] (from "@roo-code/types") is exercised indirectly via parseMarkdownChecklist
+
+describe("parseMarkdownChecklist", () => {
+ describe("standard checkbox format (without dash prefix)", () => {
+ it("should parse pending tasks", () => {
+ const md = `[ ] Task 1
+[ ] Task 2`
+ const result = parseMarkdownChecklist(md)
+ expect(result).toHaveLength(2)
+ expect(result[0].content).toBe("Task 1")
+ expect(result[0].status).toBe("pending")
+ expect(result[1].content).toBe("Task 2")
+ expect(result[1].status).toBe("pending")
+ })
+
+ it("should parse completed tasks with lowercase x", () => {
+ const md = `[x] Completed task 1
+[x] Completed task 2`
+ const result = parseMarkdownChecklist(md)
+ expect(result).toHaveLength(2)
+ expect(result[0].content).toBe("Completed task 1")
+ expect(result[0].status).toBe("completed")
+ expect(result[1].content).toBe("Completed task 2")
+ expect(result[1].status).toBe("completed")
+ })
+
+ it("should parse completed tasks with uppercase X", () => {
+ const md = `[X] Completed task 1
+[X] Completed task 2`
+ const result = parseMarkdownChecklist(md)
+ expect(result).toHaveLength(2)
+ expect(result[0].content).toBe("Completed task 1")
+ expect(result[0].status).toBe("completed")
+ expect(result[1].content).toBe("Completed task 2")
+ expect(result[1].status).toBe("completed")
+ })
+
+ it("should parse in-progress tasks with dash", () => {
+ const md = `[-] In progress task 1
+[-] In progress task 2`
+ const result = parseMarkdownChecklist(md)
+ expect(result).toHaveLength(2)
+ expect(result[0].content).toBe("In progress task 1")
+ expect(result[0].status).toBe("in_progress")
+ expect(result[1].content).toBe("In progress task 2")
+ expect(result[1].status).toBe("in_progress")
+ })
+
+ it("should parse in-progress tasks with tilde", () => {
+ const md = `[~] In progress task 1
+[~] In progress task 2`
+ const result = parseMarkdownChecklist(md)
+ expect(result).toHaveLength(2)
+ expect(result[0].content).toBe("In progress task 1")
+ expect(result[0].status).toBe("in_progress")
+ expect(result[1].content).toBe("In progress task 2")
+ expect(result[1].status).toBe("in_progress")
+ })
+ })
+
+ describe("dash-prefixed checkbox format", () => {
+ it("should parse pending tasks with dash prefix", () => {
+ const md = `- [ ] Task 1
+- [ ] Task 2`
+ const result = parseMarkdownChecklist(md)
+ expect(result).toHaveLength(2)
+ expect(result[0].content).toBe("Task 1")
+ expect(result[0].status).toBe("pending")
+ expect(result[1].content).toBe("Task 2")
+ expect(result[1].status).toBe("pending")
+ })
+
+ it("should parse completed tasks with dash prefix and lowercase x", () => {
+ const md = `- [x] Completed task 1
+- [x] Completed task 2`
+ const result = parseMarkdownChecklist(md)
+ expect(result).toHaveLength(2)
+ expect(result[0].content).toBe("Completed task 1")
+ expect(result[0].status).toBe("completed")
+ expect(result[1].content).toBe("Completed task 2")
+ expect(result[1].status).toBe("completed")
+ })
+
+ it("should parse completed tasks with dash prefix and uppercase X", () => {
+ const md = `- [X] Completed task 1
+- [X] Completed task 2`
+ const result = parseMarkdownChecklist(md)
+ expect(result).toHaveLength(2)
+ expect(result[0].content).toBe("Completed task 1")
+ expect(result[0].status).toBe("completed")
+ expect(result[1].content).toBe("Completed task 2")
+ expect(result[1].status).toBe("completed")
+ })
+
+ it("should parse in-progress tasks with dash prefix and dash marker", () => {
+ const md = `- [-] In progress task 1
+- [-] In progress task 2`
+ const result = parseMarkdownChecklist(md)
+ expect(result).toHaveLength(2)
+ expect(result[0].content).toBe("In progress task 1")
+ expect(result[0].status).toBe("in_progress")
+ expect(result[1].content).toBe("In progress task 2")
+ expect(result[1].status).toBe("in_progress")
+ })
+
+ it("should parse in-progress tasks with dash prefix and tilde marker", () => {
+ const md = `- [~] In progress task 1
+- [~] In progress task 2`
+ const result = parseMarkdownChecklist(md)
+ expect(result).toHaveLength(2)
+ expect(result[0].content).toBe("In progress task 1")
+ expect(result[0].status).toBe("in_progress")
+ expect(result[1].content).toBe("In progress task 2")
+ expect(result[1].status).toBe("in_progress")
+ })
+ })
+
+ describe("mixed formats", () => {
+ it("should parse mixed formats correctly", () => {
+ const md = `[ ] Task without dash
+- [ ] Task with dash
+[x] Completed without dash
+- [X] Completed with dash
+[-] In progress without dash
+- [~] In progress with dash`
+ const result = parseMarkdownChecklist(md)
+ expect(result).toHaveLength(6)
+
+ expect(result[0].content).toBe("Task without dash")
+ expect(result[0].status).toBe("pending")
+
+ expect(result[1].content).toBe("Task with dash")
+ expect(result[1].status).toBe("pending")
+
+ expect(result[2].content).toBe("Completed without dash")
+ expect(result[2].status).toBe("completed")
+
+ expect(result[3].content).toBe("Completed with dash")
+ expect(result[3].status).toBe("completed")
+
+ expect(result[4].content).toBe("In progress without dash")
+ expect(result[4].status).toBe("in_progress")
+
+ expect(result[5].content).toBe("In progress with dash")
+ expect(result[5].status).toBe("in_progress")
+ })
+ })
+
+ describe("edge cases", () => {
+ it("should handle empty strings", () => {
+ const result = parseMarkdownChecklist("")
+ expect(result).toEqual([])
+ })
+
+ it("should handle non-string input", () => {
+ const result = parseMarkdownChecklist(null as any)
+ expect(result).toEqual([])
+ })
+
+ it("should handle undefined input", () => {
+ const result = parseMarkdownChecklist(undefined as any)
+ expect(result).toEqual([])
+ })
+
+ it("should ignore non-checklist lines", () => {
+ const md = `This is not a checklist
+[ ] Valid task
+Just some text
+- Not a checklist item
+- [x] Valid completed task
+[not valid] Invalid format`
+ const result = parseMarkdownChecklist(md)
+ expect(result).toHaveLength(2)
+ expect(result[0].content).toBe("Valid task")
+ expect(result[0].status).toBe("pending")
+ expect(result[1].content).toBe("Valid completed task")
+ expect(result[1].status).toBe("completed")
+ })
+
+ it("should handle extra spaces", () => {
+ const md = ` [ ] Task with spaces
+- [ ] Task with dash and spaces
+ [x] Completed with spaces
+- [X] Completed with dash and spaces`
+ const result = parseMarkdownChecklist(md)
+ expect(result).toHaveLength(4)
+ expect(result[0].content).toBe("Task with spaces")
+ expect(result[1].content).toBe("Task with dash and spaces")
+ expect(result[2].content).toBe("Completed with spaces")
+ expect(result[3].content).toBe("Completed with dash and spaces")
+ })
+
+ it("should handle Windows line endings", () => {
+ const md = "[ ] Task 1\r\n- [x] Task 2\r\n[-] Task 3"
+ const result = parseMarkdownChecklist(md)
+ expect(result).toHaveLength(3)
+ expect(result[0].content).toBe("Task 1")
+ expect(result[0].status).toBe("pending")
+ expect(result[1].content).toBe("Task 2")
+ expect(result[1].status).toBe("completed")
+ expect(result[2].content).toBe("Task 3")
+ expect(result[2].status).toBe("in_progress")
+ })
+ })
+
+ describe("ID generation", () => {
+ it("should generate consistent IDs for the same content and status", () => {
+ const md1 = `[ ] Task 1
+[x] Task 2`
+ const md2 = `[ ] Task 1
+[x] Task 2`
+ const result1 = parseMarkdownChecklist(md1)
+ const result2 = parseMarkdownChecklist(md2)
+
+ expect(result1[0].id).toBe(result2[0].id)
+ expect(result1[1].id).toBe(result2[1].id)
+ })
+
+ it("should generate different IDs for different content", () => {
+ const md = `[ ] Task 1
+[ ] Task 2`
+ const result = parseMarkdownChecklist(md)
+ expect(result[0].id).not.toBe(result[1].id)
+ })
+
+ it("should generate different IDs for same content but different status", () => {
+ const md = `[ ] Task 1
+[x] Task 1`
+ const result = parseMarkdownChecklist(md)
+ expect(result[0].id).not.toBe(result[1].id)
+ })
+
+ it("should generate same IDs regardless of dash prefix", () => {
+ const md1 = `[ ] Task 1`
+ const md2 = `- [ ] Task 1`
+ const result1 = parseMarkdownChecklist(md1)
+ const result2 = parseMarkdownChecklist(md2)
+ expect(result1[0].id).toBe(result2[0].id)
+ })
+ })
+})
diff --git a/src/core/tools/multiApplyDiffTool.ts b/src/core/tools/multiApplyDiffTool.ts
index d0fe6557503..a30778c5af0 100644
--- a/src/core/tools/multiApplyDiffTool.ts
+++ b/src/core/tools/multiApplyDiffTool.ts
@@ -463,7 +463,7 @@ Error: ${failPart.error}
Suggested fixes:
1. Verify the search content exactly matches the file content (including whitespace and case)
2. Check for correct indentation and line endings
-3. Use to see the current file content
+3. Use the read_file tool to verify the file's current contents
4. Consider breaking complex changes into smaller diffs
5. Ensure start_line parameter matches the actual content location
${errorDetails ? `\nDetailed error information:\n${errorDetails}\n` : ""}
@@ -476,7 +476,7 @@ Unable to apply diffs to file: ${absolutePath}
Error: ${diffResult.error}
Recovery suggestions:
-1. Use to examine the current file content
+1. Use the read_file tool to verify the file's current contents
2. Verify the diff format matches the expected search/replace pattern
3. Check that the search content exactly matches what's in the file
4. Consider using line numbers with start_line parameter
diff --git a/src/core/tools/updateTodoListTool.ts b/src/core/tools/updateTodoListTool.ts
index de96c3cc765..fcd41914a88 100644
--- a/src/core/tools/updateTodoListTool.ts
+++ b/src/core/tools/updateTodoListTool.ts
@@ -108,7 +108,8 @@ export function parseMarkdownChecklist(md: string): TodoItem[] {
.filter(Boolean)
const todos: TodoItem[] = []
for (const line of lines) {
- const match = line.match(/^\[\s*([ xX\-~])\s*\]\s+(.+)$/)
+ // Support both "[ ] Task" and "- [ ] Task" formats
+ const match = line.match(/^(?:-\s*)?\[\s*([ xX\-~])\s*\]\s+(.+)$/)
if (!match) continue
let status: TodoStatus = "pending"
if (match[1] === "x" || match[1] === "X") status = "completed"
diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts
index 9abddc6d962..d9702d6f2e9 100644
--- a/src/core/webview/ClineProvider.ts
+++ b/src/core/webview/ClineProvider.ts
@@ -30,6 +30,7 @@ import {
type TerminalActionPromptType,
type HistoryItem,
type CloudUserInfo,
+ type CloudOrganizationMembership,
type CreateTaskOptions,
type TokenUsage,
RooCodeEventName,
@@ -89,6 +90,8 @@ import { Task } from "../task/Task"
import { getSystemPromptFilePath } from "../prompts/sections/custom-system-prompt"
import { webviewMessageHandler } from "./webviewMessageHandler"
+import type { ClineMessage } from "@roo-code/types"
+import { readApiMessages, saveApiMessages, saveTaskMessages } from "../task-persistence"
import { getNonce } from "./getNonce"
import { getUri } from "./getUri"
@@ -141,7 +144,7 @@ export class ClineProvider
public isViewLaunched = false
public settingsImportedAt?: number
- public readonly latestAnnouncementId = "sep-2025-roo-code-cloud" // Roo Code Cloud announcement
+ public readonly latestAnnouncementId = "sep-2025-code-supernova" // Code Supernova stealth model announcement
public readonly providerSettingsManager: ProviderSettingsManager
public readonly customModesManager: CustomModesManager
@@ -196,7 +199,35 @@ export class ClineProvider
const onTaskStarted = () => this.emit(RooCodeEventName.TaskStarted, instance.taskId)
const onTaskCompleted = (taskId: string, tokenUsage: any, toolUsage: any) =>
this.emit(RooCodeEventName.TaskCompleted, taskId, tokenUsage, toolUsage)
- const onTaskAborted = () => this.emit(RooCodeEventName.TaskAborted, instance.taskId)
+ const onTaskAborted = async () => {
+ this.emit(RooCodeEventName.TaskAborted, instance.taskId)
+
+ try {
+ // Only rehydrate on genuine streaming failures.
+ // User-initiated cancels are handled by cancelTask().
+ if (instance.abortReason === "streaming_failed") {
+ // Defensive safeguard: if another path already replaced this instance, skip
+ const current = this.getCurrentTask()
+ if (current && current.instanceId !== instance.instanceId) {
+ this.log(
+ `[onTaskAborted] Skipping rehydrate: current instance ${current.instanceId} != aborted ${instance.instanceId}`,
+ )
+ return
+ }
+
+ const { historyItem } = await this.getTaskWithId(instance.taskId)
+ const rootTask = instance.rootTask
+ const parentTask = instance.parentTask
+ await this.createTaskWithHistoryItem({ ...historyItem, rootTask, parentTask })
+ }
+ } catch (error) {
+ this.log(
+ `[onTaskAborted] Failed to rehydrate after streaming failure: ${
+ error instanceof Error ? error.message : String(error)
+ }`,
+ )
+ }
+ }
const onTaskFocused = () => this.emit(RooCodeEventName.TaskFocused, instance.taskId)
const onTaskUnfocused = () => this.emit(RooCodeEventName.TaskUnfocused, instance.taskId)
const onTaskActive = (taskId: string) => this.emit(RooCodeEventName.TaskActive, taskId)
@@ -1761,6 +1792,7 @@ export class ClineProvider
maxTotalImageSize,
terminalCompressProgressBar,
historyPreviewCollapsed,
+ reasoningBlockCollapsed,
cloudUserInfo,
cloudIsAuthenticated,
sharingEnabled,
@@ -1785,6 +1817,16 @@ export class ClineProvider
featureRoomoteControlEnabled,
} = await this.getState()
+ let cloudOrganizations: CloudOrganizationMembership[] = []
+
+ try {
+ cloudOrganizations = await CloudService.instance.getOrganizationMemberships()
+ } catch (error) {
+ console.error(
+ `[getStateToPostToWebview] failed to get cloud organizations: ${error instanceof Error ? error.message : String(error)}`,
+ )
+ }
+
const telemetryKey = process.env.POSTHOG_API_KEY
const machineId = vscode.env.machineId
const mergedAllowedCommands = this.mergeAllowedCommands(allowedCommands)
@@ -1884,8 +1926,10 @@ export class ClineProvider
terminalCompressProgressBar: terminalCompressProgressBar ?? true,
hasSystemPromptOverride,
historyPreviewCollapsed: historyPreviewCollapsed ?? false,
+ reasoningBlockCollapsed: reasoningBlockCollapsed ?? false,
cloudUserInfo,
cloudIsAuthenticated: cloudIsAuthenticated ?? false,
+ cloudOrganizations,
sharingEnabled: sharingEnabled ?? false,
organizationAllowList,
organizationSettingsVersion,
@@ -2097,6 +2141,7 @@ export class ClineProvider
maxTotalImageSize: stateValues.maxTotalImageSize ?? 20,
maxConcurrentFileReads: stateValues.maxConcurrentFileReads ?? 5,
historyPreviewCollapsed: stateValues.historyPreviewCollapsed ?? false,
+ reasoningBlockCollapsed: stateValues.reasoningBlockCollapsed ?? false,
cloudUserInfo,
cloudIsAuthenticated,
sharingEnabled,
@@ -2210,6 +2255,18 @@ export class ClineProvider
return
}
+ // Log out from cloud if authenticated
+ if (CloudService.hasInstance()) {
+ try {
+ await CloudService.instance.logout()
+ } catch (error) {
+ this.log(
+ `Failed to logout from cloud during reset: ${error instanceof Error ? error.message : String(error)}`,
+ )
+ // Continue with reset even if logout fails
+ }
+ }
+
await this.contextProxy.resetAllState()
await this.providerSettingsManager.resetAllConfigs()
await this.customModesManager.resetCustomModes()
@@ -2525,14 +2582,24 @@ export class ClineProvider
console.log(`[cancelTask] cancelling task ${task.taskId}.${task.instanceId}`)
- const { historyItem } = await this.getTaskWithId(task.taskId)
+ const { historyItem, uiMessagesFilePath } = await this.getTaskWithId(task.taskId)
// Preserve parent and root task information for history item.
const rootTask = task.rootTask
const parentTask = task.parentTask
+	// Mark as a user-initiated cancellation so onTaskAborted skips its streaming-failure rehydration; cancelTask rehydrates below
+ task.abortReason = "user_cancelled"
+
+ // Capture the current instance to detect if rehydrate already occurred elsewhere
+ const originalInstanceId = task.instanceId
+
+ // Begin abort (non-blocking)
task.abortTask()
+ // Immediately mark the original instance as abandoned to prevent any residual activity
+ task.abandoned = true
+
await pWaitFor(
() =>
this.getCurrentTask()! === undefined ||
@@ -2549,11 +2616,24 @@ export class ClineProvider
console.error("Failed to abort task")
})
- if (this.getCurrentTask()) {
- // 'abandoned' will prevent this Cline instance from affecting
- // future Cline instances. This may happen if its hanging on a
- // streaming request.
- this.getCurrentTask()!.abandoned = true
+ // Defensive safeguard: if current instance already changed, skip rehydrate
+ const current = this.getCurrentTask()
+ if (current && current.instanceId !== originalInstanceId) {
+ this.log(
+ `[cancelTask] Skipping rehydrate: current instance ${current.instanceId} != original ${originalInstanceId}`,
+ )
+ return
+ }
+
+ // Final race check before rehydrate to avoid duplicate rehydration
+ {
+ const currentAfterCheck = this.getCurrentTask()
+ if (currentAfterCheck && currentAfterCheck.instanceId !== originalInstanceId) {
+ this.log(
+ `[cancelTask] Skipping rehydrate after final check: current instance ${currentAfterCheck.instanceId} != original ${originalInstanceId}`,
+ )
+ return
+ }
}
// Clears task again, so we need to abortTask manually above.
diff --git a/src/core/webview/webviewMessageHandler.ts b/src/core/webview/webviewMessageHandler.ts
index accb66f6e9c..c3e57c67a20 100644
--- a/src/core/webview/webviewMessageHandler.ts
+++ b/src/core/webview/webviewMessageHandler.ts
@@ -1044,6 +1044,18 @@ export const webviewMessageHandler = async (
break
}
+ case "openKeyboardShortcuts": {
+ // Open VSCode keyboard shortcuts settings and optionally filter to show the Roo Code commands
+ const searchQuery = message.text || ""
+ if (searchQuery) {
+ // Open with a search query pre-filled
+ await vscode.commands.executeCommand("workbench.action.openGlobalKeybindings", searchQuery)
+ } else {
+ // Just open the keyboard shortcuts settings
+ await vscode.commands.executeCommand("workbench.action.openGlobalKeybindings")
+ }
+ break
+ }
case "openMcpSettings": {
const mcpSettingsFilePath = await provider.getMcpHub()?.getMcpSettingsFilePath()
@@ -1605,6 +1617,10 @@ export const webviewMessageHandler = async (
await updateGlobalState("historyPreviewCollapsed", message.bool ?? false)
// No need to call postStateToWebview here as the UI already updated optimistically
break
+ case "setReasoningBlockCollapsed":
+ await updateGlobalState("reasoningBlockCollapsed", message.bool ?? true)
+ // No need to call postStateToWebview here as the UI already updated optimistically
+ break
case "toggleApiConfigPin":
if (message.text) {
const currentPinned = getGlobalState("pinnedApiConfigs") ?? {}
@@ -2302,6 +2318,17 @@ export const webviewMessageHandler = async (
break
}
+ case "cloudLandingPageSignIn": {
+ try {
+ const landingPageSlug = message.text || "supernova"
+ TelemetryService.instance.captureEvent(TelemetryEventName.AUTHENTICATION_INITIATED)
+ await CloudService.instance.login(landingPageSlug)
+ } catch (error) {
+ provider.log(`CloudService#login failed: ${error}`)
+ vscode.window.showErrorMessage("Sign in failed.")
+ }
+ break
+ }
case "rooCloudSignOut": {
try {
await CloudService.instance.logout()
@@ -2356,6 +2383,38 @@ export const webviewMessageHandler = async (
break
}
+ case "switchOrganization": {
+ try {
+ const organizationId = message.organizationId ?? null
+
+ // Switch to the new organization context
+ await CloudService.instance.switchOrganization(organizationId)
+
+ // Refresh the state to update UI
+ await provider.postStateToWebview()
+
+ // Send success response back to webview
+ await provider.postMessageToWebview({
+ type: "organizationSwitchResult",
+ success: true,
+ organizationId: organizationId,
+ })
+ } catch (error) {
+ provider.log(`Organization switch failed: ${error}`)
+ const errorMessage = error instanceof Error ? error.message : String(error)
+
+ // Send error response back to webview
+ await provider.postMessageToWebview({
+ type: "organizationSwitchResult",
+ success: false,
+ error: errorMessage,
+ organizationId: message.organizationId ?? null,
+ })
+
+ vscode.window.showErrorMessage(`Failed to switch organization: ${errorMessage}`)
+ }
+ break
+ }
case "saveCodeIndexSettingsAtomic": {
if (!message.codeIndexSettings) {
diff --git a/src/extension.ts b/src/extension.ts
index dc96e282c43..5db0996ad65 100644
--- a/src/extension.ts
+++ b/src/extension.ts
@@ -194,7 +194,7 @@ export async function activate(context: vscode.ExtensionContext) {
// Add to subscriptions for proper cleanup on deactivate.
context.subscriptions.push(cloudService)
- // Trigger initial cloud profile sync now that CloudService is ready
+ // Trigger initial cloud profile sync now that CloudService is ready.
try {
await provider.initializeCloudProfileSyncWhenReady()
} catch (error) {
diff --git a/src/i18n/locales/ca/common.json b/src/i18n/locales/ca/common.json
index b71b7eb9139..1de013b9ee9 100644
--- a/src/i18n/locales/ca/common.json
+++ b/src/i18n/locales/ca/common.json
@@ -117,6 +117,15 @@
"roo": {
"authenticationRequired": "El proveïdor Roo requereix autenticació al núvol. Si us plau, inicieu sessió a Roo Code Cloud."
},
+ "openaiNativeCodex": {
+ "oauthReadFailed": "Error en carregar les credencials OAuth de ChatGPT a {{path}}: {{error}}. Consell: autentica't amb la CLI de Codex (p. ex., \"codex login\") per crear auth.json.",
+ "oauthParseFailed": "Error en analitzar el JSON de credencials OAuth de ChatGPT a {{path}}: {{error}}. Consell: assegura't que el fitxer sigui JSON vàlid o torna a autenticar-te amb \"codex login\" per regenerar-lo.",
+ "oauthFileTooLarge": "El fitxer de credencials OAuth a {{path}} és massa gran ({{size}} bytes). El màxim permès és {{max}} bytes.",
+ "missingAccessToken": "Les credencials OAuth de ChatGPT no tenen tokens.access_token.",
+ "httpError": "Codex HTTP {{status}} (req: {{requestId}}) model={{modelId}}: {{message}}",
+ "noResponseBody": "Error de respostes de ChatGPT: No hi ha cos de resposta",
+ "emptyStream": "El flux de respostes de ChatGPT no ha retornat contingut per al model {{modelId}}"
+ },
"api": {
"invalidKeyInvalidChars": "La clau API conté caràcters no vàlids."
},
@@ -165,6 +174,10 @@
"incomplete": "Tasca #{{taskNumber}} (Incompleta)",
"no_messages": "Tasca #{{taskNumber}} (Sense missatges)"
},
+ "interruption": {
+ "responseInterruptedByUser": "Resposta interrompuda per l'usuari",
+ "responseInterruptedByApiError": "Resposta interrompuda per error d'API"
+ },
"storage": {
"prompt_custom_path": "Introdueix una ruta d'emmagatzematge personalitzada per a l'historial de converses o deixa-ho buit per utilitzar la ubicació predeterminada",
"path_placeholder": "D:\\RooCodeStorage",
diff --git a/src/i18n/locales/de/common.json b/src/i18n/locales/de/common.json
index 6577d460d10..9f16da4f23c 100644
--- a/src/i18n/locales/de/common.json
+++ b/src/i18n/locales/de/common.json
@@ -114,6 +114,15 @@
"roo": {
"authenticationRequired": "Roo-Anbieter erfordert Cloud-Authentifizierung. Bitte melde dich bei Roo Code Cloud an."
},
+ "openaiNativeCodex": {
+ "oauthReadFailed": "Fehler beim Laden der ChatGPT OAuth-Anmeldedaten unter {{path}}: {{error}}. Tipp: Authentifiziere dich mit der Codex CLI (z.B. \"codex login\"), um auth.json zu erstellen.",
+ "oauthParseFailed": "Fehler beim Parsen der ChatGPT OAuth-Anmeldedaten JSON unter {{path}}: {{error}}. Tipp: Stelle sicher, dass die Datei gültiges JSON ist oder authentifiziere dich erneut mit \"codex login\", um sie zu regenerieren.",
+ "oauthFileTooLarge": "OAuth-Anmeldedatei unter {{path}} ist zu groß ({{size}} Bytes). Maximal erlaubt sind {{max}} Bytes.",
+ "missingAccessToken": "ChatGPT OAuth-Anmeldedaten fehlen tokens.access_token.",
+ "httpError": "Codex HTTP {{status}} (req: {{requestId}}) model={{modelId}}: {{message}}",
+ "noResponseBody": "ChatGPT Responses Fehler: Kein Antworttext vorhanden",
+ "emptyStream": "ChatGPT Responses Stream hat keinen Inhalt für Modell {{modelId}} zurückgegeben"
+ },
"api": {
"invalidKeyInvalidChars": "API-Schlüssel enthält ungültige Zeichen."
},
@@ -161,6 +170,10 @@
"incomplete": "Aufgabe #{{taskNumber}} (Unvollständig)",
"no_messages": "Aufgabe #{{taskNumber}} (Keine Nachrichten)"
},
+ "interruption": {
+ "responseInterruptedByUser": "Antwort vom Benutzer unterbrochen",
+ "responseInterruptedByApiError": "Antwort durch API-Fehler unterbrochen"
+ },
"storage": {
"prompt_custom_path": "Gib den benutzerdefinierten Speicherpfad für den Gesprächsverlauf ein, leer lassen für Standardspeicherort",
"path_placeholder": "D:\\RooCodeStorage",
diff --git a/src/i18n/locales/en/common.json b/src/i18n/locales/en/common.json
index e8c264ba684..6809a567f7b 100644
--- a/src/i18n/locales/en/common.json
+++ b/src/i18n/locales/en/common.json
@@ -114,6 +114,15 @@
"roo": {
"authenticationRequired": "Roo provider requires cloud authentication. Please sign in to Roo Code Cloud."
},
+ "openaiNativeCodex": {
+ "oauthReadFailed": "Failed to load ChatGPT OAuth credentials at {{path}}: {{error}}. Tip: authenticate with the Codex CLI (e.g., \"codex login\") to create auth.json.",
+ "oauthParseFailed": "Failed to parse ChatGPT OAuth credentials JSON at {{path}}: {{error}}. Tip: ensure the file is valid JSON or re-authenticate with \"codex login\" to regenerate it.",
+ "oauthFileTooLarge": "OAuth credentials file at {{path}} is too large ({{size}} bytes). Maximum allowed is {{max}} bytes.",
+ "missingAccessToken": "ChatGPT OAuth credentials are missing tokens.access_token.",
+ "httpError": "Codex HTTP {{status}} (req: {{requestId}}) model={{modelId}}: {{message}}",
+ "noResponseBody": "ChatGPT Responses error: No response body",
+ "emptyStream": "ChatGPT Responses stream returned no content for model {{modelId}}"
+ },
"api": {
"invalidKeyInvalidChars": "API key contains invalid characters."
},
@@ -161,6 +170,10 @@
"incomplete": "Task #{{taskNumber}} (Incomplete)",
"no_messages": "Task #{{taskNumber}} (No messages)"
},
+ "interruption": {
+ "responseInterruptedByUser": "Response interrupted by user",
+ "responseInterruptedByApiError": "Response interrupted by API error"
+ },
"storage": {
"prompt_custom_path": "Enter custom conversation history storage path, leave empty to use default location",
"path_placeholder": "D:\\RooCodeStorage",
diff --git a/src/i18n/locales/es/common.json b/src/i18n/locales/es/common.json
index 5cfa3c5749e..f659034fce8 100644
--- a/src/i18n/locales/es/common.json
+++ b/src/i18n/locales/es/common.json
@@ -114,6 +114,15 @@
"roo": {
"authenticationRequired": "El proveedor Roo requiere autenticación en la nube. Por favor, inicia sesión en Roo Code Cloud."
},
+ "openaiNativeCodex": {
+ "oauthReadFailed": "Error al cargar las credenciales OAuth de ChatGPT en {{path}}: {{error}}. Consejo: autentícate con la CLI de Codex (ej., \"codex login\") para crear auth.json.",
+ "oauthParseFailed": "Error al analizar el JSON de credenciales OAuth de ChatGPT en {{path}}: {{error}}. Consejo: asegúrate de que el archivo sea JSON válido o vuelve a autenticarte con \"codex login\" para regenerarlo.",
+ "oauthFileTooLarge": "El archivo de credenciales OAuth en {{path}} es demasiado grande ({{size}} bytes). El máximo permitido es {{max}} bytes.",
+ "missingAccessToken": "Las credenciales OAuth de ChatGPT no tienen tokens.access_token.",
+ "httpError": "Codex HTTP {{status}} (req: {{requestId}}) model={{modelId}}: {{message}}",
+ "noResponseBody": "Error de respuestas de ChatGPT: Sin cuerpo de respuesta",
+ "emptyStream": "El flujo de respuestas de ChatGPT no devolvió contenido para el modelo {{modelId}}"
+ },
"api": {
"invalidKeyInvalidChars": "La clave API contiene caracteres inválidos."
},
@@ -161,6 +170,10 @@
"incomplete": "Tarea #{{taskNumber}} (Incompleta)",
"no_messages": "Tarea #{{taskNumber}} (Sin mensajes)"
},
+ "interruption": {
+ "responseInterruptedByUser": "Respuesta interrumpida por el usuario",
+ "responseInterruptedByApiError": "Respuesta interrumpida por error de API"
+ },
"storage": {
"prompt_custom_path": "Ingresa la ruta de almacenamiento personalizada para el historial de conversaciones, déjala vacía para usar la ubicación predeterminada",
"path_placeholder": "D:\\RooCodeStorage",
diff --git a/src/i18n/locales/fr/common.json b/src/i18n/locales/fr/common.json
index 5a11c874a7f..b57efc3bf8f 100644
--- a/src/i18n/locales/fr/common.json
+++ b/src/i18n/locales/fr/common.json
@@ -114,6 +114,15 @@
"roo": {
"authenticationRequired": "Le fournisseur Roo nécessite une authentification cloud. Veuillez vous connecter à Roo Code Cloud."
},
+ "openaiNativeCodex": {
+ "oauthReadFailed": "Échec du chargement des identifiants OAuth ChatGPT à {{path}} : {{error}}. Conseil : authentifiez-vous avec la CLI Codex (ex. \"codex login\") pour créer auth.json.",
+ "oauthParseFailed": "Échec de l'analyse du JSON des identifiants OAuth ChatGPT à {{path}} : {{error}}. Conseil : assurez-vous que le fichier est un JSON valide ou ré-authentifiez-vous avec \"codex login\" pour le régénérer.",
+ "oauthFileTooLarge": "Le fichier d'identifiants OAuth à {{path}} est trop volumineux ({{size}} octets). Le maximum autorisé est {{max}} octets.",
+ "missingAccessToken": "Les identifiants OAuth ChatGPT n'ont pas de tokens.access_token.",
+ "httpError": "Codex HTTP {{status}} (req: {{requestId}}) model={{modelId}} : {{message}}",
+ "noResponseBody": "Erreur de réponses ChatGPT : Aucun corps de réponse",
+ "emptyStream": "Le flux de réponses ChatGPT n'a retourné aucun contenu pour le modèle {{modelId}}"
+ },
"api": {
"invalidKeyInvalidChars": "La clé API contient des caractères invalides."
},
@@ -161,6 +170,10 @@
"incomplete": "Tâche #{{taskNumber}} (Incomplète)",
"no_messages": "Tâche #{{taskNumber}} (Aucun message)"
},
+ "interruption": {
+ "responseInterruptedByUser": "Réponse interrompue par l'utilisateur",
+ "responseInterruptedByApiError": "Réponse interrompue par une erreur d'API"
+ },
"storage": {
"prompt_custom_path": "Entrez le chemin de stockage personnalisé pour l'historique des conversations, laissez vide pour utiliser l'emplacement par défaut",
"path_placeholder": "D:\\RooCodeStorage",
diff --git a/src/i18n/locales/hi/common.json b/src/i18n/locales/hi/common.json
index e89c16cbd05..34bce6bd312 100644
--- a/src/i18n/locales/hi/common.json
+++ b/src/i18n/locales/hi/common.json
@@ -114,6 +114,15 @@
"roo": {
"authenticationRequired": "Roo प्रदाता को क्लाउड प्रमाणीकरण की आवश्यकता है। कृपया Roo Code Cloud में साइन इन करें।"
},
+ "openaiNativeCodex": {
+ "oauthReadFailed": "{{path}} पर ChatGPT OAuth क्रेडेंशियल लोड करने में विफल: {{error}}। सुझाव: auth.json बनाने के लिए Codex CLI के साथ प्रमाणित करें (जैसे, \"codex login\")।",
+ "oauthParseFailed": "{{path}} पर ChatGPT OAuth क्रेडेंशियल JSON पार्स करने में विफल: {{error}}। सुझाव: सुनिश्चित करें कि फ़ाइल वैध JSON है या इसे पुनर्जनित करने के लिए \"codex login\" के साथ पुनः प्रमाणित करें।",
+ "oauthFileTooLarge": "{{path}} पर OAuth क्रेडेंशियल फ़ाइल बहुत बड़ी है ({{size}} बाइट्स)। अधिकतम अनुमत {{max}} बाइट्स है।",
+ "missingAccessToken": "ChatGPT OAuth क्रेडेंशियल में tokens.access_token गुम है।",
+ "httpError": "Codex HTTP {{status}} (req: {{requestId}}) model={{modelId}}: {{message}}",
+ "noResponseBody": "ChatGPT प्रतिक्रिया त्रुटि: कोई प्रतिक्रिया बॉडी नहीं",
+ "emptyStream": "ChatGPT प्रतिक्रिया स्ट्रीम ने मॉडल {{modelId}} के लिए कोई सामग्री वापस नहीं की"
+ },
"api": {
"invalidKeyInvalidChars": "API कुंजी में अमान्य वर्ण हैं।"
},
@@ -161,6 +170,10 @@
"incomplete": "टास्क #{{taskNumber}} (अधूरा)",
"no_messages": "टास्क #{{taskNumber}} (कोई संदेश नहीं)"
},
+ "interruption": {
+ "responseInterruptedByUser": "उपयोगकर्ता द्वारा प्रतिक्रिया बाधित",
+ "responseInterruptedByApiError": "API त्रुटि द्वारा प्रतिक्रिया बाधित"
+ },
"storage": {
"prompt_custom_path": "वार्तालाप इतिहास के लिए कस्टम स्टोरेज पाथ दर्ज करें, डिफ़ॉल्ट स्थान का उपयोग करने के लिए खाली छोड़ दें",
"path_placeholder": "D:\\RooCodeStorage",
diff --git a/src/i18n/locales/id/common.json b/src/i18n/locales/id/common.json
index ae1662eb37f..f7fb7abfcf6 100644
--- a/src/i18n/locales/id/common.json
+++ b/src/i18n/locales/id/common.json
@@ -114,6 +114,15 @@
"roo": {
"authenticationRequired": "Penyedia Roo memerlukan autentikasi cloud. Silakan masuk ke Roo Code Cloud."
},
+ "openaiNativeCodex": {
+ "oauthReadFailed": "Gagal memuat kredensial OAuth ChatGPT di {{path}}: {{error}}. Tips: autentikasi dengan Codex CLI (mis., \"codex login\") untuk membuat auth.json.",
+ "oauthParseFailed": "Gagal mengurai JSON kredensial OAuth ChatGPT di {{path}}: {{error}}. Tips: pastikan file adalah JSON yang valid atau autentikasi ulang dengan \"codex login\" untuk meregenerasi.",
+ "oauthFileTooLarge": "File kredensial OAuth di {{path}} terlalu besar ({{size}} byte). Maksimum yang diizinkan adalah {{max}} byte.",
+ "missingAccessToken": "Kredensial OAuth ChatGPT tidak memiliki tokens.access_token.",
+ "httpError": "Codex HTTP {{status}} (req: {{requestId}}) model={{modelId}}: {{message}}",
+ "noResponseBody": "Error respons ChatGPT: Tidak ada body respons",
+ "emptyStream": "Stream respons ChatGPT tidak mengembalikan konten untuk model {{modelId}}"
+ },
"api": {
"invalidKeyInvalidChars": "Kunci API mengandung karakter tidak valid."
},
@@ -161,6 +170,10 @@
"incomplete": "Tugas #{{taskNumber}} (Tidak lengkap)",
"no_messages": "Tugas #{{taskNumber}} (Tidak ada pesan)"
},
+ "interruption": {
+ "responseInterruptedByUser": "Respons diinterupsi oleh pengguna",
+ "responseInterruptedByApiError": "Respons diinterupsi oleh error API"
+ },
"storage": {
"prompt_custom_path": "Masukkan path penyimpanan riwayat percakapan kustom, biarkan kosong untuk menggunakan lokasi default",
"path_placeholder": "D:\\RooCodeStorage",
diff --git a/src/i18n/locales/it/common.json b/src/i18n/locales/it/common.json
index aeaec11d0d0..c2a34557ef1 100644
--- a/src/i18n/locales/it/common.json
+++ b/src/i18n/locales/it/common.json
@@ -114,6 +114,15 @@
"roo": {
"authenticationRequired": "Il provider Roo richiede l'autenticazione cloud. Accedi a Roo Code Cloud."
},
+ "openaiNativeCodex": {
+ "oauthReadFailed": "Impossibile caricare le credenziali OAuth di ChatGPT in {{path}}: {{error}}. Suggerimento: autenticati con la CLI Codex (es. \"codex login\") per creare auth.json.",
+ "oauthParseFailed": "Impossibile analizzare il JSON delle credenziali OAuth di ChatGPT in {{path}}: {{error}}. Suggerimento: assicurati che il file sia JSON valido o ri-autenticati con \"codex login\" per rigenerarlo.",
+ "oauthFileTooLarge": "Il file delle credenziali OAuth in {{path}} è troppo grande ({{size}} byte). Il massimo consentito è {{max}} byte.",
+ "missingAccessToken": "Le credenziali OAuth di ChatGPT non hanno tokens.access_token.",
+ "httpError": "Codex HTTP {{status}} (req: {{requestId}}) model={{modelId}}: {{message}}",
+ "noResponseBody": "Errore risposte ChatGPT: Nessun corpo di risposta",
+ "emptyStream": "Il flusso di risposte ChatGPT non ha restituito contenuto per il modello {{modelId}}"
+ },
"api": {
"invalidKeyInvalidChars": "La chiave API contiene caratteri non validi."
},
@@ -161,6 +170,10 @@
"incomplete": "Attività #{{taskNumber}} (Incompleta)",
"no_messages": "Attività #{{taskNumber}} (Nessun messaggio)"
},
+ "interruption": {
+ "responseInterruptedByUser": "Risposta interrotta dall'utente",
+ "responseInterruptedByApiError": "Risposta interrotta da errore API"
+ },
"storage": {
"prompt_custom_path": "Inserisci il percorso di archiviazione personalizzato per la cronologia delle conversazioni, lascia vuoto per utilizzare la posizione predefinita",
"path_placeholder": "D:\\RooCodeStorage",
diff --git a/src/i18n/locales/ja/common.json b/src/i18n/locales/ja/common.json
index a607dbffd53..21cf4ad30c2 100644
--- a/src/i18n/locales/ja/common.json
+++ b/src/i18n/locales/ja/common.json
@@ -114,6 +114,15 @@
"roo": {
"authenticationRequired": "Rooプロバイダーはクラウド認証が必要です。Roo Code Cloudにサインインしてください。"
},
+ "openaiNativeCodex": {
+ "oauthReadFailed": "{{path}}でChatGPT OAuth認証情報の読み込みに失敗しました: {{error}}。ヒント: Codex CLI(例:\"codex login\")で認証してauth.jsonを作成してください。",
+ "oauthParseFailed": "{{path}}でChatGPT OAuth認証情報JSONの解析に失敗しました: {{error}}。ヒント: ファイルが有効なJSONであることを確認するか、\"codex login\"で再認証して再生成してください。",
+ "oauthFileTooLarge": "{{path}}のOAuth認証情報ファイルが大きすぎます({{size}}バイト)。最大許可サイズは{{max}}バイトです。",
+ "missingAccessToken": "ChatGPT OAuth認証情報にtokens.access_tokenがありません。",
+ "httpError": "Codex HTTP {{status}} (req: {{requestId}}) model={{modelId}}: {{message}}",
+ "noResponseBody": "ChatGPTレスポンスエラー: レスポンスボディがありません",
+ "emptyStream": "ChatGPTレスポンスストリームがモデル{{modelId}}のコンテンツを返しませんでした"
+ },
"api": {
"invalidKeyInvalidChars": "APIキーに無効な文字が含まれています。"
},
@@ -161,6 +170,10 @@
"incomplete": "タスク #{{taskNumber}} (未完了)",
"no_messages": "タスク #{{taskNumber}} (メッセージなし)"
},
+ "interruption": {
+ "responseInterruptedByUser": "ユーザーによって応答が中断されました",
+ "responseInterruptedByApiError": "APIエラーによって応答が中断されました"
+ },
"storage": {
"prompt_custom_path": "会話履歴のカスタムストレージパスを入力してください。デフォルトの場所を使用する場合は空のままにしてください",
"path_placeholder": "D:\\RooCodeStorage",
diff --git a/src/i18n/locales/ko/common.json b/src/i18n/locales/ko/common.json
index e48b84fe201..9a95714ca14 100644
--- a/src/i18n/locales/ko/common.json
+++ b/src/i18n/locales/ko/common.json
@@ -114,6 +114,15 @@
"roo": {
"authenticationRequired": "Roo 제공업체는 클라우드 인증이 필요합니다. Roo Code Cloud에 로그인하세요."
},
+ "openaiNativeCodex": {
+ "oauthReadFailed": "{{path}}에서 ChatGPT OAuth 자격 증명 로드 실패: {{error}}. 팁: Codex CLI(예: \"codex login\")로 인증하여 auth.json을 생성하세요.",
+ "oauthParseFailed": "{{path}}에서 ChatGPT OAuth 자격 증명 JSON 파싱 실패: {{error}}. 팁: 파일이 유효한 JSON인지 확인하거나 \"codex login\"으로 재인증하여 재생성하세요.",
+ "oauthFileTooLarge": "{{path}}의 OAuth 자격 증명 파일이 너무 큽니다({{size}}바이트). 최대 허용 크기는 {{max}}바이트입니다.",
+ "missingAccessToken": "ChatGPT OAuth 자격 증명에 tokens.access_token이 없습니다.",
+ "httpError": "Codex HTTP {{status}} (req: {{requestId}}) model={{modelId}}: {{message}}",
+ "noResponseBody": "ChatGPT 응답 오류: 응답 본문 없음",
+ "emptyStream": "ChatGPT 응답 스트림이 모델 {{modelId}}에 대한 콘텐츠를 반환하지 않았습니다"
+ },
"api": {
"invalidKeyInvalidChars": "API 키에 유효하지 않은 문자가 포함되어 있습니다."
},
@@ -161,6 +170,10 @@
"incomplete": "작업 #{{taskNumber}} (미완료)",
"no_messages": "작업 #{{taskNumber}} (메시지 없음)"
},
+ "interruption": {
+ "responseInterruptedByUser": "사용자에 의해 응답이 중단됨",
+ "responseInterruptedByApiError": "API 오류로 인해 응답이 중단됨"
+ },
"storage": {
"prompt_custom_path": "대화 내역을 위한 사용자 지정 저장 경로를 입력하세요. 기본 위치를 사용하려면 비워두세요",
"path_placeholder": "D:\\RooCodeStorage",
diff --git a/src/i18n/locales/nl/common.json b/src/i18n/locales/nl/common.json
index 0e3e2459a0d..36398dc8533 100644
--- a/src/i18n/locales/nl/common.json
+++ b/src/i18n/locales/nl/common.json
@@ -114,6 +114,15 @@
"roo": {
"authenticationRequired": "Roo provider vereist cloud authenticatie. Log in bij Roo Code Cloud."
},
+ "openaiNativeCodex": {
+ "oauthReadFailed": "Laden van ChatGPT OAuth-referenties op {{path}} mislukt: {{error}}. Tip: authenticeer met de Codex CLI (bijv. \"codex login\") om auth.json aan te maken.",
+ "oauthParseFailed": "Parsen van ChatGPT OAuth-referenties JSON op {{path}} mislukt: {{error}}. Tip: zorg ervoor dat het bestand geldige JSON is of authenticeer opnieuw met \"codex login\" om het te regenereren.",
+ "oauthFileTooLarge": "OAuth-referentiebestand op {{path}} is te groot ({{size}} bytes). Maximum toegestaan is {{max}} bytes.",
+ "missingAccessToken": "ChatGPT OAuth-referenties missen tokens.access_token.",
+ "httpError": "Codex HTTP {{status}} (req: {{requestId}}) model={{modelId}}: {{message}}",
+ "noResponseBody": "ChatGPT Responses fout: Geen response body",
+ "emptyStream": "ChatGPT Responses stream retourneerde geen inhoud voor model {{modelId}}"
+ },
"api": {
"invalidKeyInvalidChars": "API-sleutel bevat ongeldige karakters."
},
@@ -161,6 +170,10 @@
"incomplete": "Taak #{{taskNumber}} (Onvolledig)",
"no_messages": "Taak #{{taskNumber}} (Geen berichten)"
},
+ "interruption": {
+ "responseInterruptedByUser": "Reactie onderbroken door gebruiker",
+ "responseInterruptedByApiError": "Reactie onderbroken door API-fout"
+ },
"storage": {
"prompt_custom_path": "Voer een aangepast opslagpad voor gespreksgeschiedenis in, laat leeg voor standaardlocatie",
"path_placeholder": "D:\\RooCodeStorage",
diff --git a/src/i18n/locales/pl/common.json b/src/i18n/locales/pl/common.json
index 1d48b0f9cc1..4ca02b44589 100644
--- a/src/i18n/locales/pl/common.json
+++ b/src/i18n/locales/pl/common.json
@@ -114,6 +114,15 @@
"roo": {
"authenticationRequired": "Dostawca Roo wymaga uwierzytelnienia w chmurze. Zaloguj się do Roo Code Cloud."
},
+ "openaiNativeCodex": {
+ "oauthReadFailed": "Nie udało się załadować danych uwierzytelniających OAuth ChatGPT w {{path}}: {{error}}. Wskazówka: uwierzytelnij się za pomocą Codex CLI (np. \"codex login\"), aby utworzyć auth.json.",
+ "oauthParseFailed": "Nie udało się przeanalizować JSON danych uwierzytelniających OAuth ChatGPT w {{path}}: {{error}}. Wskazówka: upewnij się, że plik jest prawidłowym JSON lub ponownie uwierzytelnij się za pomocą \"codex login\", aby go zregenerować.",
+ "oauthFileTooLarge": "Plik danych uwierzytelniających OAuth w {{path}} jest za duży ({{size}} bajtów). Maksymalny dozwolony rozmiar to {{max}} bajtów.",
+ "missingAccessToken": "Dane uwierzytelniające OAuth ChatGPT nie zawierają tokens.access_token.",
+ "httpError": "Codex HTTP {{status}} (req: {{requestId}}) model={{modelId}}: {{message}}",
+ "noResponseBody": "Błąd odpowiedzi ChatGPT: Brak treści odpowiedzi",
+ "emptyStream": "Strumień odpowiedzi ChatGPT nie zwrócił zawartości dla modelu {{modelId}}"
+ },
"api": {
"invalidKeyInvalidChars": "Klucz API zawiera nieprawidłowe znaki."
},
@@ -161,6 +170,10 @@
"incomplete": "Zadanie #{{taskNumber}} (Niekompletne)",
"no_messages": "Zadanie #{{taskNumber}} (Brak wiadomości)"
},
+ "interruption": {
+ "responseInterruptedByUser": "Odpowiedź przerwana przez użytkownika",
+ "responseInterruptedByApiError": "Odpowiedź przerwana przez błąd API"
+ },
"storage": {
"prompt_custom_path": "Wprowadź niestandardową ścieżkę przechowywania dla historii konwersacji lub pozostaw puste, aby użyć lokalizacji domyślnej",
"path_placeholder": "D:\\RooCodeStorage",
diff --git a/src/i18n/locales/pt-BR/common.json b/src/i18n/locales/pt-BR/common.json
index 093ef7b0bff..663d2a0e1b5 100644
--- a/src/i18n/locales/pt-BR/common.json
+++ b/src/i18n/locales/pt-BR/common.json
@@ -118,6 +118,15 @@
"roo": {
"authenticationRequired": "O provedor Roo requer autenticação na nuvem. Faça login no Roo Code Cloud."
},
+ "openaiNativeCodex": {
+ "oauthReadFailed": "Falha ao carregar credenciais OAuth do ChatGPT em {{path}}: {{error}}. Dica: autentique-se com a CLI do Codex (ex: \"codex login\") para criar auth.json.",
+ "oauthParseFailed": "Falha ao analisar JSON das credenciais OAuth do ChatGPT em {{path}}: {{error}}. Dica: certifique-se de que o arquivo seja JSON válido ou autentique-se novamente com \"codex login\" para regenerá-lo.",
+ "oauthFileTooLarge": "Arquivo de credenciais OAuth em {{path}} é muito grande ({{size}} bytes). O máximo permitido é {{max}} bytes.",
+ "missingAccessToken": "Credenciais OAuth do ChatGPT estão faltando tokens.access_token.",
+ "httpError": "Codex HTTP {{status}} (req: {{requestId}}) model={{modelId}}: {{message}}",
+ "noResponseBody": "Erro de respostas do ChatGPT: Sem corpo de resposta",
+ "emptyStream": "Stream de respostas do ChatGPT não retornou conteúdo para o modelo {{modelId}}"
+ },
"api": {
"invalidKeyInvalidChars": "A chave API contém caracteres inválidos."
},
@@ -165,6 +174,10 @@
"incomplete": "Tarefa #{{taskNumber}} (Incompleta)",
"no_messages": "Tarefa #{{taskNumber}} (Sem mensagens)"
},
+ "interruption": {
+ "responseInterruptedByUser": "Resposta interrompida pelo usuário",
+ "responseInterruptedByApiError": "Resposta interrompida por erro da API"
+ },
"storage": {
"prompt_custom_path": "Digite o caminho de armazenamento personalizado para o histórico de conversas, deixe em branco para usar o local padrão",
"path_placeholder": "D:\\RooCodeStorage",
diff --git a/src/i18n/locales/ru/common.json b/src/i18n/locales/ru/common.json
index 7edd656d8c0..c5897d27cea 100644
--- a/src/i18n/locales/ru/common.json
+++ b/src/i18n/locales/ru/common.json
@@ -114,6 +114,15 @@
"roo": {
"authenticationRequired": "Провайдер Roo требует облачной аутентификации. Войдите в Roo Code Cloud."
},
+ "openaiNativeCodex": {
+ "oauthReadFailed": "Не удалось загрузить учетные данные OAuth ChatGPT по пути {{path}}: {{error}}. Совет: аутентифицируйтесь с помощью Codex CLI (например, \"codex login\") для создания auth.json.",
+ "oauthParseFailed": "Не удалось разобрать JSON учетных данных OAuth ChatGPT по пути {{path}}: {{error}}. Совет: убедитесь, что файл является действительным JSON, или повторно аутентифицируйтесь с помощью \"codex login\" для его регенерации.",
+ "oauthFileTooLarge": "Файл учетных данных OAuth по пути {{path}} слишком большой ({{size}} байт). Максимально допустимый размер {{max}} байт.",
+ "missingAccessToken": "В учетных данных OAuth ChatGPT отсутствует tokens.access_token.",
+ "httpError": "Codex HTTP {{status}} (req: {{requestId}}) model={{modelId}}: {{message}}",
+ "noResponseBody": "Ошибка ответов ChatGPT: Нет тела ответа",
+ "emptyStream": "Поток ответов ChatGPT не вернул содержимое для модели {{modelId}}"
+ },
"api": {
"invalidKeyInvalidChars": "API-ключ содержит недопустимые символы."
},
@@ -161,6 +170,10 @@
"incomplete": "Задача #{{taskNumber}} (Незавершенная)",
"no_messages": "Задача #{{taskNumber}} (Нет сообщений)"
},
+ "interruption": {
+ "responseInterruptedByUser": "Ответ прерван пользователем",
+ "responseInterruptedByApiError": "Ответ прерван ошибкой API"
+ },
"storage": {
"prompt_custom_path": "Введите пользовательский путь хранения истории разговоров, оставьте пустым для использования расположения по умолчанию",
"path_placeholder": "D:\\RooCodeStorage",
diff --git a/src/i18n/locales/tr/common.json b/src/i18n/locales/tr/common.json
index 20b2824b983..a6829617f41 100644
--- a/src/i18n/locales/tr/common.json
+++ b/src/i18n/locales/tr/common.json
@@ -114,6 +114,15 @@
"roo": {
"authenticationRequired": "Roo sağlayıcısı bulut kimlik doğrulaması gerektirir. Lütfen Roo Code Cloud'a giriş yapın."
},
+ "openaiNativeCodex": {
+ "oauthReadFailed": "{{path}} konumunda ChatGPT OAuth kimlik bilgileri yüklenemedi: {{error}}. İpucu: auth.json oluşturmak için Codex CLI ile kimlik doğrulaması yapın (örn. \"codex login\").",
+ "oauthParseFailed": "{{path}} konumunda ChatGPT OAuth kimlik bilgileri JSON'u ayrıştırılamadı: {{error}}. İpucu: dosyanın geçerli JSON olduğundan emin olun veya yeniden oluşturmak için \"codex login\" ile yeniden kimlik doğrulaması yapın.",
+ "oauthFileTooLarge": "{{path}} konumundaki OAuth kimlik bilgileri dosyası çok büyük ({{size}} bayt). İzin verilen maksimum {{max}} bayttır.",
+ "missingAccessToken": "ChatGPT OAuth kimlik bilgilerinde tokens.access_token eksik.",
+ "httpError": "Codex HTTP {{status}} (req: {{requestId}}) model={{modelId}}: {{message}}",
+ "noResponseBody": "ChatGPT Yanıtları hatası: Yanıt gövdesi yok",
+ "emptyStream": "ChatGPT Yanıtları akışı {{modelId}} modeli için içerik döndürmedi"
+ },
"api": {
"invalidKeyInvalidChars": "API anahtarı geçersiz karakterler içeriyor."
},
@@ -161,6 +170,10 @@
"incomplete": "Görev #{{taskNumber}} (Tamamlanmamış)",
"no_messages": "Görev #{{taskNumber}} (Mesaj yok)"
},
+ "interruption": {
+ "responseInterruptedByUser": "Yanıt kullanıcı tarafından kesildi",
+ "responseInterruptedByApiError": "Yanıt API hatası nedeniyle kesildi"
+ },
"storage": {
"prompt_custom_path": "Konuşma geçmişi için özel depolama yolunu girin, varsayılan konumu kullanmak için boş bırakın",
"path_placeholder": "D:\\RooCodeStorage",
diff --git a/src/i18n/locales/vi/common.json b/src/i18n/locales/vi/common.json
index f4755162fe7..8979ed97bdc 100644
--- a/src/i18n/locales/vi/common.json
+++ b/src/i18n/locales/vi/common.json
@@ -114,6 +114,15 @@
"roo": {
"authenticationRequired": "Nhà cung cấp Roo yêu cầu xác thực đám mây. Vui lòng đăng nhập vào Roo Code Cloud."
},
+ "openaiNativeCodex": {
+ "oauthReadFailed": "Không thể tải thông tin xác thực OAuth ChatGPT tại {{path}}: {{error}}. Mẹo: xác thực với Codex CLI (ví dụ: \"codex login\") để tạo auth.json.",
+ "oauthParseFailed": "Không thể phân tích JSON thông tin xác thực OAuth ChatGPT tại {{path}}: {{error}}. Mẹo: đảm bảo tệp là JSON hợp lệ hoặc xác thực lại với \"codex login\" để tạo lại.",
+ "oauthFileTooLarge": "Tệp thông tin xác thực OAuth tại {{path}} quá lớn ({{size}} byte). Kích thước tối đa cho phép là {{max}} byte.",
+ "missingAccessToken": "Thông tin xác thực OAuth ChatGPT thiếu tokens.access_token.",
+ "httpError": "Codex HTTP {{status}} (req: {{requestId}}) model={{modelId}}: {{message}}",
+ "noResponseBody": "Lỗi phản hồi ChatGPT: Không có nội dung phản hồi",
+ "emptyStream": "Luồng phản hồi ChatGPT không trả về nội dung cho mô hình {{modelId}}"
+ },
"api": {
"invalidKeyInvalidChars": "Khóa API chứa ký tự không hợp lệ."
},
@@ -161,6 +170,10 @@
"incomplete": "Nhiệm vụ #{{taskNumber}} (Chưa hoàn thành)",
"no_messages": "Nhiệm vụ #{{taskNumber}} (Không có tin nhắn)"
},
+ "interruption": {
+ "responseInterruptedByUser": "Phản hồi bị gián đoạn bởi người dùng",
+ "responseInterruptedByApiError": "Phản hồi bị gián đoạn bởi lỗi API"
+ },
"storage": {
"prompt_custom_path": "Nhập đường dẫn lưu trữ tùy chỉnh cho lịch sử hội thoại, để trống để sử dụng vị trí mặc định",
"path_placeholder": "D:\\RooCodeStorage",
diff --git a/src/i18n/locales/zh-CN/common.json b/src/i18n/locales/zh-CN/common.json
index 787c5c8ae99..63e8799b8db 100644
--- a/src/i18n/locales/zh-CN/common.json
+++ b/src/i18n/locales/zh-CN/common.json
@@ -119,6 +119,15 @@
"roo": {
"authenticationRequired": "Roo 提供商需要云认证。请登录 Roo Code Cloud。"
},
+ "openaiNativeCodex": {
+ "oauthReadFailed": "无法在 {{path}} 加载 ChatGPT OAuth 凭据:{{error}}。提示:使用 Codex CLI 进行身份验证(例如 \"codex login\")以创建 auth.json。",
+ "oauthParseFailed": "无法在 {{path}} 解析 ChatGPT OAuth 凭据 JSON:{{error}}。提示:确保文件是有效的 JSON 或使用 \"codex login\" 重新验证以重新生成。",
+ "oauthFileTooLarge": "{{path}} 处的 OAuth 凭据文件过大({{size}} 字节)。最大允许大小为 {{max}} 字节。",
+ "missingAccessToken": "ChatGPT OAuth 凭据缺少 tokens.access_token。",
+ "httpError": "Codex HTTP {{status}} (req: {{requestId}}) model={{modelId}}: {{message}}",
+ "noResponseBody": "ChatGPT 响应错误:无响应正文",
+ "emptyStream": "ChatGPT 响应流未为模型 {{modelId}} 返回内容"
+ },
"api": {
"invalidKeyInvalidChars": "API 密钥包含无效字符。"
},
@@ -166,6 +175,10 @@
"incomplete": "任务 #{{taskNumber}} (未完成)",
"no_messages": "任务 #{{taskNumber}} (无消息)"
},
+ "interruption": {
+ "responseInterruptedByUser": "响应被用户中断",
+ "responseInterruptedByApiError": "响应被 API 错误中断"
+ },
"storage": {
"prompt_custom_path": "输入自定义会话历史存储路径,留空以使用默认位置",
"path_placeholder": "D:\\RooCodeStorage",
diff --git a/src/i18n/locales/zh-TW/common.json b/src/i18n/locales/zh-TW/common.json
index 0ae3549d3ec..94e207e56c4 100644
--- a/src/i18n/locales/zh-TW/common.json
+++ b/src/i18n/locales/zh-TW/common.json
@@ -113,6 +113,15 @@
"roo": {
"authenticationRequired": "Roo 提供者需要雲端認證。請登入 Roo Code Cloud。"
},
+ "openaiNativeCodex": {
+ "oauthReadFailed": "無法在 {{path}} 載入 ChatGPT OAuth 憑證:{{error}}。提示:使用 Codex CLI 進行驗證(例如 \"codex login\")以建立 auth.json。",
+ "oauthParseFailed": "無法在 {{path}} 解析 ChatGPT OAuth 憑證 JSON:{{error}}。提示:確保檔案是有效的 JSON 或使用 \"codex login\" 重新驗證以重新產生。",
+ "oauthFileTooLarge": "{{path}} 的 OAuth 憑證檔案過大({{size}} 位元組)。允許的最大大小為 {{max}} 位元組。",
+ "missingAccessToken": "ChatGPT OAuth 憑證缺少 tokens.access_token。",
+ "httpError": "Codex HTTP {{status}} (req: {{requestId}}) model={{modelId}}: {{message}}",
+ "noResponseBody": "ChatGPT 回應錯誤:無回應內容",
+ "emptyStream": "ChatGPT 回應串流未為模型 {{modelId}} 傳回內容"
+ },
"api": {
"invalidKeyInvalidChars": "API 金鑰包含無效字元。"
},
@@ -161,6 +170,10 @@
"incomplete": "工作 #{{taskNumber}} (未完成)",
"no_messages": "工作 #{{taskNumber}} (無訊息)"
},
+ "interruption": {
+ "responseInterruptedByUser": "回應被使用者中斷",
+ "responseInterruptedByApiError": "回應被 API 錯誤中斷"
+ },
"storage": {
"prompt_custom_path": "輸入自訂會話歷史儲存路徑,留空以使用預設位置",
"path_placeholder": "D:\\RooCodeStorage",
diff --git a/src/package.json b/src/package.json
index 4016b06f22d..249993ecc1e 100644
--- a/src/package.json
+++ b/src/package.json
@@ -3,7 +3,7 @@
"displayName": "%extension.displayName%",
"description": "%extension.description%",
"publisher": "RooVeterinaryInc",
- "version": "3.28.3",
+ "version": "3.28.8",
"icon": "assets/icons/icon.png",
"galleryBanner": {
"color": "#617A91",
@@ -174,6 +174,11 @@
"command": "roo-cline.acceptInput",
"title": "%command.acceptInput.title%",
"category": "%configuration.title%"
+ },
+ {
+ "command": "roo-cline.toggleAutoApprove",
+ "title": "%command.toggleAutoApprove.title%",
+ "category": "%configuration.title%"
}
],
"menus": {
@@ -310,6 +315,13 @@
"win": "ctrl+y",
"linux": "ctrl+y",
"when": "editorTextFocus && editorHasSelection"
+ },
+ {
+ "command": "roo-cline.toggleAutoApprove",
+ "key": "cmd+alt+a",
+ "mac": "cmd+alt+a",
+ "win": "ctrl+alt+a",
+ "linux": "ctrl+alt+a"
}
],
"submenus": [
diff --git a/src/package.nls.ca.json b/src/package.nls.ca.json
index 537a4522b2d..902f798cbed 100644
--- a/src/package.nls.ca.json
+++ b/src/package.nls.ca.json
@@ -14,6 +14,7 @@
"command.terminal.fixCommand.title": "Corregir Aquesta Ordre",
"command.terminal.explainCommand.title": "Explicar Aquesta Ordre",
"command.acceptInput.title": "Acceptar Entrada/Suggeriment",
+ "command.toggleAutoApprove.title": "Alternar Auto-Aprovació",
"views.activitybar.title": "Roo Code",
"views.contextMenu.label": "Roo Code",
"views.terminalMenu.label": "Roo Code",
@@ -31,6 +32,7 @@
"commands.deniedCommands.description": "Prefixos d'ordres que seran automàticament denegats sense demanar aprovació. En cas de conflictes amb ordres permeses, la coincidència de prefix més llarga té prioritat. Afegeix * per denegar totes les ordres.",
"commands.commandExecutionTimeout.description": "Temps màxim en segons per esperar que l'execució de l'ordre es completi abans d'esgotar el temps (0 = sense temps límit, 1-600s, per defecte: 0s)",
"commands.commandTimeoutAllowlist.description": "Prefixos d'ordres que estan exclosos del temps límit d'execució d'ordres. Les ordres que coincideixin amb aquests prefixos s'executaran sense restriccions de temps límit.",
+ "commands.preventCompletionWithOpenTodos.description": "Evitar la finalització de tasques quan hi ha tasques pendents incompletes a la llista de tasques",
"settings.vsCodeLmModelSelector.description": "Configuració per a l'API del model de llenguatge VSCode",
"settings.vsCodeLmModelSelector.vendor.description": "El proveïdor del model de llenguatge (p. ex. copilot)",
"settings.vsCodeLmModelSelector.family.description": "La família del model de llenguatge (p. ex. gpt-4)",
@@ -39,5 +41,6 @@
"settings.autoImportSettingsPath.description": "Ruta a un fitxer de configuració de RooCode per importar automàticament en iniciar l'extensió. Admet rutes absolutes i rutes relatives al directori d'inici (per exemple, '~/Documents/roo-code-settings.json'). Deixeu-ho en blanc per desactivar la importació automàtica.",
"settings.useAgentRules.description": "Activa la càrrega de fitxers AGENTS.md per a regles específiques de l'agent (vegeu https://agent-rules.org/)",
"settings.apiRequestTimeout.description": "Temps màxim en segons per esperar les respostes de l'API (0 = sense temps d'espera, 1-3600s, per defecte: 600s). Es recomanen valors més alts per a proveïdors locals com LM Studio i Ollama que poden necessitar més temps de processament.",
+ "settings.newTaskRequireTodos.description": "Requerir el paràmetre de tasques pendents quan es creïn noves tasques amb l'eina new_task",
"settings.codeIndex.embeddingBatchSize.description": "La mida del lot per a operacions d'incrustació durant la indexació de codi. Ajusta això segons els límits del teu proveïdor d'API. Per defecte és 60."
}
diff --git a/src/package.nls.de.json b/src/package.nls.de.json
index fb43e289072..d8043da94e3 100644
--- a/src/package.nls.de.json
+++ b/src/package.nls.de.json
@@ -14,6 +14,7 @@
"command.terminal.fixCommand.title": "Diesen Befehl Reparieren",
"command.terminal.explainCommand.title": "Diesen Befehl Erklären",
"command.acceptInput.title": "Eingabe/Vorschlag Akzeptieren",
+ "command.toggleAutoApprove.title": "Auto-Genehmigung Umschalten",
"views.activitybar.title": "Roo Code",
"views.contextMenu.label": "Roo Code",
"views.terminalMenu.label": "Roo Code",
@@ -31,6 +32,7 @@
"commands.deniedCommands.description": "Befehlspräfixe, die automatisch abgelehnt werden, ohne nach Genehmigung zu fragen. Bei Konflikten mit erlaubten Befehlen hat die längste Präfix-Übereinstimmung Vorrang. Füge * hinzu, um alle Befehle abzulehnen.",
"commands.commandExecutionTimeout.description": "Maximale Zeit in Sekunden, die auf den Abschluss der Befehlsausführung gewartet wird, bevor ein Timeout auftritt (0 = kein Timeout, 1-600s, Standard: 0s)",
"commands.commandTimeoutAllowlist.description": "Befehlspräfixe, die vom Timeout der Befehlsausführung ausgeschlossen sind. Befehle, die diesen Präfixen entsprechen, werden ohne Timeout-Beschränkungen ausgeführt.",
+ "commands.preventCompletionWithOpenTodos.description": "Aufgabenabschluss verhindern, wenn unvollständige Todos in der Todo-Liste vorhanden sind",
"settings.vsCodeLmModelSelector.description": "Einstellungen für die VSCode-Sprachmodell-API",
"settings.vsCodeLmModelSelector.vendor.description": "Der Anbieter des Sprachmodells (z.B. copilot)",
"settings.vsCodeLmModelSelector.family.description": "Die Familie des Sprachmodells (z.B. gpt-4)",
@@ -39,5 +41,6 @@
"settings.autoImportSettingsPath.description": "Pfad zu einer RooCode-Konfigurationsdatei, die beim Start der Erweiterung automatisch importiert wird. Unterstützt absolute Pfade und Pfade relativ zum Home-Verzeichnis (z.B. '~/Documents/roo-code-settings.json'). Leer lassen, um den automatischen Import zu deaktivieren.",
"settings.useAgentRules.description": "Aktiviert das Laden von AGENTS.md-Dateien für agentenspezifische Regeln (siehe https://agent-rules.org/)",
"settings.apiRequestTimeout.description": "Maximale Wartezeit in Sekunden auf API-Antworten (0 = kein Timeout, 1-3600s, Standard: 600s). Höhere Werte werden für lokale Anbieter wie LM Studio und Ollama empfohlen, die möglicherweise mehr Verarbeitungszeit benötigen.",
+ "settings.newTaskRequireTodos.description": "Todos-Parameter beim Erstellen neuer Aufgaben mit dem new_task-Tool erfordern",
"settings.codeIndex.embeddingBatchSize.description": "Die Batch-Größe für Embedding-Operationen während der Code-Indexierung. Passe dies an die Limits deines API-Anbieters an. Standard ist 60."
}
diff --git a/src/package.nls.es.json b/src/package.nls.es.json
index 95029057a9d..000b353550c 100644
--- a/src/package.nls.es.json
+++ b/src/package.nls.es.json
@@ -14,6 +14,7 @@
"command.terminal.fixCommand.title": "Corregir Este Comando",
"command.terminal.explainCommand.title": "Explicar Este Comando",
"command.acceptInput.title": "Aceptar Entrada/Sugerencia",
+ "command.toggleAutoApprove.title": "Alternar Auto-Aprobación",
"views.activitybar.title": "Roo Code",
"views.contextMenu.label": "Roo Code",
"views.terminalMenu.label": "Roo Code",
@@ -31,6 +32,7 @@
"commands.deniedCommands.description": "Prefijos de comandos que serán automáticamente denegados sin solicitar aprobación. En caso de conflictos con comandos permitidos, la coincidencia de prefijo más larga tiene prioridad. Añade * para denegar todos los comandos.",
"commands.commandExecutionTimeout.description": "Tiempo máximo en segundos para esperar que se complete la ejecución del comando antes de que expire (0 = sin tiempo límite, 1-600s, predeterminado: 0s)",
"commands.commandTimeoutAllowlist.description": "Prefijos de comandos que están excluidos del tiempo límite de ejecución de comandos. Los comandos que coincidan con estos prefijos se ejecutarán sin restricciones de tiempo límite.",
+ "commands.preventCompletionWithOpenTodos.description": "Prevenir la finalización de tareas cuando hay todos incompletos en la lista de todos",
"settings.vsCodeLmModelSelector.description": "Configuración para la API del modelo de lenguaje VSCode",
"settings.vsCodeLmModelSelector.vendor.description": "El proveedor del modelo de lenguaje (ej. copilot)",
"settings.vsCodeLmModelSelector.family.description": "La familia del modelo de lenguaje (ej. gpt-4)",
@@ -39,5 +41,6 @@
"settings.autoImportSettingsPath.description": "Ruta a un archivo de configuración de RooCode para importar automáticamente al iniciar la extensión. Admite rutas absolutas y rutas relativas al directorio de inicio (por ejemplo, '~/Documents/roo-code-settings.json'). Dejar vacío para desactivar la importación automática.",
"settings.useAgentRules.description": "Habilita la carga de archivos AGENTS.md para reglas específicas del agente (ver https://agent-rules.org/)",
"settings.apiRequestTimeout.description": "Tiempo máximo en segundos de espera para las respuestas de la API (0 = sin tiempo de espera, 1-3600s, por defecto: 600s). Se recomiendan valores más altos para proveedores locales como LM Studio y Ollama que puedan necesitar más tiempo de procesamiento.",
+ "settings.newTaskRequireTodos.description": "Requerir el parámetro todos al crear nuevas tareas con la herramienta new_task",
"settings.codeIndex.embeddingBatchSize.description": "El tamaño del lote para operaciones de embedding durante la indexación de código. Ajusta esto según los límites de tu proveedor de API. Por defecto es 60."
}
diff --git a/src/package.nls.fr.json b/src/package.nls.fr.json
index 3939451d673..0ba6ddeb8fc 100644
--- a/src/package.nls.fr.json
+++ b/src/package.nls.fr.json
@@ -14,6 +14,7 @@
"command.terminal.fixCommand.title": "Corriger cette Commande",
"command.terminal.explainCommand.title": "Expliquer cette Commande",
"command.acceptInput.title": "Accepter l'Entrée/Suggestion",
+ "command.toggleAutoApprove.title": "Basculer Auto-Approbation",
"views.activitybar.title": "Roo Code",
"views.contextMenu.label": "Roo Code",
"views.terminalMenu.label": "Roo Code",
@@ -31,6 +32,7 @@
"commands.deniedCommands.description": "Préfixes de commandes qui seront automatiquement refusés sans demander d'approbation. En cas de conflit avec les commandes autorisées, la correspondance de préfixe la plus longue a la priorité. Ajouter * pour refuser toutes les commandes.",
"commands.commandExecutionTimeout.description": "Temps maximum en secondes pour attendre que l'exécution de la commande se termine avant expiration (0 = pas de délai, 1-600s, défaut : 0s)",
"commands.commandTimeoutAllowlist.description": "Préfixes de commandes qui sont exclus du délai d'exécution des commandes. Les commandes correspondant à ces préfixes s'exécuteront sans restrictions de délai.",
+ "commands.preventCompletionWithOpenTodos.description": "Empêcher l'achèvement des tâches lorsqu'il y a des todos incomplets dans la liste de todos",
"settings.vsCodeLmModelSelector.description": "Paramètres pour l'API du modèle de langage VSCode",
"settings.vsCodeLmModelSelector.vendor.description": "Le fournisseur du modèle de langage (ex: copilot)",
"settings.vsCodeLmModelSelector.family.description": "La famille du modèle de langage (ex: gpt-4)",
@@ -39,5 +41,6 @@
"settings.autoImportSettingsPath.description": "Chemin d'accès à un fichier de configuration RooCode à importer automatiquement au démarrage de l'extension. Prend en charge les chemins absolus et les chemins relatifs au répertoire de base (par exemple, '~/Documents/roo-code-settings.json'). Laisser vide pour désactiver l'importation automatique.",
"settings.useAgentRules.description": "Activer le chargement des fichiers AGENTS.md pour les règles spécifiques à l'agent (voir https://agent-rules.org/)",
"settings.apiRequestTimeout.description": "Temps maximum en secondes d'attente pour les réponses de l'API (0 = pas de timeout, 1-3600s, par défaut : 600s). Des valeurs plus élevées sont recommandées pour les fournisseurs locaux comme LM Studio et Ollama qui peuvent nécessiter plus de temps de traitement.",
+ "settings.newTaskRequireTodos.description": "Exiger le paramètre todos lors de la création de nouvelles tâches avec l'outil new_task",
"settings.codeIndex.embeddingBatchSize.description": "La taille du lot pour les opérations d'embedding lors de l'indexation du code. Ajustez ceci selon les limites de votre fournisseur d'API. Par défaut, c'est 60."
}
diff --git a/src/package.nls.hi.json b/src/package.nls.hi.json
index 25481f425fa..d4b4bb1cd01 100644
--- a/src/package.nls.hi.json
+++ b/src/package.nls.hi.json
@@ -14,6 +14,7 @@
"command.terminal.fixCommand.title": "यह कमांड ठीक करें",
"command.terminal.explainCommand.title": "यह कमांड समझाएं",
"command.acceptInput.title": "इनपुट/सुझाव स्वीकारें",
+ "command.toggleAutoApprove.title": "ऑटो-अनुमोदन टॉगल करें",
"views.activitybar.title": "Roo Code",
"views.contextMenu.label": "Roo Code",
"views.terminalMenu.label": "Roo Code",
@@ -31,6 +32,7 @@
"commands.deniedCommands.description": "कमांड प्रीफिक्स जो स्वचालित रूप से अस्वीकार कर दिए जाएंगे बिना अनुमोदन मांगे। अनुमतित कमांड के साथ संघर्ष की स्थिति में, सबसे लंबा प्रीफिक्स मैच प्राथमिकता लेता है। सभी कमांड को अस्वीकार करने के लिए * जोड़ें।",
"commands.commandExecutionTimeout.description": "कमांड निष्पादन पूरा होने का इंतजार करने के लिए अधिकतम समय सेकंड में, समय समाप्त होने से पहले (0 = कोई समय सीमा नहीं, 1-600s, डिफ़ॉल्ट: 0s)",
"commands.commandTimeoutAllowlist.description": "कमांड प्रीफिक्स जो कमांड निष्पादन टाइमआउट से बाहर रखे गए हैं। इन प्रीफिक्स से मेल खाने वाले कमांड बिना टाइमआउट प्रतिबंधों के चलेंगे।",
+ "commands.preventCompletionWithOpenTodos.description": "जब टूडू सूची में अधूरे टूडू हों तो कार्य पूर्णता को रोकें",
"settings.vsCodeLmModelSelector.description": "VSCode भाषा मॉडल API के लिए सेटिंग्स",
"settings.vsCodeLmModelSelector.vendor.description": "भाषा मॉडल का विक्रेता (उदा. copilot)",
"settings.vsCodeLmModelSelector.family.description": "भाषा मॉडल का परिवार (उदा. gpt-4)",
@@ -39,5 +41,6 @@
"settings.autoImportSettingsPath.description": "RooCode कॉन्फ़िगरेशन फ़ाइल का पथ जिसे एक्सटेंशन स्टार्टअप पर स्वचालित रूप से आयात किया जाएगा। होम डायरेक्टरी के सापेक्ष पूर्ण पथ और पथों का समर्थन करता है (उदाहरण के लिए '~/Documents/roo-code-settings.json')। ऑटो-इंपोर्ट को अक्षम करने के लिए खाली छोड़ दें।",
"settings.useAgentRules.description": "एजेंट-विशिष्ट नियमों के लिए AGENTS.md फ़ाइलों को लोड करना सक्षम करें (देखें https://agent-rules.org/)",
"settings.apiRequestTimeout.description": "एपीआई प्रतिक्रियाओं की प्रतीक्षा करने के लिए सेकंड में अधिकतम समय (0 = कोई टाइमआउट नहीं, 1-3600s, डिफ़ॉल्ट: 600s)। एलएम स्टूडियो और ओलामा जैसे स्थानीय प्रदाताओं के लिए उच्च मानों की सिफारिश की जाती है जिन्हें अधिक प्रसंस्करण समय की आवश्यकता हो सकती है।",
+ "settings.newTaskRequireTodos.description": "new_task टूल के साथ नए कार्य बनाते समय टूडू पैरामीटर की आवश्यकता होती है",
"settings.codeIndex.embeddingBatchSize.description": "कोड इंडेक्सिंग के दौरान एम्बेडिंग ऑपरेशन के लिए बैच साइज़। इसे अपने API प्रदाता की सीमाओं के अनुसार समायोजित करें। डिफ़ॉल्ट 60 है।"
}
diff --git a/src/package.nls.id.json b/src/package.nls.id.json
index 0c69028e917..eb361a1ef70 100644
--- a/src/package.nls.id.json
+++ b/src/package.nls.id.json
@@ -26,11 +26,13 @@
"command.terminal.fixCommand.title": "Perbaiki Perintah Ini",
"command.terminal.explainCommand.title": "Jelaskan Perintah Ini",
"command.acceptInput.title": "Terima Input/Saran",
+ "command.toggleAutoApprove.title": "Alihkan Persetujuan Otomatis",
"configuration.title": "Roo Code",
"commands.allowedCommands.description": "Perintah yang dapat dijalankan secara otomatis ketika 'Selalu setujui operasi eksekusi' diaktifkan",
"commands.deniedCommands.description": "Awalan perintah yang akan otomatis ditolak tanpa meminta persetujuan. Jika terjadi konflik dengan perintah yang diizinkan, pencocokan awalan terpanjang akan diprioritaskan. Tambahkan * untuk menolak semua perintah.",
"commands.commandExecutionTimeout.description": "Waktu maksimum dalam detik untuk menunggu eksekusi perintah selesai sebelum timeout (0 = tanpa timeout, 1-600s, default: 0s)",
"commands.commandTimeoutAllowlist.description": "Awalan perintah yang dikecualikan dari timeout eksekusi perintah. Perintah yang cocok dengan awalan ini akan berjalan tanpa batasan timeout.",
+ "commands.preventCompletionWithOpenTodos.description": "Mencegah penyelesaian tugas ketika ada todos yang belum selesai dalam daftar todos",
"settings.vsCodeLmModelSelector.description": "Pengaturan untuk API Model Bahasa VSCode",
"settings.vsCodeLmModelSelector.vendor.description": "Vendor dari model bahasa (misalnya copilot)",
"settings.vsCodeLmModelSelector.family.description": "Keluarga dari model bahasa (misalnya gpt-4)",
@@ -39,5 +41,6 @@
"settings.autoImportSettingsPath.description": "Path ke file konfigurasi RooCode untuk diimpor secara otomatis saat ekstensi dimulai. Mendukung path absolut dan path relatif terhadap direktori home (misalnya '~/Documents/roo-code-settings.json'). Biarkan kosong untuk menonaktifkan impor otomatis.",
"settings.useAgentRules.description": "Aktifkan pemuatan file AGENTS.md untuk aturan khusus agen (lihat https://agent-rules.org/)",
"settings.apiRequestTimeout.description": "Waktu maksimum dalam detik untuk menunggu respons API (0 = tidak ada batas waktu, 1-3600s, default: 600s). Nilai yang lebih tinggi disarankan untuk penyedia lokal seperti LM Studio dan Ollama yang mungkin memerlukan lebih banyak waktu pemrosesan.",
+ "settings.newTaskRequireTodos.description": "Memerlukan parameter todos saat membuat tugas baru dengan alat new_task",
"settings.codeIndex.embeddingBatchSize.description": "Ukuran batch untuk operasi embedding selama pengindeksan kode. Sesuaikan ini berdasarkan batas penyedia API kamu. Default adalah 60."
}
diff --git a/src/package.nls.it.json b/src/package.nls.it.json
index 5ce3a765668..78989df6fe7 100644
--- a/src/package.nls.it.json
+++ b/src/package.nls.it.json
@@ -14,6 +14,7 @@
"command.terminal.fixCommand.title": "Correggi Questo Comando",
"command.terminal.explainCommand.title": "Spiega Questo Comando",
"command.acceptInput.title": "Accetta Input/Suggerimento",
+ "command.toggleAutoApprove.title": "Attiva/Disattiva Auto-Approvazione",
"views.activitybar.title": "Roo Code",
"views.contextMenu.label": "Roo Code",
"views.terminalMenu.label": "Roo Code",
@@ -31,6 +32,7 @@
"commands.deniedCommands.description": "Prefissi di comandi che verranno automaticamente rifiutati senza richiedere approvazione. In caso di conflitti con comandi consentiti, la corrispondenza del prefisso più lungo ha la precedenza. Aggiungi * per rifiutare tutti i comandi.",
"commands.commandExecutionTimeout.description": "Tempo massimo in secondi per attendere il completamento dell'esecuzione del comando prima del timeout (0 = nessun timeout, 1-600s, predefinito: 0s)",
"commands.commandTimeoutAllowlist.description": "Prefissi di comandi che sono esclusi dal timeout di esecuzione dei comandi. I comandi che corrispondono a questi prefissi verranno eseguiti senza restrizioni di timeout.",
+ "commands.preventCompletionWithOpenTodos.description": "Impedire il completamento delle attività quando ci sono todos incompleti nella lista dei todos",
"settings.vsCodeLmModelSelector.description": "Impostazioni per l'API del modello linguistico VSCode",
"settings.vsCodeLmModelSelector.vendor.description": "Il fornitore del modello linguistico (es. copilot)",
"settings.vsCodeLmModelSelector.family.description": "La famiglia del modello linguistico (es. gpt-4)",
@@ -39,5 +41,6 @@
"settings.autoImportSettingsPath.description": "Percorso di un file di configurazione di RooCode da importare automaticamente all'avvio dell'estensione. Supporta percorsi assoluti e percorsi relativi alla directory home (ad es. '~/Documents/roo-code-settings.json'). Lasciare vuoto per disabilitare l'importazione automatica.",
"settings.useAgentRules.description": "Abilita il caricamento dei file AGENTS.md per regole specifiche dell'agente (vedi https://agent-rules.org/)",
"settings.apiRequestTimeout.description": "Tempo massimo in secondi di attesa per le risposte API (0 = nessun timeout, 1-3600s, predefinito: 600s). Valori più alti sono consigliati per provider locali come LM Studio e Ollama che potrebbero richiedere più tempo di elaborazione.",
+ "settings.newTaskRequireTodos.description": "Richiedere il parametro todos quando si creano nuove attività con lo strumento new_task",
"settings.codeIndex.embeddingBatchSize.description": "La dimensione del batch per le operazioni di embedding durante l'indicizzazione del codice. Regola questo in base ai limiti del tuo provider API. Il valore predefinito è 60."
}
diff --git a/src/package.nls.ja.json b/src/package.nls.ja.json
index b53b94e6ee4..3eb059cbd9a 100644
--- a/src/package.nls.ja.json
+++ b/src/package.nls.ja.json
@@ -26,11 +26,13 @@
"command.terminal.fixCommand.title": "このコマンドを修正",
"command.terminal.explainCommand.title": "このコマンドを説明",
"command.acceptInput.title": "入力/提案を承認",
+ "command.toggleAutoApprove.title": "自動承認を切替",
"configuration.title": "Roo Code",
"commands.allowedCommands.description": "'常に実行操作を承認する'が有効な場合に自動実行できるコマンド",
"commands.deniedCommands.description": "承認を求めずに自動的に拒否されるコマンドプレフィックス。許可されたコマンドとの競合がある場合、最長プレフィックスマッチが優先されます。すべてのコマンドを拒否するには * を追加してください。",
"commands.commandExecutionTimeout.description": "コマンド実行の完了を待つ最大時間(秒)、タイムアウトまで(0 = タイムアウトなし、1-600秒、デフォルト: 0秒)",
"commands.commandTimeoutAllowlist.description": "コマンド実行タイムアウトから除外されるコマンドプレフィックス。これらのプレフィックスに一致するコマンドは、タイムアウト制限なしで実行されます。",
+ "commands.preventCompletionWithOpenTodos.description": "TODOリストに未完了のTODOがある場合にタスクの完了を防ぐ",
"settings.vsCodeLmModelSelector.description": "VSCode 言語モデル API の設定",
"settings.vsCodeLmModelSelector.vendor.description": "言語モデルのベンダー(例:copilot)",
"settings.vsCodeLmModelSelector.family.description": "言語モデルのファミリー(例:gpt-4)",
@@ -39,5 +41,6 @@
"settings.autoImportSettingsPath.description": "拡張機能の起動時に自動的にインポートするRooCode設定ファイルへのパス。絶対パスとホームディレクトリからの相対パスをサポートします(例:'~/Documents/roo-code-settings.json')。自動インポートを無効にするには、空のままにします。",
"settings.useAgentRules.description": "エージェント固有のルールのためにAGENTS.mdファイルの読み込みを有効にします(参照:https://agent-rules.org/)",
"settings.apiRequestTimeout.description": "API応答を待機する最大時間(秒)(0 = タイムアウトなし、1-3600秒、デフォルト: 600秒)。LM StudioやOllamaのような、より多くの処理時間を必要とする可能性のあるローカルプロバイダーには、より高い値が推奨されます。",
+ "settings.newTaskRequireTodos.description": "new_taskツールで新しいタスクを作成する際にtodosパラメータを必須にする",
"settings.codeIndex.embeddingBatchSize.description": "コードインデックス作成中のエンベディング操作のバッチサイズ。APIプロバイダーの制限に基づいてこれを調整してください。デフォルトは60です。"
}
diff --git a/src/package.nls.json b/src/package.nls.json
index b0b7f401f85..1db69777ac1 100644
--- a/src/package.nls.json
+++ b/src/package.nls.json
@@ -26,6 +26,7 @@
"command.terminal.fixCommand.title": "Fix This Command",
"command.terminal.explainCommand.title": "Explain This Command",
"command.acceptInput.title": "Accept Input/Suggestion",
+ "command.toggleAutoApprove.title": "Toggle Auto-Approve",
"configuration.title": "Roo Code",
"commands.allowedCommands.description": "Commands that can be auto-executed when 'Always approve execute operations' is enabled",
"commands.deniedCommands.description": "Command prefixes that will be automatically denied without asking for approval. In case of conflicts with allowed commands, the longest prefix match takes precedence. Add * to deny all commands.",
diff --git a/src/package.nls.ko.json b/src/package.nls.ko.json
index bd03331d4ec..a566b2a038c 100644
--- a/src/package.nls.ko.json
+++ b/src/package.nls.ko.json
@@ -14,6 +14,7 @@
"command.terminal.fixCommand.title": "이 명령어 수정",
"command.terminal.explainCommand.title": "이 명령어 설명",
"command.acceptInput.title": "입력/제안 수락",
+ "command.toggleAutoApprove.title": "자동 승인 전환",
"views.activitybar.title": "Roo Code",
"views.contextMenu.label": "Roo Code",
"views.terminalMenu.label": "Roo Code",
@@ -31,6 +32,7 @@
"commands.deniedCommands.description": "승인을 요청하지 않고 자동으로 거부될 명령어 접두사. 허용된 명령어와 충돌하는 경우 가장 긴 접두사 일치가 우선됩니다. 모든 명령어를 거부하려면 *를 추가하세요.",
"commands.commandExecutionTimeout.description": "명령어 실행이 완료되기를 기다리는 최대 시간(초), 타임아웃 전까지 (0 = 타임아웃 없음, 1-600초, 기본값: 0초)",
"commands.commandTimeoutAllowlist.description": "명령어 실행 타임아웃에서 제외되는 명령어 접두사. 이러한 접두사와 일치하는 명령어는 타임아웃 제한 없이 실행됩니다.",
+ "commands.preventCompletionWithOpenTodos.description": "할 일 목록에 미완료 할 일이 있을 때 작업 완료를 방지",
"settings.vsCodeLmModelSelector.description": "VSCode 언어 모델 API 설정",
"settings.vsCodeLmModelSelector.vendor.description": "언어 모델 공급자 (예: copilot)",
"settings.vsCodeLmModelSelector.family.description": "언어 모델 계열 (예: gpt-4)",
@@ -39,5 +41,6 @@
"settings.autoImportSettingsPath.description": "확장 프로그램 시작 시 자동으로 가져올 RooCode 구성 파일의 경로입니다. 절대 경로 및 홈 디렉토리에 대한 상대 경로를 지원합니다(예: '~/Documents/roo-code-settings.json'). 자동 가져오기를 비활성화하려면 비워 둡니다.",
"settings.useAgentRules.description": "에이전트별 규칙에 대한 AGENTS.md 파일 로드를 활성화합니다 (참조: https://agent-rules.org/)",
"settings.apiRequestTimeout.description": "API 응답을 기다리는 최대 시간(초) (0 = 시간 초과 없음, 1-3600초, 기본값: 600초). 더 많은 처리 시간이 필요할 수 있는 LM Studio 및 Ollama와 같은 로컬 공급자에게는 더 높은 값을 사용하는 것이 좋습니다.",
+ "settings.newTaskRequireTodos.description": "new_task 도구로 새 작업을 생성할 때 todos 매개변수 필요",
"settings.codeIndex.embeddingBatchSize.description": "코드 인덱싱 중 임베딩 작업의 배치 크기입니다. API 공급자의 제한에 따라 이를 조정하세요. 기본값은 60입니다."
}
diff --git a/src/package.nls.nl.json b/src/package.nls.nl.json
index 683a096c122..006725326b7 100644
--- a/src/package.nls.nl.json
+++ b/src/package.nls.nl.json
@@ -26,11 +26,13 @@
"command.terminal.fixCommand.title": "Repareer Dit Commando",
"command.terminal.explainCommand.title": "Leg Dit Commando Uit",
"command.acceptInput.title": "Invoer/Suggestie Accepteren",
+ "command.toggleAutoApprove.title": "Auto-Goedkeuring Schakelen",
"configuration.title": "Roo Code",
"commands.allowedCommands.description": "Commando's die automatisch kunnen worden uitgevoerd wanneer 'Altijd goedkeuren uitvoerbewerkingen' is ingeschakeld",
"commands.deniedCommands.description": "Commando-prefixen die automatisch worden geweigerd zonder om goedkeuring te vragen. Bij conflicten met toegestane commando's heeft de langste prefix-match voorrang. Voeg * toe om alle commando's te weigeren.",
"commands.commandExecutionTimeout.description": "Maximale tijd in seconden om te wachten tot commando-uitvoering voltooid is voordat er een timeout optreedt (0 = geen timeout, 1-600s, standaard: 0s)",
"commands.commandTimeoutAllowlist.description": "Commando-prefixen die zijn uitgesloten van de commando-uitvoering timeout. Commando's die overeenkomen met deze prefixen worden uitgevoerd zonder timeout-beperkingen.",
+ "commands.preventCompletionWithOpenTodos.description": "Taakvoltooiing voorkomen wanneer er onvolledige todos in de todo-lijst staan",
"settings.vsCodeLmModelSelector.description": "Instellingen voor VSCode Language Model API",
"settings.vsCodeLmModelSelector.vendor.description": "De leverancier van het taalmodel (bijv. copilot)",
"settings.vsCodeLmModelSelector.family.description": "De familie van het taalmodel (bijv. gpt-4)",
@@ -39,5 +41,6 @@
"settings.autoImportSettingsPath.description": "Pad naar een RooCode-configuratiebestand om automatisch te importeren bij het opstarten van de extensie. Ondersteunt absolute paden en paden ten opzichte van de thuismap (bijv. '~/Documents/roo-code-settings.json'). Laat leeg om automatisch importeren uit te schakelen.",
"settings.useAgentRules.description": "Laden van AGENTS.md-bestanden voor agentspecifieke regels inschakelen (zie https://agent-rules.org/)",
"settings.apiRequestTimeout.description": "Maximale tijd in seconden om te wachten op API-reacties (0 = geen time-out, 1-3600s, standaard: 600s). Hogere waarden worden aanbevolen voor lokale providers zoals LM Studio en Ollama die mogelijk meer verwerkingstijd nodig hebben.",
+ "settings.newTaskRequireTodos.description": "Todos-parameter vereisen bij het maken van nieuwe taken met de new_task tool",
"settings.codeIndex.embeddingBatchSize.description": "De batchgrootte voor embedding-operaties tijdens code-indexering. Pas dit aan op basis van de limieten van je API-provider. Standaard is 60."
}
diff --git a/src/package.nls.pl.json b/src/package.nls.pl.json
index 76c10ddfe00..bcf80f72306 100644
--- a/src/package.nls.pl.json
+++ b/src/package.nls.pl.json
@@ -14,6 +14,7 @@
"command.terminal.fixCommand.title": "Napraw tę Komendę",
"command.terminal.explainCommand.title": "Wyjaśnij tę Komendę",
"command.acceptInput.title": "Akceptuj Wprowadzanie/Sugestię",
+ "command.toggleAutoApprove.title": "Przełącz Auto-Zatwierdzanie",
"views.activitybar.title": "Roo Code",
"views.contextMenu.label": "Roo Code",
"views.terminalMenu.label": "Roo Code",
@@ -31,6 +32,7 @@
"commands.deniedCommands.description": "Prefiksy poleceń, które będą automatycznie odrzucane bez pytania o zatwierdzenie. W przypadku konfliktów z dozwolonymi poleceniami, najdłuższe dopasowanie prefiksu ma pierwszeństwo. Dodaj * aby odrzucić wszystkie polecenia.",
"commands.commandExecutionTimeout.description": "Maksymalny czas w sekundach oczekiwania na zakończenie wykonania polecenia przed przekroczeniem limitu czasu (0 = brak limitu czasu, 1-600s, domyślnie: 0s)",
"commands.commandTimeoutAllowlist.description": "Prefiksy poleceń, które są wykluczone z limitu czasu wykonania poleceń. Polecenia pasujące do tych prefiksów będą wykonywane bez ograniczeń czasowych.",
+ "commands.preventCompletionWithOpenTodos.description": "Zapobiegaj ukończeniu zadania gdy na liście zadań są niekompletne todos",
"settings.vsCodeLmModelSelector.description": "Ustawienia dla API modelu językowego VSCode",
"settings.vsCodeLmModelSelector.vendor.description": "Dostawca modelu językowego (np. copilot)",
"settings.vsCodeLmModelSelector.family.description": "Rodzina modelu językowego (np. gpt-4)",
@@ -39,5 +41,6 @@
"settings.autoImportSettingsPath.description": "Ścieżka do pliku konfiguracyjnego RooCode, który ma być automatycznie importowany podczas uruchamiania rozszerzenia. Obsługuje ścieżki bezwzględne i ścieżki względne do katalogu domowego (np. '~/Documents/roo-code-settings.json'). Pozostaw puste, aby wyłączyć automatyczne importowanie.",
"settings.useAgentRules.description": "Włącz wczytywanie plików AGENTS.md dla reguł specyficznych dla agenta (zobacz https://agent-rules.org/)",
"settings.apiRequestTimeout.description": "Maksymalny czas w sekundach oczekiwania na odpowiedzi API (0 = brak limitu czasu, 1-3600s, domyślnie: 600s). Wyższe wartości są zalecane dla lokalnych dostawców, takich jak LM Studio i Ollama, którzy mogą potrzebować więcej czasu na przetwarzanie.",
+ "settings.newTaskRequireTodos.description": "Wymagaj parametru todos podczas tworzenia nowych zadań za pomocą narzędzia new_task",
"settings.codeIndex.embeddingBatchSize.description": "Rozmiar partii dla operacji osadzania podczas indeksowania kodu. Dostosuj to w oparciu o limity twojego dostawcy API. Domyślnie to 60."
}
diff --git a/src/package.nls.pt-BR.json b/src/package.nls.pt-BR.json
index 85cea4d8700..1843bc476b3 100644
--- a/src/package.nls.pt-BR.json
+++ b/src/package.nls.pt-BR.json
@@ -14,6 +14,7 @@
"command.terminal.fixCommand.title": "Corrigir Este Comando",
"command.terminal.explainCommand.title": "Explicar Este Comando",
"command.acceptInput.title": "Aceitar Entrada/Sugestão",
+ "command.toggleAutoApprove.title": "Alternar Auto-Aprovação",
"views.activitybar.title": "Roo Code",
"views.contextMenu.label": "Roo Code",
"views.terminalMenu.label": "Roo Code",
@@ -31,6 +32,7 @@
"commands.deniedCommands.description": "Prefixos de comandos que serão automaticamente negados sem solicitar aprovação. Em caso de conflitos com comandos permitidos, a correspondência de prefixo mais longa tem precedência. Adicione * para negar todos os comandos.",
"commands.commandExecutionTimeout.description": "Tempo máximo em segundos para aguardar a conclusão da execução do comando antes do timeout (0 = sem timeout, 1-600s, padrão: 0s)",
"commands.commandTimeoutAllowlist.description": "Prefixos de comandos que são excluídos do timeout de execução de comandos. Comandos que correspondem a esses prefixos serão executados sem restrições de timeout.",
+ "commands.preventCompletionWithOpenTodos.description": "Impedir a conclusão de tarefas quando há todos incompletos na lista de todos",
"settings.vsCodeLmModelSelector.description": "Configurações para a API do modelo de linguagem do VSCode",
"settings.vsCodeLmModelSelector.vendor.description": "O fornecedor do modelo de linguagem (ex: copilot)",
"settings.vsCodeLmModelSelector.family.description": "A família do modelo de linguagem (ex: gpt-4)",
@@ -39,5 +41,6 @@
"settings.autoImportSettingsPath.description": "Caminho para um arquivo de configuração do RooCode para importar automaticamente na inicialização da extensão. Suporta caminhos absolutos e caminhos relativos ao diretório inicial (por exemplo, '~/Documents/roo-code-settings.json'). Deixe em branco para desativar a importação automática.",
"settings.useAgentRules.description": "Habilita o carregamento de arquivos AGENTS.md para regras específicas do agente (consulte https://agent-rules.org/)",
"settings.apiRequestTimeout.description": "Tempo máximo em segundos de espera pelas respostas da API (0 = sem tempo limite, 1-3600s, padrão: 600s). Valores mais altos são recomendados para provedores locais como LM Studio e Ollama que podem precisar de mais tempo de processamento.",
+ "settings.newTaskRequireTodos.description": "Exigir parâmetro todos ao criar novas tarefas com a ferramenta new_task",
"settings.codeIndex.embeddingBatchSize.description": "O tamanho do lote para operações de embedding durante a indexação de código. Ajuste isso com base nos limites do seu provedor de API. O padrão é 60."
}
diff --git a/src/package.nls.ru.json b/src/package.nls.ru.json
index 83f32373a91..8a50af73894 100644
--- a/src/package.nls.ru.json
+++ b/src/package.nls.ru.json
@@ -26,11 +26,13 @@
"command.terminal.fixCommand.title": "Исправить эту команду",
"command.terminal.explainCommand.title": "Объяснить эту команду",
"command.acceptInput.title": "Принять ввод/предложение",
+ "command.toggleAutoApprove.title": "Переключить автоподтверждение",
"configuration.title": "Roo Code",
"commands.allowedCommands.description": "Команды, которые могут быть автоматически выполнены, когда включена опция 'Всегда подтверждать операции выполнения'",
"commands.deniedCommands.description": "Префиксы команд, которые будут автоматически отклонены без запроса подтверждения. В случае конфликтов с разрешенными командами приоритет имеет самое длинное совпадение префикса. Добавьте * чтобы отклонить все команды.",
"commands.commandExecutionTimeout.description": "Максимальное время в секундах для ожидания завершения выполнения команды до истечения времени ожидания (0 = без тайм-аута, 1-600с, по умолчанию: 0с)",
"commands.commandTimeoutAllowlist.description": "Префиксы команд, которые исключены из тайм-аута выполнения команд. Команды, соответствующие этим префиксам, будут выполняться без ограничений по времени.",
+ "commands.preventCompletionWithOpenTodos.description": "Предотвращать завершение задачи, когда в списке todo есть незавершенные пункты",
"settings.vsCodeLmModelSelector.description": "Настройки для VSCode Language Model API",
"settings.vsCodeLmModelSelector.vendor.description": "Поставщик языковой модели (например, copilot)",
"settings.vsCodeLmModelSelector.family.description": "Семейство языковой модели (например, gpt-4)",
@@ -39,5 +41,6 @@
"settings.autoImportSettingsPath.description": "Путь к файлу конфигурации RooCode для автоматического импорта при запуске расширения. Поддерживает абсолютные пути и пути относительно домашнего каталога (например, '~/Documents/roo-code-settings.json'). Оставьте пустым, чтобы отключить автоматический импорт.",
"settings.useAgentRules.description": "Включить загрузку файлов AGENTS.md для специфичных для агента правил (см. https://agent-rules.org/)",
"settings.apiRequestTimeout.description": "Максимальное время в секундах для ожидания ответов API (0 = нет тайм-аута, 1-3600 с, по умолчанию: 600 с). Рекомендуются более высокие значения для локальных провайдеров, таких как LM Studio и Ollama, которым может потребоваться больше времени на обработку.",
+ "settings.newTaskRequireTodos.description": "Требовать параметр todos при создании новых задач с помощью инструмента new_task",
"settings.codeIndex.embeddingBatchSize.description": "Размер пакета для операций встраивания во время индексации кода. Настройте это в соответствии с ограничениями вашего API-провайдера. По умолчанию 60."
}
diff --git a/src/package.nls.tr.json b/src/package.nls.tr.json
index faf520c0d26..4eec2c70a02 100644
--- a/src/package.nls.tr.json
+++ b/src/package.nls.tr.json
@@ -14,6 +14,7 @@
"command.terminal.fixCommand.title": "Bu Komutu Düzelt",
"command.terminal.explainCommand.title": "Bu Komutu Açıkla",
"command.acceptInput.title": "Girişi/Öneriyi Kabul Et",
+ "command.toggleAutoApprove.title": "Otomatik Onayı Aç/Kapat",
"views.activitybar.title": "Roo Code",
"views.contextMenu.label": "Roo Code",
"views.terminalMenu.label": "Roo Code",
@@ -31,6 +32,7 @@
"commands.deniedCommands.description": "Onay istenmeden otomatik olarak reddedilecek komut önekleri. İzin verilen komutlarla çakışma durumunda en uzun önek eşleşmesi öncelik alır. Tüm komutları reddetmek için * ekleyin.",
"commands.commandExecutionTimeout.description": "Komut yürütmesinin tamamlanmasını beklemek için maksimum süre (saniye), zaman aşımından önce (0 = zaman aşımı yok, 1-600s, varsayılan: 0s)",
"commands.commandTimeoutAllowlist.description": "Komut yürütme zaman aşımından hariç tutulan komut önekleri. Bu öneklerle eşleşen komutlar zaman aşımı kısıtlamaları olmadan çalışacaktır.",
+ "commands.preventCompletionWithOpenTodos.description": "Todo listesinde tamamlanmamış todolar olduğunda görev tamamlanmasını engelle",
"settings.vsCodeLmModelSelector.description": "VSCode dil modeli API'si için ayarlar",
"settings.vsCodeLmModelSelector.vendor.description": "Dil modelinin sağlayıcısı (örn: copilot)",
"settings.vsCodeLmModelSelector.family.description": "Dil modelinin ailesi (örn: gpt-4)",
@@ -39,5 +41,6 @@
"settings.autoImportSettingsPath.description": "Uzantı başlangıcında otomatik olarak içe aktarılacak bir RooCode yapılandırma dosyasının yolu. Mutlak yolları ve ana dizine göreli yolları destekler (ör. '~/Documents/roo-code-settings.json'). Otomatik içe aktarmayı devre dışı bırakmak için boş bırakın.",
"settings.useAgentRules.description": "Aracıya özgü kurallar için AGENTS.md dosyalarının yüklenmesini etkinleştirin (bkz. https://agent-rules.org/)",
"settings.apiRequestTimeout.description": "API yanıtları için beklenecek maksimum süre (saniye cinsinden) (0 = zaman aşımı yok, 1-3600s, varsayılan: 600s). LM Studio ve Ollama gibi daha fazla işlem süresi gerektirebilecek yerel sağlayıcılar için daha yüksek değerler önerilir.",
+ "settings.newTaskRequireTodos.description": "new_task aracıyla yeni görevler oluştururken todos parametresini gerekli kıl",
"settings.codeIndex.embeddingBatchSize.description": "Kod indeksleme sırasında gömme işlemleri için toplu iş boyutu. Bunu API sağlayıcınızın sınırlarına göre ayarlayın. Varsayılan 60'tır."
}
diff --git a/src/package.nls.vi.json b/src/package.nls.vi.json
index 672707a111e..a0c9614dd2f 100644
--- a/src/package.nls.vi.json
+++ b/src/package.nls.vi.json
@@ -14,6 +14,7 @@
"command.terminal.fixCommand.title": "Sửa Lệnh Này",
"command.terminal.explainCommand.title": "Giải Thích Lệnh Này",
"command.acceptInput.title": "Chấp Nhận Đầu Vào/Gợi Ý",
+ "command.toggleAutoApprove.title": "Bật/Tắt Tự Động Phê Duyệt",
"views.activitybar.title": "Roo Code",
"views.contextMenu.label": "Roo Code",
"views.terminalMenu.label": "Roo Code",
@@ -31,6 +32,7 @@
"commands.deniedCommands.description": "Các tiền tố lệnh sẽ được tự động từ chối mà không yêu cầu phê duyệt. Trong trường hợp xung đột với các lệnh được phép, việc khớp tiền tố dài nhất sẽ được ưu tiên. Thêm * để từ chối tất cả các lệnh.",
"commands.commandExecutionTimeout.description": "Thời gian tối đa tính bằng giây để chờ việc thực thi lệnh hoàn thành trước khi hết thời gian chờ (0 = không có thời gian chờ, 1-600s, mặc định: 0s)",
"commands.commandTimeoutAllowlist.description": "Các tiền tố lệnh được loại trừ khỏi thời gian chờ thực thi lệnh. Các lệnh khớp với những tiền tố này sẽ chạy mà không có giới hạn thời gian chờ.",
+ "commands.preventCompletionWithOpenTodos.description": "Ngăn hoàn thành nhiệm vụ khi có các todos chưa hoàn thành trong danh sách todos",
"settings.vsCodeLmModelSelector.description": "Cài đặt cho API mô hình ngôn ngữ VSCode",
"settings.vsCodeLmModelSelector.vendor.description": "Nhà cung cấp mô hình ngôn ngữ (ví dụ: copilot)",
"settings.vsCodeLmModelSelector.family.description": "Họ mô hình ngôn ngữ (ví dụ: gpt-4)",
@@ -39,5 +41,6 @@
"settings.autoImportSettingsPath.description": "Đường dẫn đến tệp cấu hình RooCode để tự động nhập khi khởi động tiện ích mở rộng. Hỗ trợ đường dẫn tuyệt đối và đường dẫn tương đối đến thư mục chính (ví dụ: '~/Documents/roo-code-settings.json'). Để trống để tắt tính năng tự động nhập.",
"settings.useAgentRules.description": "Bật tải tệp AGENTS.md cho các quy tắc dành riêng cho tác nhân (xem https://agent-rules.org/)",
"settings.apiRequestTimeout.description": "Thời gian tối đa tính bằng giây để đợi phản hồi API (0 = không có thời gian chờ, 1-3600 giây, mặc định: 600 giây). Nên sử dụng các giá trị cao hơn cho các nhà cung cấp cục bộ như LM Studio và Ollama có thể cần thêm thời gian xử lý.",
+ "settings.newTaskRequireTodos.description": "Yêu cầu tham số todos khi tạo nhiệm vụ mới với công cụ new_task",
"settings.codeIndex.embeddingBatchSize.description": "Kích thước lô cho các hoạt động nhúng trong quá trình lập chỉ mục mã. Điều chỉnh điều này dựa trên giới hạn của nhà cung cấp API của bạn. Mặc định là 60."
}
diff --git a/src/package.nls.zh-CN.json b/src/package.nls.zh-CN.json
index 94d0ed6c747..caab1a633d7 100644
--- a/src/package.nls.zh-CN.json
+++ b/src/package.nls.zh-CN.json
@@ -14,6 +14,7 @@
"command.terminal.fixCommand.title": "修复此命令",
"command.terminal.explainCommand.title": "解释此命令",
"command.acceptInput.title": "接受输入/建议",
+ "command.toggleAutoApprove.title": "切换自动批准",
"views.activitybar.title": "Roo Code",
"views.contextMenu.label": "Roo Code",
"views.terminalMenu.label": "Roo Code",
@@ -31,6 +32,7 @@
"commands.deniedCommands.description": "将自动拒绝而无需请求批准的命令前缀。与允许命令冲突时,最长前缀匹配优先。添加 * 拒绝所有命令。",
"commands.commandExecutionTimeout.description": "等待命令执行完成的最大时间(秒),超时前(0 = 无超时,1-600秒,默认:0秒)",
"commands.commandTimeoutAllowlist.description": "从命令执行超时中排除的命令前缀。匹配这些前缀的命令将在没有超时限制的情况下运行。",
+ "commands.preventCompletionWithOpenTodos.description": "当待办事项列表中有未完成的待办事项时阻止任务完成",
"settings.vsCodeLmModelSelector.description": "VSCode 语言模型 API 的设置",
"settings.vsCodeLmModelSelector.vendor.description": "语言模型的供应商(例如:copilot)",
"settings.vsCodeLmModelSelector.family.description": "语言模型的系列(例如:gpt-4)",
@@ -39,5 +41,6 @@
"settings.autoImportSettingsPath.description": "RooCode 配置文件的路径,用于在扩展启动时自动导入。支持绝对路径和相对于主目录的路径(例如 '~/Documents/roo-code-settings.json')。留空以禁用自动导入。",
"settings.useAgentRules.description": "为特定于代理的规则启用 AGENTS.md 文件的加载(请参阅 https://agent-rules.org/)",
"settings.apiRequestTimeout.description": "等待 API 响应的最长时间(秒)(0 = 无超时,1-3600秒,默认值:600秒)。对于像 LM Studio 和 Ollama 这样可能需要更多处理时间的本地提供商,建议使用更高的值。",
+ "settings.newTaskRequireTodos.description": "使用 new_task 工具创建新任务时需要 todos 参数",
"settings.codeIndex.embeddingBatchSize.description": "代码索引期间嵌入操作的批处理大小。根据 API 提供商的限制调整此设置。默认值为 60。"
}
diff --git a/src/package.nls.zh-TW.json b/src/package.nls.zh-TW.json
index b4fd9e3cc7c..8ad1011bb48 100644
--- a/src/package.nls.zh-TW.json
+++ b/src/package.nls.zh-TW.json
@@ -14,6 +14,7 @@
"command.terminal.fixCommand.title": "修復此命令",
"command.terminal.explainCommand.title": "解釋此命令",
"command.acceptInput.title": "接受輸入/建議",
+ "command.toggleAutoApprove.title": "切換自動批准",
"views.activitybar.title": "Roo Code",
"views.contextMenu.label": "Roo Code",
"views.terminalMenu.label": "Roo Code",
@@ -31,6 +32,7 @@
"commands.deniedCommands.description": "將自動拒絕而無需請求批准的命令前綴。與允許命令衝突時,最長前綴匹配優先。新增 * 拒絕所有命令。",
"commands.commandExecutionTimeout.description": "等待命令執行完成的最大時間(秒),逾時前(0 = 無逾時,1-600秒,預設:0秒)",
"commands.commandTimeoutAllowlist.description": "從命令執行逾時中排除的命令前綴。符合這些前綴的命令將在沒有逾時限制的情況下執行。",
+ "commands.preventCompletionWithOpenTodos.description": "當待辦事項清單中有未完成的待辦事項時阻止工作完成",
"settings.vsCodeLmModelSelector.description": "VSCode 語言模型 API 的設定",
"settings.vsCodeLmModelSelector.vendor.description": "語言模型供應商(例如:copilot)",
"settings.vsCodeLmModelSelector.family.description": "語言模型系列(例如:gpt-4)",
@@ -39,5 +41,6 @@
"settings.autoImportSettingsPath.description": "RooCode 設定檔案的路徑,用於在擴充功能啟動時自動匯入。支援絕對路徑和相對於主目錄的路徑(例如 '~/Documents/roo-code-settings.json')。留空以停用自動匯入。",
"settings.useAgentRules.description": "為特定於代理的規則啟用 AGENTS.md 檔案的載入(請參閱 https://agent-rules.org/)",
"settings.apiRequestTimeout.description": "等待 API 回應的最長時間(秒)(0 = 無超時,1-3600秒,預設值:600秒)。對於像 LM Studio 和 Ollama 這樣可能需要更多處理時間的本地提供商,建議使用更高的值。",
+ "settings.newTaskRequireTodos.description": "使用 new_task 工具建立新工作時需要 todos 參數",
"settings.codeIndex.embeddingBatchSize.description": "程式碼索引期間嵌入操作的批次大小。根據 API 提供商的限制調整此設定。預設值為 60。"
}
diff --git a/src/shared/ExtensionMessage.ts b/src/shared/ExtensionMessage.ts
index aaddc520cb9..66f389f81c1 100644
--- a/src/shared/ExtensionMessage.ts
+++ b/src/shared/ExtensionMessage.ts
@@ -10,6 +10,7 @@ import type {
MarketplaceItem,
TodoItem,
CloudUserInfo,
+ CloudOrganizationMembership,
OrganizationAllowList,
ShareVisibility,
QueuedMessage,
@@ -124,6 +125,7 @@ export interface ExtensionMessage {
| "commands"
| "insertTextIntoTextarea"
| "dismissedUpsells"
+ | "organizationSwitchResult"
text?: string
payload?: any // Add a generic payload for now, can refine later
action?:
@@ -137,6 +139,7 @@ export interface ExtensionMessage {
| "didBecomeVisible"
| "focusInput"
| "switchTab"
+ | "toggleAutoApprove"
invoke?: "newChat" | "sendMessage" | "primaryButtonClick" | "secondaryButtonClick" | "setChatBoxMessage"
state?: ExtensionState
images?: string[]
@@ -201,6 +204,7 @@ export interface ExtensionMessage {
commands?: Command[]
queuedMessages?: QueuedMessage[]
list?: string[] // For dismissedUpsells
+ organizationId?: string | null // For organizationSwitchResult
}
export type ExtensionState = Pick<
@@ -283,6 +287,7 @@ export type ExtensionState = Pick<
| "maxDiagnosticMessages"
| "openRouterImageGenerationSelectedModel"
| "includeTaskHistoryInEnhance"
+ | "reasoningBlockCollapsed"
> & {
version: string
clineMessages: ClineMessage[]
@@ -326,6 +331,7 @@ export type ExtensionState = Pick<
cloudUserInfo: CloudUserInfo | null
cloudIsAuthenticated: boolean
cloudApiUrl?: string
+ cloudOrganizations?: CloudOrganizationMembership[]
sharingEnabled: boolean
organizationAllowList: OrganizationAllowList
organizationSettingsVersion?: number
diff --git a/src/shared/ProfileValidator.ts b/src/shared/ProfileValidator.ts
index 78ff6ed9fe1..c76b12d47bf 100644
--- a/src/shared/ProfileValidator.ts
+++ b/src/shared/ProfileValidator.ts
@@ -59,6 +59,7 @@ export class ProfileValidator {
return profile.openAiModelId
case "anthropic":
case "openai-native":
+ case "openai-native-codex":
case "bedrock":
case "vertex":
case "gemini":
diff --git a/src/shared/WebviewMessage.ts b/src/shared/WebviewMessage.ts
index 93d0b9bc452..d43a2fce043 100644
--- a/src/shared/WebviewMessage.ts
+++ b/src/shared/WebviewMessage.ts
@@ -102,6 +102,7 @@ export interface WebviewMessage {
| "browserViewportSize"
| "screenshotQuality"
| "remoteBrowserHost"
+ | "openKeyboardShortcuts"
| "openMcpSettings"
| "openProjectMcpSettings"
| "restartMcpServer"
@@ -180,8 +181,10 @@ export interface WebviewMessage {
| "hasOpenedModeSelector"
| "cloudButtonClicked"
| "rooCloudSignIn"
+ | "cloudLandingPageSignIn"
| "rooCloudSignOut"
| "rooCloudManualUrl"
+ | "switchOrganization"
| "condenseTaskContextRequest"
| "requestIndexingStatus"
| "startIndexing"
@@ -191,6 +194,7 @@ export interface WebviewMessage {
| "focusPanelRequest"
| "profileThresholds"
| "setHistoryPreviewCollapsed"
+ | "setReasoningBlockCollapsed"
| "openExternal"
| "filterMarketplaceItems"
| "marketplaceButtonClicked"
@@ -272,6 +276,7 @@ export interface WebviewMessage {
checkOnly?: boolean // For deleteCustomMode check
upsellId?: string // For dismissUpsell
list?: string[] // For dismissedUpsells response
+ organizationId?: string | null // For organization switching
codeIndexSettings?: {
// Global state settings
codebaseIndexEnabled: boolean
diff --git a/src/shared/__tests__/ProfileValidator.spec.ts b/src/shared/__tests__/ProfileValidator.spec.ts
index 5cfe7a720bf..84df41ac552 100644
--- a/src/shared/__tests__/ProfileValidator.spec.ts
+++ b/src/shared/__tests__/ProfileValidator.spec.ts
@@ -184,6 +184,7 @@ describe("ProfileValidator", () => {
const apiModelProviders = [
"anthropic",
"openai-native",
+ "openai-native-codex",
"bedrock",
"vertex",
"gemini",
diff --git a/src/shared/__tests__/checkExistApiConfig.spec.ts b/src/shared/__tests__/checkExistApiConfig.spec.ts
index 7696f00cc0c..05ac8081b0b 100644
--- a/src/shared/__tests__/checkExistApiConfig.spec.ts
+++ b/src/shared/__tests__/checkExistApiConfig.spec.ts
@@ -61,4 +61,11 @@ describe("checkExistKey", () => {
}
expect(checkExistKey(config)).toBe(false)
})
+
+ it("should treat openai-native-codex as configured even without oauth path", () => {
+ const config: ProviderSettings = {
+ apiProvider: "openai-native-codex",
+ }
+ expect(checkExistKey(config)).toBe(true)
+ })
})
diff --git a/src/shared/api.ts b/src/shared/api.ts
index 13b66611193..79001cb0ad0 100644
--- a/src/shared/api.ts
+++ b/src/shared/api.ts
@@ -18,6 +18,12 @@ export type ApiHandlerOptions = Omit & {
* Defaults to true; set to false to disable summaries.
*/
enableGpt5ReasoningSummary?: boolean
+ /**
+ * Optional override for Ollama's num_ctx parameter.
+ * When set, this value will be used in Ollama chat requests.
+ * When undefined, Ollama will use the model's default num_ctx from the Modelfile.
+ */
+ ollamaNumCtx?: number
}
// RouterName
diff --git a/src/shared/checkExistApiConfig.ts b/src/shared/checkExistApiConfig.ts
index 4b9af08d5af..99f1c7261a5 100644
--- a/src/shared/checkExistApiConfig.ts
+++ b/src/shared/checkExistApiConfig.ts
@@ -8,7 +8,9 @@ export function checkExistKey(config: ProviderSettings | undefined) {
// Special case for human-relay, fake-ai, claude-code, qwen-code, and roo providers which don't need any configuration.
if (
config.apiProvider &&
- ["human-relay", "fake-ai", "claude-code", "qwen-code", "roo"].includes(config.apiProvider)
+ ["human-relay", "fake-ai", "claude-code", "qwen-code", "roo", "openai-native-codex"].includes(
+ config.apiProvider,
+ )
) {
return true
}
@@ -25,6 +27,7 @@ export function checkExistKey(config: ProviderSettings | undefined) {
config.ollamaModelId,
config.lmStudioModelId,
config.vsCodeLmModelSelector,
+ config.openAiNativeCodexOauthPath,
].some((value) => value !== undefined)
return hasSecretKey || hasOtherConfig
diff --git a/webview-ui/src/App.tsx b/webview-ui/src/App.tsx
index fa38a566e74..220c8cf3af2 100644
--- a/webview-ui/src/App.tsx
+++ b/webview-ui/src/App.tsx
@@ -76,6 +76,7 @@ const App = () => {
cloudUserInfo,
cloudIsAuthenticated,
cloudApiUrl,
+ cloudOrganizations,
renderContext,
mdmCompliant,
} = useExtensionState()
@@ -267,6 +268,7 @@ const App = () => {
userInfo={cloudUserInfo}
isAuthenticated={cloudIsAuthenticated}
cloudApiUrl={cloudApiUrl}
+ organizations={cloudOrganizations}
onDone={() => switchTab("chat")}
/>
)}
diff --git a/webview-ui/src/components/chat/Announcement.tsx b/webview-ui/src/components/chat/Announcement.tsx
index 8c52c41e42f..cfe41340bc9 100644
--- a/webview-ui/src/components/chat/Announcement.tsx
+++ b/webview-ui/src/components/chat/Announcement.tsx
@@ -6,12 +6,9 @@ import { Package } from "@roo/package"
import { useAppTranslation } from "@src/i18n/TranslationContext"
import { useExtensionState } from "@src/context/ExtensionStateContext"
import { vscode } from "@src/utils/vscode"
-import { Dialog, DialogContent, DialogHeader, DialogTitle, DialogDescription } from "@src/components/ui"
+import { Dialog, DialogContent, DialogHeader, DialogTitle } from "@src/components/ui"
import { Button } from "@src/components/ui"
-// Define the production URL constant locally to avoid importing from cloud package in webview
-const PRODUCTION_ROO_CODE_API_URL = "https://app.roocode.com"
-
interface AnnouncementProps {
hideAnnouncement: () => void
}
@@ -28,8 +25,7 @@ interface AnnouncementProps {
const Announcement = ({ hideAnnouncement }: AnnouncementProps) => {
const { t } = useAppTranslation()
const [open, setOpen] = useState(true)
- const { cloudApiUrl } = useExtensionState()
- const cloudUrl = cloudApiUrl || PRODUCTION_ROO_CODE_API_URL
+ const { cloudIsAuthenticated } = useExtensionState()
return (