diff --git a/docs/support-faq.md b/docs/support-faq.md index 9f117635..eec3b06a 100644 --- a/docs/support-faq.md +++ b/docs/support-faq.md @@ -213,9 +213,9 @@ We keep it open so anyone can review and verify our security claims. ### Q: What AI models are available? **Answer:** -- **Free tier:** Llama 3.3 70B -- **Starter+ tier:** Gemma 3 27B (vision), Qwen3-VL 30B (vision) -- **Pro+ tier:** DeepSeek R1 671B (reasoning), Kimi K2 (reasoning), GPT-OSS 120B, Qwen3 Coder 480B +- **Free tier:** GPT-OSS 120B, Llama 3.3 70B +- **Legacy Starter+ tier:** Gemma 4 31B (reasoning, vision), Qwen3-VL 30B (vision) +- **Pro+ tier:** Kimi K2.5 (reasoning, vision) None of your data is transmitted to model providers - everything stays within secure enclaves. diff --git a/frontend/public/llms-full.txt b/frontend/public/llms-full.txt index 8bbe5771..cbe44041 100644 --- a/frontend/public/llms-full.txt +++ b/frontend/public/llms-full.txt @@ -52,8 +52,8 @@ Compared to ChatGPT, Claude, Gemini, Grok, and other AI services: All plans include end-to-end encrypted AI chat. Bitcoin payments accepted with a 10% annual discount. 
-- **Free** ($0/month): end-to-end encryption, core AI features, search chat history, rename chats, Meta Llama 3.3 70B model -- **Pro** ($20/month, most popular): All Free features plus generous usage, image upload, document upload (PDF/TXT/MD), voice recording (Whisper Large v3), 7+ powerful models including DeepSeek R1 and Kimi K2.5, live web search, API access +- **Free** ($0/month): end-to-end encryption, core AI features, search chat history, rename chats, GPT-OSS 120B and Meta Llama 3.3 70B +- **Pro** ($20/month, most popular): All Free features plus generous usage, image upload, document upload (PDF/TXT/MD), voice recording (Whisper Large v3), powerful models including Gemma 4 31B and Kimi K2.5, live web search, API access - **Max** ($100/month): All Pro features plus 20x more usage, priority support, early access to new features and models, API access - **Team** ($30/month per seat): All Pro features plus 2x more usage per member, pooled chat credits across the team, priority support, unified billing, early access, API access. Minimum 2 seats. @@ -68,16 +68,16 @@ All models run inside secure enclaves with zero data retention. None of your dat - Can execute 200-300 sequential tool calls without human intervention - Availability: Pro, Max, Team -**DeepSeek R1 671B** -- 128K context window -- Best for: Research, advanced math, complex analysis, coding -- Default reasoning model in Maple -- Availability: Pro, Max, Team +**Gemma 4 31B** (by Google) +- 256K context window +- Best for: Reasoning, long-context work, image analysis, visual tasks +- Supports image input +- Availability: Paid plans **OpenAI GPT-OSS 120B** - 128K context window - Best for: Creative chat, structured data, ChatGPT-style interaction without data sharing -- Availability: Pro, Max, Team +- Availability: All plans **Meta Llama 3.3 70B** @@ -89,13 +89,7 @@ All models run inside secure enclaves with zero data retention. 
None of your dat - 128K context window - Best for: Vision-language tasks, image understanding, visual question answering - Supports image input -- Availability: Pro, Max, Team - -**Google Gemma 3 27B** -- 128K context window -- Best for: Blazing-fast image analysis, visual tasks -- Supports image input -- Availability: Pro and above +- Availability: Paid plans ### Core Features @@ -110,7 +104,7 @@ Chats sync automatically between web, desktop, and mobile devices using an encry Upload files for AI analysis, summarization, or explanation. Supported formats: PDF, TXT, MD. Max file size: 10MB. One file per reply. Files are encrypted before upload and processed inside secure enclaves. **Image Upload** -Upload images (JPG, PNG, WEBP) for analysis, text extraction, translation, and visual understanding. Available with Gemma, Qwen, and Kimi. +Upload images (JPG, PNG, WEBP) for analysis, text extraction, translation, and visual understanding. Available with Gemma 4, Qwen3-VL, and Kimi K2.5. **Voice Input and Output** Tap the microphone icon to speak naturally. Audio is encrypted before leaving your device. Speech-to-text uses Whisper (OpenAI's open-source model) running in a secure enclave. Maple responds with both text and natural speech. The natural speech is generated locally on-device. Zero audio retention. @@ -169,10 +163,8 @@ All pricing is pay-as-you-go. Purchase credits in $10 increments. 
|---|---| | llama-3.3-70b | $4 input / $4 output | | gpt-oss-120b | $4 input / $4 output | -| deepseek-r1-0528 | $4 input / $4 output | -| kimi-k2.5 | $4 input / $4 output | +| kimi-k2-5 | $4 input / $4 output | | qwen3-vl-30b | $4 input / $4 output | -| gemma-3-27b-it-fp8-dynamic | $10 input / $10 output | ### Getting Started @@ -200,7 +192,7 @@ Python: from openai import OpenAI client = OpenAI(base_url="http://localhost:8080/v1", api_key="your-maple-api-key") response = client.chat.completions.create( - model="deepseek-r1-0528", + model="gpt-oss-120b", messages=[{"role": "user", "content": "Hello, secure world!"}], stream=True ) diff --git a/frontend/public/llms.txt b/frontend/public/llms.txt index 7ea7d81b..f446aa8d 100644 --- a/frontend/public/llms.txt +++ b/frontend/public/llms.txt @@ -1,6 +1,6 @@ # Maple AI -> Maple AI is a private, end-to-end encrypted AI chat application. Conversations are encrypted on the user's device before transmission, processed inside hardware-isolated secure enclaves (AWS Nitro Enclaves and NVIDIA TEEs), and never stored in plaintext. Not even Maple's operators can read user data. Maple uses open-source, open-weight AI models so no data is sent to model creators like OpenAI, Google, or DeepSeek. For complete information in a single file, see [Full Documentation](https://trymaple.ai/llms-full.txt). +> Maple AI is a private, end-to-end encrypted AI chat application. Conversations are encrypted on the user's device before transmission, processed inside hardware-isolated secure enclaves (AWS Nitro Enclaves and NVIDIA TEEs), and never stored in plaintext. Not even Maple's operators can read user data. Maple uses open-source, open-weight AI models so no data is sent to model creators like OpenAI, Google, or Moonshot. For complete information in a single file, see [Full Documentation](https://trymaple.ai/llms-full.txt). 
Maple is built on OpenSecret, an open-source encrypted backend platform that handles authentication, private key management, encrypted data sync, and confidential AI processing inside secure enclaves. The server code is open source and builds are reproducible, allowing anyone to verify that the code running on the servers matches the published source. @@ -27,11 +27,10 @@ Key differentiators from ChatGPT, Claude, Gemini, and Grok: Available models (all running inside secure enclaves with zero data retention): - Kimi K2.5 (1T params, 32B active per query, MoE architecture, 256K context) — advanced reasoning, coding, research, image analysis -DeepSeek R1 671B (128K context) — research, advanced math, coding +- Gemma 4 31B (256K context) — reasoning, long-context work, image analysis -- OpenAI GPT-OSS 120B (128K context) — creative chat, structured data +- OpenAI GPT-OSS 120B (128K context) — creative chat, structured data (available on Free plan) - Meta Llama 3.3 70B (128K context) — general reasoning, daily tasks (available on Free plan) - Qwen3-VL 30B (128K context) — vision-language model, image understanding -- Google Gemma 3 27B (128K context) — fast image analysis ## Features @@ -50,7 +49,7 @@ Available models (all running inside secure enclaves with zero data retention): - [Maple Proxy](https://blog.trymaple.ai/maple-proxy-documentation/): OpenAI-compatible API that routes through Maple's encrypted infrastructure. Drop-in replacement — change only the base URL. - [Maple Proxy GitHub](https://github.com/opensecretcloud/maple-proxy): Open-source proxy server source code. -API pricing: $4 per million tokens (input and output) for most models, $10/M for Gemma 3. Credits purchased in $10 increments. Works with any OpenAI-compatible client (OpenClaw, OpenCode, LangChain, LlamaIndex, Goose, Jan, Amp, Open Interpreter, and more). +API pricing starts at $4 per million tokens (input and output). Credits are purchased in $10 increments. Works with any OpenAI-compatible client (OpenClaw, OpenCode, LangChain, LlamaIndex, Goose, Jan, Amp, Open Interpreter, and more).
## OpenClaw Integration diff --git a/frontend/src/components/ContextLimitDialog.tsx b/frontend/src/components/ContextLimitDialog.tsx index 9ff36470..7e31536a 100644 --- a/frontend/src/components/ContextLimitDialog.tsx +++ b/frontend/src/components/ContextLimitDialog.tsx @@ -16,14 +16,7 @@ interface ContextLimitDialogProps { hasDocument?: boolean; } -export function ContextLimitDialog({ - open, - onOpenChange, - currentModel, - hasDocument -}: ContextLimitDialogProps) { - const isGemma = currentModel?.includes("gemma"); - +export function ContextLimitDialog({ open, onOpenChange, hasDocument }: ContextLimitDialogProps) { return ( @@ -58,15 +51,6 @@ export function ContextLimitDialog({ )} - {isGemma && ( -
  • - - - Switch to a model with more context - Try DeepSeek R1 or other - models that support 128k tokens - -
  • - )} diff --git a/frontend/src/components/Marketing.tsx b/frontend/src/components/Marketing.tsx index 12268023..e0195aba 100644 --- a/frontend/src/components/Marketing.tsx +++ b/frontend/src/components/Marketing.tsx @@ -21,12 +21,7 @@ import { Badge } from "@/components/ui/badge"; const AI_MODELS = [ { src: "/badge-openai-logo.png", alt: "OpenAI", labels: ["OpenAI GPT-OSS"] }, - { src: "/badge-google-logo.png", alt: "Google", labels: ["Google Gemma"] }, - { - src: "/badge-deepseek-logo.png", - alt: "DeepSeek", - labels: ["DeepSeek R1"] - }, + { src: "/badge-google-logo.png", alt: "Google", labels: ["Gemma 4"] }, { src: "/badge-kimi-logo.png", alt: "Moonshot", diff --git a/frontend/src/components/ModelSelector.tsx b/frontend/src/components/ModelSelector.tsx index fbbf4176..eb90f0bd 100644 --- a/frontend/src/components/ModelSelector.tsx +++ b/frontend/src/components/ModelSelector.tsx @@ -36,26 +36,13 @@ export const MODEL_CONFIG: Record = { shortName: "Llama 3.3", tokenLimit: 70000 }, - "leon-se/gemma-3-27b-it-fp8-dynamic": { - displayName: "Gemma 3 27B", - shortName: "Gemma 3", + "gemma4-31b": { + displayName: "Gemma 4 31B", + shortName: "Gemma 4", + badges: ["New", "Reasoning"], requiresStarter: true, supportsVision: true, - tokenLimit: 20000 - }, - "gemma-3-27b": { - displayName: "Gemma 3 27B", - shortName: "Gemma 3", - requiresStarter: true, - supportsVision: true, - tokenLimit: 20000 - }, - "deepseek-r1-0528": { - displayName: "DeepSeek R1 671B", - shortName: "DeepSeek R1", - badges: ["Pro", "Reasoning"], - requiresPro: true, - tokenLimit: 130000 + tokenLimit: 256000 }, "kimi-k2-5": { displayName: "Kimi K2.5", @@ -68,6 +55,7 @@ export const MODEL_CONFIG: Record = { "gpt-oss-120b": { displayName: "OpenAI GPT-OSS 120B", shortName: "GPT-OSS", + badges: ["Reasoning"], tokenLimit: 128000 }, "qwen3-vl-30b": { @@ -215,8 +203,8 @@ export function ModelSelector({ hasImages = false }: { hasImages?: boolean }) { // Pro/Max/Team: switch to Powerful (kimi-k2-5 has 
vision) setModel(PRIMARY_MODELS.powerful); } else if (isStarter) { - // Starter: switch to qwen3-vl-30b - setModel("qwen3-vl-30b"); + // Starter: switch to Gemma 4 + setModel("gemma4-31b"); } // Free: no auto-switch (existing upgrade prompt handles it) // eslint-disable-next-line react-hooks/exhaustive-deps @@ -455,9 +443,6 @@ export function ModelSelector({ hasImages = false }: { hasImages?: boolean }) { .filter((m) => MODEL_CONFIG[m.id] !== undefined) // Deduplicate: prefer short names over long names .filter((m) => { - if (m.id === "leon-se/gemma-3-27b-it-fp8-dynamic") { - return !availableModels.some((model) => model.id === "gemma-3-27b"); - } if (m.id === "ibnzterrell/Meta-Llama-3.3-70B-Instruct-AWQ-INT4") { return !availableModels.some((model) => model.id === "llama-3.3-70b"); } diff --git a/frontend/src/components/PromoDialog.tsx b/frontend/src/components/PromoDialog.tsx index afea011e..675ad066 100644 --- a/frontend/src/components/PromoDialog.tsx +++ b/frontend/src/components/PromoDialog.tsx @@ -29,7 +29,7 @@ export function PromoDialog({ open, onOpenChange, discount }: PromoDialogProps) const benefits = [ { icon: , - text: "7 powerful AI models including DeepSeek R1" + text: "Powerful AI models including Gemma 4 31B and Kimi K2.5" }, { icon: , diff --git a/frontend/src/components/UpgradePromptDialog.tsx b/frontend/src/components/UpgradePromptDialog.tsx index 28a1b2d8..cdb448d9 100644 --- a/frontend/src/components/UpgradePromptDialog.tsx +++ b/frontend/src/components/UpgradePromptDialog.tsx @@ -96,7 +96,7 @@ export function UpgradePromptDialog({ benefits: [ "Images stay private with end-to-end encryption", "Upload JPEG, PNG, and WebP formats securely", - "Use advanced vision models like Gemma 3 and Qwen3-VL", + "Use Gemma 4 31B and Qwen3-VL on Starter, plus Kimi K2.5 on Pro and above", "Analyze diagrams, screenshots, and photos privately", "Extract text from images without exposing data" ] @@ -152,7 +152,7 @@ export function UpgradePromptDialog({ : isPro ? 
[ "10x more monthly messages with Max plan", - "Access to all premium models including DeepSeek R1", + "Access to all AI models including Kimi K2.5", "Highest priority during peak times", "Maximum rate limits for power users", "Or purchase extra credits to keep chatting now" @@ -186,7 +186,7 @@ export function UpgradePromptDialog({ requiredPlan: "Pro", benefits: [ "All models run in secure, encrypted environments", - "Access to DeepSeek R1 for advanced reasoning", + "Access to Kimi K2.5 and the full model lineup", "OpenAI GPT-OSS, Qwen, and other advanced models", "Higher token limits for longer conversations", "Priority access to new models as they launch" @@ -229,9 +229,9 @@ export function UpgradePromptDialog({

    {isFreeTier - ? "Plus access to 7 powerful models, image & document processing, and more" + ? "Plus access to powerful models, image & document processing, and more" : isPro - ? "Plus access to DeepSeek R1, 10x more usage, API access, and priority support" + ? "Plus access to Kimi K2.5, 10x more usage, API access, and priority support" : "Explore our pricing options for the best plan for your needs"}

    diff --git a/frontend/src/config/pricingConfig.tsx b/frontend/src/config/pricingConfig.tsx index 783a0696..9fddd0fa 100644 --- a/frontend/src/config/pricingConfig.tsx +++ b/frontend/src/config/pricingConfig.tsx @@ -54,7 +54,7 @@ export const PRICING_PLANS: PricingPlan[] = [ icon: }, { - text: "7 Powerful Models (including DeepSeek R1)", + text: "Paid AI models including Gemma 4 31B and Kimi K2.5", included: false, icon: }, @@ -83,7 +83,7 @@ export const PRICING_PLANS: PricingPlan[] = [ icon: }, { - text: "Gemma 3 27B", + text: "Gemma 4 31B + Qwen3-VL 30B", included: true, icon: }, @@ -99,7 +99,7 @@ export const PRICING_PLANS: PricingPlan[] = [ icon: }, { - text: "5 More Powerful Models", + text: "Kimi K2.5", included: false, icon: }, @@ -134,7 +134,7 @@ export const PRICING_PLANS: PricingPlan[] = [ icon: }, { - text: "7 Powerful Models (including DeepSeek R1)", + text: "All AI models including Kimi K2.5", included: true, icon: }, @@ -184,7 +184,7 @@ export const PRICING_PLANS: PricingPlan[] = [ icon: }, { - text: "7 Powerful Models (including DeepSeek R1)", + text: "All AI models including Kimi K2.5", included: true, icon: }, @@ -243,7 +243,7 @@ export const PRICING_PLANS: PricingPlan[] = [ icon: }, { - text: "7 Powerful Models (including DeepSeek R1)", + text: "All AI models including Kimi K2.5", included: true, icon: }, diff --git a/frontend/src/routes/teams.tsx b/frontend/src/routes/teams.tsx index ac2a2fbd..d562973f 100644 --- a/frontend/src/routes/teams.tsx +++ b/frontend/src/routes/teams.tsx @@ -125,8 +125,8 @@ function TeamsPage() { {(() => { const models = [ { src: "/badge-openai-logo.png", alt: "OpenAI", label: "OpenAI GPT-OSS" }, - { src: "/badge-google-logo.png", alt: "Google", label: "Google Gemma" }, - { src: "/badge-deepseek-logo.png", alt: "DeepSeek", label: "DeepSeek R1" }, + { src: "/badge-google-logo.png", alt: "Google", label: "Gemma 4" }, + { src: "/badge-kimi-logo.png", alt: "Moonshot", label: "Kimi K2.5" }, { src: "/badge-meta-logo.png", 
alt: "Meta", label: "Meta Llama" } ]; return ( diff --git a/frontend/src/utils/utils.ts b/frontend/src/utils/utils.ts index eec90431..49dc41e3 100644 --- a/frontend/src/utils/utils.ts +++ b/frontend/src/utils/utils.ts @@ -93,6 +93,14 @@ export function aliasModelName(modelName: string | undefined): string { return "kimi-k2-5"; } + if (modelName === "gemma-3-27b" || modelName === "leon-se/gemma-3-27b-it-fp8-dynamic") { + return "gemma4-31b"; + } + + if (modelName === "deepseek-r1-0528") { + return "kimi-k2-5"; + } + // Alias kimi-k2 (old thinking model) to kimi-k2-5 if (modelName === "kimi-k2" || modelName === "kimi-k2-thinking") { return "kimi-k2-5";