diff --git a/.github/workflows/chromatic.yml b/.github/workflows/chromatic.yml
index be4d583f9..04ca19c99 100644
--- a/.github/workflows/chromatic.yml
+++ b/.github/workflows/chromatic.yml
@@ -36,4 +36,3 @@ jobs:
           projectToken: ${{ secrets.CHROMATIC_PROJECT_TOKEN }}
           exitZeroOnChanges: true
           onlyChanged: true
-
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 743abdfb4..613c390f2 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -74,6 +74,9 @@ jobs:
       - uses: ./.github/actions/setup-cmux
+      - name: Build worker files
+        run: make build-main
+
       - name: Run tests with coverage
         run: bun test --coverage --coverage-reporter=lcov ${{ github.event.inputs.test_filter || 'src' }}
@@ -96,6 +99,9 @@ jobs:
       - uses: ./.github/actions/setup-cmux
+      - name: Build worker files
+        run: make build-main
+
       - name: Run integration tests with coverage
         # --silent suppresses per-test output (17 test files × 32 workers = overwhelming logs)
         run: TEST_INTEGRATION=1 bun x jest --coverage --maxWorkers=100% --silent ${{ github.event.inputs.test_filter || 'tests' }}
diff --git a/.github/workflows/nightly-terminal-bench.yml b/.github/workflows/nightly-terminal-bench.yml
index e78b2ce2d..119c20dcc 100644
--- a/.github/workflows/nightly-terminal-bench.yml
+++ b/.github/workflows/nightly-terminal-bench.yml
@@ -3,13 +3,13 @@ name: Nightly Terminal-Bench
 on:
   schedule:
     # Run full benchmark suite (~80 tasks) every night at midnight UTC
-    - cron: '0 0 * * *'
+    - cron: "0 0 * * *"
   workflow_dispatch:
     inputs:
       models:
         description: 'Models to test (comma-separated, or "all" for both)'
         required: false
-        default: 'all'
+        default: "all"
         type: string
 jobs:
@@ -41,9 +41,9 @@ jobs:
     uses: ./.github/workflows/terminal-bench.yml
     with:
       model_name: ${{ matrix.model }}
-      thinking_level: 'high'
-      dataset: 'terminal-bench-core==0.1.1'
-      concurrency: '4'
+      thinking_level: "high"
+      dataset: "terminal-bench-core==0.1.1"
+      concurrency: "4"
       livestream: true
     secrets:
       ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
diff --git a/.github/workflows/publish-npm.yml b/.github/workflows/publish-npm.yml
index 47ff8515b..3dafaec9e 100644
--- a/.github/workflows/publish-npm.yml
+++ b/.github/workflows/publish-npm.yml
@@ -5,7 +5,7 @@ on:
     branches:
       - main
     tags:
-      - 'v*'
+      - "v*"
   workflow_dispatch:

 permissions:
@@ -24,12 +24,12 @@ jobs:
       - uses: ./.github/actions/setup-cmux
         with:
-          install-imagemagick: 'true'
+          install-imagemagick: "true"

       # Sets up .npmrc with the auth token
       - uses: actions/setup-node@v4
         with:
-          registry-url: 'https://registry.npmjs.org'
+          registry-url: "https://registry.npmjs.org"

       - run: sudo npm i -g npm@latest
@@ -38,10 +38,10 @@
         run: |
           # Get base version from package.json
           BASE_VERSION=$(node -p "require('./package.json').version")
-
+
           # Generate git describe version
           GIT_DESCRIBE=$(git describe --tags --always --dirty 2>/dev/null || echo "unknown")
-
+
           if [[ $GITHUB_REF == refs/tags/* ]]; then
             # For tags, use the base version as-is (stable release)
             NPM_VERSION="${BASE_VERSION}"
@@ -56,13 +56,13 @@
             NPM_TAG="next"
             echo "Publishing pre-release: ${NPM_VERSION}"
           fi
-
+
           echo "version=${NPM_VERSION}" >> $GITHUB_OUTPUT
           echo "tag=${NPM_TAG}" >> $GITHUB_OUTPUT
-
+
           # Update package.json with the new version
           node -e "const fs = require('fs'); const pkg = JSON.parse(fs.readFileSync('package.json')); pkg.version = '${NPM_VERSION}'; fs.writeFileSync('package.json', JSON.stringify(pkg, null, 2) + '\n');"
-
+
           echo "Updated package.json to version ${NPM_VERSION}"

       - name: Generate version file
@@ -76,7 +76,7 @@
         run: |
           PACKAGE_NAME=$(node -p "require('./package.json').name")
           VERSION="${{ steps.version.outputs.version }}"
-
+
           if npm view "${PACKAGE_NAME}@${VERSION}" version &>/dev/null; then
             echo "exists=true" >> $GITHUB_OUTPUT
             echo "Version ${VERSION} already exists on npm"
@@ -95,7 +95,7 @@
           PACKAGE_NAME=$(node -p "require('./package.json').name")
           VERSION="${{ steps.version.outputs.version }}"
           TAG="${{ steps.version.outputs.tag }}"
-
+
           echo "Version ${VERSION} already published, updating dist-tag to ${TAG}"
           npm dist-tag add "${PACKAGE_NAME}@${VERSION}" "${TAG}"
diff --git a/.github/workflows/terminal-bench.yml b/.github/workflows/terminal-bench.yml
index 666a28686..0c75a4c3d 100644
--- a/.github/workflows/terminal-bench.yml
+++ b/.github/workflows/terminal-bench.yml
@@ -4,34 +4,34 @@ on:
   workflow_call:
     inputs:
       model_name:
-        description: 'Model to use (e.g., anthropic:claude-sonnet-4-5)'
+        description: "Model to use (e.g., anthropic:claude-sonnet-4-5)"
         required: false
         type: string
       thinking_level:
-        description: 'Thinking level (off, low, medium, high)'
+        description: "Thinking level (off, low, medium, high)"
         required: false
         type: string
       dataset:
-        description: 'Terminal-Bench dataset to use'
+        description: "Terminal-Bench dataset to use"
         required: false
         type: string
-        default: 'terminal-bench-core==0.1.1'
+        default: "terminal-bench-core==0.1.1"
       concurrency:
-        description: 'Number of concurrent tasks (--n-concurrent)'
+        description: "Number of concurrent tasks (--n-concurrent)"
         required: false
         type: string
-        default: '4'
+        default: "4"
       livestream:
-        description: 'Enable livestream mode'
+        description: "Enable livestream mode"
         required: false
         type: boolean
         default: true
       sample_size:
-        description: 'Number of random tasks to run (empty = all tasks)'
+        description: "Number of random tasks to run (empty = all tasks)"
         required: false
         type: string
       extra_args:
-        description: 'Additional arguments to pass to terminal-bench'
+        description: "Additional arguments to pass to terminal-bench"
         required: false
         type: string
     secrets:
@@ -42,34 +42,34 @@ on:
   workflow_dispatch:
     inputs:
       dataset:
-        description: 'Terminal-Bench dataset to use'
+        description: "Terminal-Bench dataset to use"
         required: false
-        default: 'terminal-bench-core==0.1.1'
+        default: "terminal-bench-core==0.1.1"
         type: string
       concurrency:
-        description: 'Number of concurrent tasks (--n-concurrent)'
+        description: "Number of concurrent tasks (--n-concurrent)"
         required: false
-        default: '4'
+        default: "4"
         type: string
       livestream:
-        description: 'Enable livestream mode'
+        description: "Enable livestream mode"
         required: false
         default: true
         type: boolean
       sample_size:
-        description: 'Number of random tasks to run (empty = all tasks)'
+        description: "Number of random tasks to run (empty = all tasks)"
         required: false
         type: string
       model_name:
-        description: 'Model to use (e.g., anthropic:claude-sonnet-4-5, openai:gpt-5-codex)'
+        description: "Model to use (e.g., anthropic:claude-sonnet-4-5, openai:gpt-5-codex)"
         required: false
         type: string
       thinking_level:
-        description: 'Thinking level (off, low, medium, high)'
+        description: "Thinking level (off, low, medium, high)"
         required: false
         type: string
       extra_args:
-        description: 'Additional arguments to pass to terminal-bench'
+        description: "Additional arguments to pass to terminal-bench"
         required: false
         type: string
@@ -148,4 +148,3 @@
           runs/
           if-no-files-found: warn
           retention-days: 30
-
diff --git a/.storybook/main.ts b/.storybook/main.ts
index bdd89ff8a..f5ad7e338 100644
--- a/.storybook/main.ts
+++ b/.storybook/main.ts
@@ -4,11 +4,7 @@ import path from "path";

 const config: StorybookConfig = {
   stories: ["../src/**/*.stories.@(ts|tsx)"],
-  addons: [
-    "@storybook/addon-links",
-    "@storybook/addon-docs",
-    "@storybook/addon-interactions",
-  ],
+  addons: ["@storybook/addon-links", "@storybook/addon-docs", "@storybook/addon-interactions"],
   framework: {
     name: "@storybook/react-vite",
     options: {},
diff --git a/.storybook/mocks/version.ts b/.storybook/mocks/version.ts
index 04e48ceb0..c44b48a84 100644
--- a/.storybook/mocks/version.ts
+++ b/.storybook/mocks/version.ts
@@ -6,4 +6,3 @@ export const VERSION = {
   git_describe: "v1.0.0",
   buildTime: "2024-01-24T17:41:00Z", // 9:41 AM PST
 };
-
diff --git a/Makefile b/Makefile
index d3df25aee..12f66deb4 100644
--- a/Makefile
+++ b/Makefile
@@ -203,11 +203,11 @@ check-deadcode: node_modules/.installed ## Check for potential dead code (manual
 	|| echo "✓ No obvious dead code found"

 ## Testing
-test-integration: node_modules/.installed ## Run all tests (unit + integration)
+test-integration: node_modules/.installed build-main ## Run all tests (unit + integration)
 	@bun test src
 	@TEST_INTEGRATION=1 bun x jest tests

-test-unit: node_modules/.installed ## Run unit tests
+test-unit: node_modules/.installed build-main ## Run unit tests
 	@bun test src

 test: test-unit ## Alias for test-unit
@@ -220,7 +220,7 @@ test-coverage: ## Run tests with coverage

 test-e2e: ## Run end-to-end tests
 	@$(MAKE) build
-	@CMUX_E2E_LOAD_DIST=1 CMUX_E2E_SKIP_BUILD=1 PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD=1 bun x playwright test --project=electron
+	@CMUX_E2E_LOAD_DIST=1 CMUX_E2E_SKIP_BUILD=1 PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD=1 bun x playwright test --project=electron $(PLAYWRIGHT_ARGS)

 ## Distribution
 dist: build ## Build distributable packages
diff --git a/benchmarks/terminal_bench/README.md b/benchmarks/terminal_bench/README.md
index 0c3d727b5..c106c8804 100644
--- a/benchmarks/terminal_bench/README.md
+++ b/benchmarks/terminal_bench/README.md
@@ -36,6 +36,7 @@ The benchmark uses a **global timeout** applied to all tasks.
The default is **3 **Design Rationale:** Based on analysis of Oct 30, 2025 nightly runs: + - Longest successful task: `blind-maze-explorer-algorithm.hard` at 20 minutes - 95th percentile: ~15 minutes - Mean duration: ~6 minutes diff --git a/bun.lock b/bun.lock index 78d71cffc..cf63a5f2f 100644 --- a/bun.lock +++ b/bun.lock @@ -118,15 +118,15 @@ "@adobe/css-tools": ["@adobe/css-tools@4.4.4", "", {}, "sha512-Elp+iwUx5rN5+Y8xLt5/GRoG20WGoDCQ/1Fb+1LiGtvwbDavuSk0jhD/eZdckHAuzcDzccnkv+rEjyWfRx18gg=="], - "@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.37", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.12" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-r2e9BWoobisH9B5b7x3yYG/k9WlsZqa4D94o7gkwktReqrjjv83zNMop4KmlJsh/zBhbsaP8S8SUfiwK+ESxgg=="], + "@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.38", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.13" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-NjU1ftHbu90OfRCgBwfFelmdEXwGFwLEcfyOyyfjRDm8QHaJUlPNnXhdhPTYuUU386yhj29Vibemiaq6jQv3lA=="], - "@ai-sdk/gateway": ["@ai-sdk/gateway@2.0.0", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.12", "@vercel/oidc": "3.0.3" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-Gj0PuawK7NkZuyYgO/h5kDK/l6hFOjhLdTq3/Lli1FTl47iGmwhH1IZQpAL3Z09BeFYWakcwUmn02ovIm2wy9g=="], + "@ai-sdk/gateway": ["@ai-sdk/gateway@2.0.2", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.13", "@vercel/oidc": "3.0.3" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-25F1qPqZxOw9IcV9OQCL29hV4HAFLw5bFWlzQLBi5aDhEZsTMT2rMi3umSqNaUxrrw+dLRtjOL7RbHC+WjbA/A=="], - "@ai-sdk/openai": ["@ai-sdk/openai@2.0.53", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.12" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-GIkR3+Fyif516ftXv+YPSPstnAHhcZxNoR2s8uSHhQ1yBT7I7aQYTVwpjAuYoT3GR+TeP50q7onj2/nDRbT2FQ=="], + "@ai-sdk/openai": ["@ai-sdk/openai@2.0.56", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.13" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-D+IlvQJYvlhSMTL9t6RxwineAznyKv9j1wytjvD+mf8oivDCEyHjURXbcFKK7yyVJQTUc91YbnhjUw7YgxPbYQ=="], "@ai-sdk/provider": ["@ai-sdk/provider@2.0.0", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-6o7Y2SeO9vFKB8lArHXehNuusnpddKPk7xqL7T2/b+OvXMRIXUO1rR4wcv1hAFUAT9avGZshty3Wlua/XA7TvA=="], - "@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.12", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.5" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-ZtbdvYxdMoria+2SlNarEk6Hlgyf+zzcznlD55EAl+7VZvJaSg2sqPvwArY7L6TfDEDJsnCq0fdhBSkYo0Xqdg=="], + "@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.13", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.5" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-aXFLBLRPTUYA853MJliItefSXeJPl+mg0KSjbToP41kJ+banBmHO8ZPGLJhNqGlCU82o11TYN7G05EREKX8CkA=="], "@antfu/install-pkg": ["@antfu/install-pkg@1.1.0", "", { "dependencies": { "package-manager-detector": "^1.3.0", "tinyexec": "^1.0.1" } }, "sha512-MGQsmw10ZyI+EJo45CdSER4zEb+p31LpDAFp2Z3gkSd1yqVZGi0Ebx++YTEMonJy4oChEMLsxZ64j8FH6sSqtQ=="], @@ -134,11 +134,11 @@ "@babel/code-frame": ["@babel/code-frame@7.27.1", "", { "dependencies": { 
"@babel/helper-validator-identifier": "^7.27.1", "js-tokens": "^4.0.0", "picocolors": "^1.1.1" } }, "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg=="], - "@babel/compat-data": ["@babel/compat-data@7.28.4", "", {}, "sha512-YsmSKC29MJwf0gF8Rjjrg5LQCmyh+j/nD8/eP7f+BeoQTKYqs9RoWbjGOdy0+1Ekr68RJZMUOPVQaQisnIo4Rw=="], + "@babel/compat-data": ["@babel/compat-data@7.28.5", "", {}, "sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA=="], - "@babel/core": ["@babel/core@7.28.4", "", { "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/generator": "^7.28.3", "@babel/helper-compilation-targets": "^7.27.2", "@babel/helper-module-transforms": "^7.28.3", "@babel/helpers": "^7.28.4", "@babel/parser": "^7.28.4", "@babel/template": "^7.27.2", "@babel/traverse": "^7.28.4", "@babel/types": "^7.28.4", "@jridgewell/remapping": "^2.3.5", "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", "json5": "^2.2.3", "semver": "^6.3.1" } }, "sha512-2BCOP7TN8M+gVDj7/ht3hsaO/B/n5oDbiAyyvnRlNOs+u1o+JWNYTQrmpuNp1/Wq2gcFrI01JAW+paEKDMx/CA=="], + "@babel/core": ["@babel/core@7.28.5", "", { "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/generator": "^7.28.5", "@babel/helper-compilation-targets": "^7.27.2", "@babel/helper-module-transforms": "^7.28.3", "@babel/helpers": "^7.28.4", "@babel/parser": "^7.28.5", "@babel/template": "^7.27.2", "@babel/traverse": "^7.28.5", "@babel/types": "^7.28.5", "@jridgewell/remapping": "^2.3.5", "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", "json5": "^2.2.3", "semver": "^6.3.1" } }, "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw=="], - "@babel/generator": ["@babel/generator@7.28.3", "", { "dependencies": { "@babel/parser": "^7.28.3", "@babel/types": "^7.28.2", "@jridgewell/gen-mapping": "^0.3.12", "@jridgewell/trace-mapping": "^0.3.28", "jsesc": "^3.0.2" } }, "sha512-3lSpxGgvnmZznmBkCRnVREPUFJv2wrv9iAoFDvADJc0ypmdOxdUtcLeBgBJ6zE0PMeTKnxeQzyk0xTBq4Ep7zw=="], + "@babel/generator": ["@babel/generator@7.28.5", "", { "dependencies": { "@babel/parser": "^7.28.5", "@babel/types": "^7.28.5", "@jridgewell/gen-mapping": "^0.3.12", "@jridgewell/trace-mapping": "^0.3.28", "jsesc": "^3.0.2" } }, "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ=="], "@babel/helper-compilation-targets": ["@babel/helper-compilation-targets@7.27.2", "", { "dependencies": { "@babel/compat-data": "^7.27.2", "@babel/helper-validator-option": "^7.27.1", "browserslist": "^4.24.0", "lru-cache": "^5.1.1", "semver": "^6.3.1" } }, "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ=="], @@ -152,13 +152,13 @@ "@babel/helper-string-parser": ["@babel/helper-string-parser@7.27.1", "", {}, "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA=="], - "@babel/helper-validator-identifier": ["@babel/helper-validator-identifier@7.27.1", "", {}, "sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow=="], + "@babel/helper-validator-identifier": ["@babel/helper-validator-identifier@7.28.5", "", {}, "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q=="], "@babel/helper-validator-option": ["@babel/helper-validator-option@7.27.1", "", {}, 
"sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg=="], "@babel/helpers": ["@babel/helpers@7.28.4", "", { "dependencies": { "@babel/template": "^7.27.2", "@babel/types": "^7.28.4" } }, "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w=="], - "@babel/parser": ["@babel/parser@7.28.4", "", { "dependencies": { "@babel/types": "^7.28.4" }, "bin": "./bin/babel-parser.js" }, "sha512-yZbBqeM6TkpP9du/I2pUZnJsRMGGvOuIrhjzC1AwHwW+6he4mni6Bp/m8ijn0iOuZuPI2BfkCoSRunpyjnrQKg=="], + "@babel/parser": ["@babel/parser@7.28.5", "", { "dependencies": { "@babel/types": "^7.28.5" }, "bin": "./bin/babel-parser.js" }, "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ=="], "@babel/plugin-syntax-async-generators": ["@babel/plugin-syntax-async-generators@7.8.4", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw=="], @@ -202,9 +202,9 @@ "@babel/template": ["@babel/template@7.27.2", "", { "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/parser": "^7.27.2", "@babel/types": "^7.27.1" } }, "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw=="], - "@babel/traverse": ["@babel/traverse@7.28.4", "", { "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/generator": "^7.28.3", "@babel/helper-globals": "^7.28.0", "@babel/parser": "^7.28.4", "@babel/template": "^7.27.2", "@babel/types": "^7.28.4", "debug": "^4.3.1" } }, "sha512-YEzuboP2qvQavAcjgQNVgsvHIDv6ZpwXvcvjmyySP2DIMuByS/6ioU5G9pYrWHM6T2YDfc7xga9iNzYOs12CFQ=="], + "@babel/traverse": ["@babel/traverse@7.28.5", "", { "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/generator": "^7.28.5", "@babel/helper-globals": "^7.28.0", "@babel/parser": "^7.28.5", "@babel/template": "^7.27.2", "@babel/types": "^7.28.5", "debug": "^4.3.1" } }, "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ=="], - "@babel/types": ["@babel/types@7.28.4", "", { "dependencies": { "@babel/helper-string-parser": "^7.27.1", "@babel/helper-validator-identifier": "^7.27.1" } }, "sha512-bkFqkLhh3pMBUQQkpVgWDWq/lqzc2678eUyDlTBhRqhCHFguYYGM0Efga7tYk4TogG/3x0EEl66/OQ+WGbWB/Q=="], + "@babel/types": ["@babel/types@7.28.5", "", { "dependencies": { "@babel/helper-string-parser": "^7.27.1", "@babel/helper-validator-identifier": "^7.28.5" } }, "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA=="], "@bcoe/v8-coverage": ["@bcoe/v8-coverage@0.2.3", "", {}, "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw=="], @@ -372,7 +372,7 @@ "@jest/test-sequencer": ["@jest/test-sequencer@30.2.0", "", { "dependencies": { "@jest/test-result": "30.2.0", "graceful-fs": "^4.2.11", "jest-haste-map": "30.2.0", "slash": "^3.0.0" } }, "sha512-wXKgU/lk8fKXMu/l5Hog1R61bL4q5GCdT6OJvdAFz1P+QrpoFuLU68eoKuVc4RbrTtNnTL5FByhWdLgOPSph+Q=="], - "@jest/transform": ["@jest/transform@29.7.0", "", { "dependencies": { "@babel/core": "^7.11.6", "@jest/types": "^29.6.3", "@jridgewell/trace-mapping": "^0.3.18", "babel-plugin-istanbul": "^6.1.1", "chalk": "^4.0.0", "convert-source-map": "^2.0.0", "fast-json-stable-stringify": "^2.1.0", "graceful-fs": "^4.2.9", "jest-haste-map": "^29.7.0", "jest-regex-util": "^29.6.3", "jest-util": "^29.7.0", "micromatch": "^4.0.4", 
"pirates": "^4.0.4", "slash": "^3.0.0", "write-file-atomic": "^4.0.2" } }, "sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw=="], + "@jest/transform": ["@jest/transform@30.2.0", "", { "dependencies": { "@babel/core": "^7.27.4", "@jest/types": "30.2.0", "@jridgewell/trace-mapping": "^0.3.25", "babel-plugin-istanbul": "^7.0.1", "chalk": "^4.1.2", "convert-source-map": "^2.0.0", "fast-json-stable-stringify": "^2.1.0", "graceful-fs": "^4.2.11", "jest-haste-map": "30.2.0", "jest-regex-util": "30.0.1", "jest-util": "30.2.0", "micromatch": "^4.0.8", "pirates": "^4.0.7", "slash": "^3.0.0", "write-file-atomic": "^5.0.1" } }, "sha512-XsauDV82o5qXbhalKxD7p4TZYYdwcaEXC77PPD2HixEFF+6YGppjrAAQurTl2ECWcEomHBMMNS9AH3kcCFx8jA=="], "@jest/types": ["@jest/types@30.2.0", "", { "dependencies": { "@jest/pattern": "30.0.1", "@jest/schemas": "30.0.5", "@types/istanbul-lib-coverage": "^2.0.6", "@types/istanbul-reports": "^3.0.4", "@types/node": "*", "@types/yargs": "^17.0.33", "chalk": "^4.1.2" } }, "sha512-H9xg1/sfVvyfU7o3zMfBEjQ1gcsdeTMgqHoYdN79tuLqfTtuu7WckRA1R5whDwOzxaZAeMKTYWqP+WCAi0CHsg=="], @@ -412,7 +412,7 @@ "@playwright/test": ["@playwright/test@1.56.1", "", { "dependencies": { "playwright": "1.56.1" }, "bin": { "playwright": "cli.js" } }, "sha512-vSMYtL/zOcFpvJCW71Q/OEGQb7KYBPAdKh35WNSkaZA75JlAO8ED8UN6GUNTm3drWomcbcqRPFqQbLae8yBTdg=="], - "@posthog/core": ["@posthog/core@1.3.1", "", {}, "sha512-sGKVHituJ8L/bJxVV4KamMFp+IBWAZyCiYunFawJZ4cc59PCtLnKFIMEV6kn7A4eZQcQ6EKV5Via4sF3Z7qMLQ=="], + "@posthog/core": ["@posthog/core@1.4.0", "", {}, "sha512-jmW8/I//YOHAfjzokqas+Qtc2T57Ux8d2uIJu7FLcMGxywckHsl6od59CD18jtUzKToQdjQhV6Y3429qj+KeNw=="], "@radix-ui/number": ["@radix-ui/number@1.1.1", "", {}, "sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g=="], @@ -544,17 +544,17 @@ "@rollup/rollup-win32-x64-msvc": ["@rollup/rollup-win32-x64-msvc@4.52.5", "", { "os": "win32", "cpu": "x64" }, "sha512-TAcgQh2sSkykPRWLrdyy2AiceMckNf5loITqXxFI5VuQjS5tSuw3WlwdN8qv8vzjLAUTvYaH/mVjSFpbkFbpTg=="], - "@shikijs/core": ["@shikijs/core@3.13.0", "", { "dependencies": { "@shikijs/types": "3.13.0", "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4", "hast-util-to-html": "^9.0.5" } }, "sha512-3P8rGsg2Eh2qIHekwuQjzWhKI4jV97PhvYjYUzGqjvJfqdQPz+nMlfWahU24GZAyW1FxFI1sYjyhfh5CoLmIUA=="], + "@shikijs/core": ["@shikijs/core@3.14.0", "", { "dependencies": { "@shikijs/types": "3.14.0", "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4", "hast-util-to-html": "^9.0.5" } }, "sha512-qRSeuP5vlYHCNUIrpEBQFO7vSkR7jn7Kv+5X3FO/zBKVDGQbcnlScD3XhkrHi/R8Ltz0kEjvFR9Szp/XMRbFMw=="], - "@shikijs/engine-javascript": ["@shikijs/engine-javascript@3.13.0", "", { "dependencies": { "@shikijs/types": "3.13.0", "@shikijs/vscode-textmate": "^10.0.2", "oniguruma-to-es": "^4.3.3" } }, "sha512-Ty7xv32XCp8u0eQt8rItpMs6rU9Ki6LJ1dQOW3V/56PKDcpvfHPnYFbsx5FFUP2Yim34m/UkazidamMNVR4vKg=="], + "@shikijs/engine-javascript": ["@shikijs/engine-javascript@3.14.0", "", { "dependencies": { "@shikijs/types": "3.14.0", "@shikijs/vscode-textmate": "^10.0.2", "oniguruma-to-es": "^4.3.3" } }, "sha512-3v1kAXI2TsWQuwv86cREH/+FK9Pjw3dorVEykzQDhwrZj0lwsHYlfyARaKmn6vr5Gasf8aeVpb8JkzeWspxOLQ=="], - "@shikijs/engine-oniguruma": ["@shikijs/engine-oniguruma@3.13.0", "", { "dependencies": { "@shikijs/types": "3.13.0", "@shikijs/vscode-textmate": "^10.0.2" } }, "sha512-O42rBGr4UDSlhT2ZFMxqM7QzIU+IcpoTMzb3W7AlziI1ZF7R8eS2M0yt5Ry35nnnTX/LTLXFPUjRFCIW+Operg=="], + "@shikijs/engine-oniguruma": 
["@shikijs/engine-oniguruma@3.14.0", "", { "dependencies": { "@shikijs/types": "3.14.0", "@shikijs/vscode-textmate": "^10.0.2" } }, "sha512-TNcYTYMbJyy+ZjzWtt0bG5y4YyMIWC2nyePz+CFMWqm+HnZZyy9SWMgo8Z6KBJVIZnx8XUXS8U2afO6Y0g1Oug=="], - "@shikijs/langs": ["@shikijs/langs@3.13.0", "", { "dependencies": { "@shikijs/types": "3.13.0" } }, "sha512-672c3WAETDYHwrRP0yLy3W1QYB89Hbpj+pO4KhxK6FzIrDI2FoEXNiNCut6BQmEApYLfuYfpgOZaqbY+E9b8wQ=="], + "@shikijs/langs": ["@shikijs/langs@3.14.0", "", { "dependencies": { "@shikijs/types": "3.14.0" } }, "sha512-DIB2EQY7yPX1/ZH7lMcwrK5pl+ZkP/xoSpUzg9YC8R+evRCCiSQ7yyrvEyBsMnfZq4eBzLzBlugMyTAf13+pzg=="], - "@shikijs/themes": ["@shikijs/themes@3.13.0", "", { "dependencies": { "@shikijs/types": "3.13.0" } }, "sha512-Vxw1Nm1/Od8jyA7QuAenaV78BG2nSr3/gCGdBkLpfLscddCkzkL36Q5b67SrLLfvAJTOUzW39x4FHVCFriPVgg=="], + "@shikijs/themes": ["@shikijs/themes@3.14.0", "", { "dependencies": { "@shikijs/types": "3.14.0" } }, "sha512-fAo/OnfWckNmv4uBoUu6dSlkcBc+SA1xzj5oUSaz5z3KqHtEbUypg/9xxgJARtM6+7RVm0Q6Xnty41xA1ma1IA=="], - "@shikijs/types": ["@shikijs/types@3.13.0", "", { "dependencies": { "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4" } }, "sha512-oM9P+NCFri/mmQ8LoFGVfVyemm5Hi27330zuOBp0annwJdKH1kOLndw3zCtAVDehPLg9fKqoEx3Ht/wNZxolfw=="], + "@shikijs/types": ["@shikijs/types@3.14.0", "", { "dependencies": { "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4" } }, "sha512-bQGgC6vrY8U/9ObG1Z/vTro+uclbjjD/uG58RvfxKZVD5p9Yc1ka3tVyEFy7BNJLzxuWyHH5NWynP9zZZS59eQ=="], "@shikijs/vscode-textmate": ["@shikijs/vscode-textmate@10.0.2", "", {}, "sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg=="], @@ -646,7 +646,7 @@ "@swc/types": ["@swc/types@0.1.25", "", { "dependencies": { "@swc/counter": "^0.1.3" } }, "sha512-iAoY/qRhNH8a/hBvm3zKj9qQ4oc2+3w1unPJa2XvTK3XjeLXtzcCingVPw/9e5mn1+0yPqxcBGp9Jf0pkfMb1g=="], - "@swc/wasm": ["@swc/wasm@1.13.20", "", {}, "sha512-NJzN+QrbdwXeVTfTYiHkqv13zleOCQA52NXBOrwKvjxWJQecRqakjUhUP2z8lqs7eWVthko4Cilqs+VeBrwo3Q=="], + "@swc/wasm": ["@swc/wasm@1.13.21", "", {}, "sha512-fnirreOh8nsRgZoHvBRW9bJL9y2cbiEM6qzSxVEU07PWTD+xFxLdBs0829tf3XSqRDPuivAPc2bDvw1K5itnXA=="], "@szmarczak/http-timer": ["@szmarczak/http-timer@4.0.6", "", { "dependencies": { "defer-to-connect": "^2.0.0" } }, "sha512-4BAffykYOgO+5nzBWYwE3W90sBgLJoUPRWWcL8wlyiM8IB8ipJz3UMJ9KXQd1RKQXpKp8Tutn80HZtWsu2u76w=="], @@ -680,7 +680,7 @@ "@tailwindcss/vite": ["@tailwindcss/vite@4.1.16", "", { "dependencies": { "@tailwindcss/node": "4.1.16", "@tailwindcss/oxide": "4.1.16", "tailwindcss": "4.1.16" }, "peerDependencies": { "vite": "^5.2.0 || ^6 || ^7" } }, "sha512-bbguNBcDxsRmi9nnlWJxhfDWamY3lmcyACHcdO1crxfzuLpOhHLLtEIN/nCbbAtj5rchUgQD17QVAKi1f7IsKg=="], - "@testing-library/dom": ["@testing-library/dom@10.4.0", "", { "dependencies": { "@babel/code-frame": "^7.10.4", "@babel/runtime": "^7.12.5", "@types/aria-query": "^5.0.1", "aria-query": "5.3.0", "chalk": "^4.1.0", "dom-accessibility-api": "^0.5.9", "lz-string": "^1.5.0", "pretty-format": "^27.0.2" } }, "sha512-pemlzrSESWbdAloYml3bAJMEfNh1Z7EduzqPKprCH5S341frlpYnUEW0H72dLxa6IsYr+mPno20GiSm+h9dEdQ=="], + "@testing-library/dom": ["@testing-library/dom@10.4.1", "", { "dependencies": { "@babel/code-frame": "^7.10.4", "@babel/runtime": "^7.12.5", "@types/aria-query": "^5.0.1", "aria-query": "5.3.0", "dom-accessibility-api": "^0.5.9", "lz-string": "^1.5.0", "picocolors": "1.1.1", "pretty-format": "^27.0.2" } }, 
"sha512-o4PXJQidqJl82ckFaXUeoAW+XysPLauYI43Abki5hABd853iMhitooc6znOnczgbTYmEP6U6/y1ZyKAIsvMKGg=="], "@testing-library/jest-dom": ["@testing-library/jest-dom@6.9.1", "", { "dependencies": { "@adobe/css-tools": "^4.4.0", "aria-query": "^5.0.0", "css.escape": "^1.5.1", "dom-accessibility-api": "^0.6.3", "picocolors": "^1.1.1", "redent": "^3.0.0" } }, "sha512-zIcONa+hVtVSSep9UT3jZ5rizo2BsxgyDYU7WFD5eICBE7no3881HGeb/QkGfsJs6JTkY1aQhT7rIPC7e+0nnA=="], @@ -704,7 +704,7 @@ "@types/body-parser": ["@types/body-parser@1.19.6", "", { "dependencies": { "@types/connect": "*", "@types/node": "*" } }, "sha512-HLFeCYgz89uk22N5Qg3dvGvsv46B8GLvKKo1zKG4NybA8U2DiEO3w9lqGg29t/tfLRJpJ6iQxnVw4OnB7MoM9g=="], - "@types/bun": ["@types/bun@1.3.0", "", { "dependencies": { "bun-types": "1.3.0" } }, "sha512-+lAGCYjXjip2qY375xX/scJeVRmZ5cY0wyHYyCYxNcdEXrQ4AOe3gACgd4iQ8ksOslJtW4VNxBJ8llUwc3a6AA=="], + "@types/bun": ["@types/bun@1.3.1", "", { "dependencies": { "bun-types": "1.3.1" } }, "sha512-4jNMk2/K9YJtfqwoAa28c8wK+T7nvJFOjxI4h/7sORWcypRNxBpr+TPNaCfVWq70tLCJsqoFwcf0oI0JU/fvMQ=="], "@types/cacheable-request": ["@types/cacheable-request@6.0.3", "", { "dependencies": { "@types/http-cache-semantics": "*", "@types/keyv": "^3.1.4", "@types/node": "*", "@types/responselike": "^1.0.0" } }, "sha512-IQ3EbTzGxIigb1I3qPZc1rWJnH0BmSKv5QYTalEwweFvyBDLSAe24zP0le/hyi7ecGfZVlIVAg4BZqb8WBwKqw=="], @@ -792,7 +792,7 @@ "@types/estree-jsx": ["@types/estree-jsx@1.0.5", "", { "dependencies": { "@types/estree": "*" } }, "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg=="], - "@types/express": ["@types/express@5.0.3", "", { "dependencies": { "@types/body-parser": "*", "@types/express-serve-static-core": "^5.0.0", "@types/serve-static": "*" } }, "sha512-wGA0NX93b19/dZC1J18tKWVIYWyyF2ZjT9vin/NRu0qzzvfVzWjs04iq2rQ3H65vCTQYlRqs3YHfY7zjdV+9Kw=="], + "@types/express": ["@types/express@5.0.5", "", { "dependencies": { "@types/body-parser": "*", "@types/express-serve-static-core": "^5.0.0", "@types/serve-static": "^1" } }, "sha512-LuIQOcb6UmnF7C1PCFmEU1u2hmiHL43fgFQX67sN3H4Z+0Yk0Neo++mFsBjhOAuLzvlQeqAAkeDOZrJs9rzumQ=="], "@types/express-serve-static-core": ["@types/express-serve-static-core@5.1.0", "", { "dependencies": { "@types/node": "*", "@types/qs": "*", "@types/range-parser": "*", "@types/send": "*" } }, "sha512-jnHMsrd0Mwa9Cf4IdOzbz543y4XJepXrbia2T4b6+spXC2We3t1y6K44D3mR8XMFSXMCf3/l7rCgddfx7UNVBA=="], @@ -800,8 +800,6 @@ "@types/geojson": ["@types/geojson@7946.0.16", "", {}, "sha512-6C8nqWur3j98U6+lXDfTUWIfgvZU+EumvpHKcYjujKH7woYyLj2sUmff0tRhrqM7BohUw7Pz3ZB1jj2gW9Fvmg=="], - "@types/graceful-fs": ["@types/graceful-fs@4.1.9", "", { "dependencies": { "@types/node": "*" } }, "sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ=="], - "@types/hast": ["@types/hast@3.0.4", "", { "dependencies": { "@types/unist": "*" } }, "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ=="], "@types/http-cache-semantics": ["@types/http-cache-semantics@4.0.4", "", {}, "sha512-1m0bIFVc7eJWyve9S0RnuRgcQqF/Xd5QsUZAZeQFr1Q3/p9JWoQQEqmVy+DPTNpGXwhgIetAoYF8JSc33q29QA=="], @@ -838,7 +836,7 @@ "@types/ms": ["@types/ms@2.1.0", "", {}, "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA=="], - "@types/node": ["@types/node@24.9.1", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-QoiaXANRkSXK6p0Duvt56W208du4P9Uye9hWLWgGMDTEoKPhuenzNcC4vGUmrNkiOKTlIrBoyNQYNpSwfEZXSg=="], + "@types/node": 
["@types/node@24.9.2", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-uWN8YqxXxqFMX2RqGOrumsKeti4LlmIMIyV0lgut4jx7KQBcBiW6vkDtIBvHnHIquwNfJhk8v2OtmO8zXWHfPA=="], "@types/plist": ["@types/plist@3.0.5", "", { "dependencies": { "@types/node": "*", "xmlbuilder": ">=11.0.1" } }, "sha512-E6OCaRmAe4WDmWNsL/9RMqdkkzDCY1etutkflWk4c+AcjDU07Pcz1fQwTX0TQz+Pxqn9i4L1TU3UFpjnrcDgxA=="], @@ -856,9 +854,9 @@ "@types/responselike": ["@types/responselike@1.0.3", "", { "dependencies": { "@types/node": "*" } }, "sha512-H/+L+UkTV33uf49PH5pCAUBVPNj2nDBXTN+qS1dOwyyg24l3CcicicCA7ca+HMvJBZcFgl5r8e+RR6elsb4Lyw=="], - "@types/send": ["@types/send@0.17.5", "", { "dependencies": { "@types/mime": "^1", "@types/node": "*" } }, "sha512-z6F2D3cOStZvuk2SaP6YrwkNO65iTZcwA2ZkSABegdkAh/lf+Aa/YQndZVfmEXT5vgAp6zv06VQ3ejSVjAny4w=="], + "@types/send": ["@types/send@0.17.6", "", { "dependencies": { "@types/mime": "^1", "@types/node": "*" } }, "sha512-Uqt8rPBE8SY0RK8JB1EzVOIZ32uqy8HwdxCnoCOsYrvnswqmFZ/k+9Ikidlk/ImhsdvBsloHbAlewb2IEBV/Og=="], - "@types/serve-static": ["@types/serve-static@1.15.9", "", { "dependencies": { "@types/http-errors": "*", "@types/node": "*", "@types/send": "<1" } }, "sha512-dOTIuqpWLyl3BBXU3maNQsS4A3zuuoYRNIvYSxxhebPfXg2mzWQEPne/nlJ37yOse6uGgR386uTpdsx4D0QZWA=="], + "@types/serve-static": ["@types/serve-static@1.15.10", "", { "dependencies": { "@types/http-errors": "*", "@types/node": "*", "@types/send": "<1" } }, "sha512-tRs1dB+g8Itk72rlSI2ZrW6vZg0YrLI81iQSTkMmOqnqCaNr/8Ek4VwWcN5vZgCYWbg/JJSGBlUaYGAOP73qBw=="], "@types/stack-utils": ["@types/stack-utils@2.0.3", "", {}, "sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw=="], @@ -874,7 +872,7 @@ "@types/ws": ["@types/ws@8.18.1", "", { "dependencies": { "@types/node": "*" } }, "sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg=="], - "@types/yargs": ["@types/yargs@17.0.33", "", { "dependencies": { "@types/yargs-parser": "*" } }, "sha512-WpxBCKWPLr4xSsHgz511rFJAM+wS28w2zEO1QDNY5zM/S8ok70NNfztH0xwhqKyaK0OHCbN98LDAZuy1ctxDkA=="], + "@types/yargs": ["@types/yargs@17.0.34", "", { "dependencies": { "@types/yargs-parser": "*" } }, "sha512-KExbHVa92aJpw9WDQvzBaGVE2/Pz+pLZQloT2hjL8IqsZnV62rlPOYvNnLmf/L2dyllfVUOVBj64M0z/46eR2A=="], "@types/yargs-parser": ["@types/yargs-parser@21.0.3", "", {}, "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ=="], @@ -900,21 +898,21 @@ "@typescript-eslint/visitor-keys": ["@typescript-eslint/visitor-keys@8.46.2", "", { "dependencies": { "@typescript-eslint/types": "8.46.2", "eslint-visitor-keys": "^4.2.1" } }, "sha512-tUFMXI4gxzzMXt4xpGJEsBsTox0XbNQ1y94EwlD/CuZwFcQP79xfQqMhau9HsRc/J0cAPA/HZt1dZPtGn9V/7w=="], - "@typescript/native-preview": ["@typescript/native-preview@7.0.0-dev.20251023.1", "", { "optionalDependencies": { "@typescript/native-preview-darwin-arm64": "7.0.0-dev.20251023.1", "@typescript/native-preview-darwin-x64": "7.0.0-dev.20251023.1", "@typescript/native-preview-linux-arm": "7.0.0-dev.20251023.1", "@typescript/native-preview-linux-arm64": "7.0.0-dev.20251023.1", "@typescript/native-preview-linux-x64": "7.0.0-dev.20251023.1", "@typescript/native-preview-win32-arm64": "7.0.0-dev.20251023.1", "@typescript/native-preview-win32-x64": "7.0.0-dev.20251023.1" }, "bin": { "tsgo": "bin/tsgo.js" } }, "sha512-vR8Hhj/6XYWzq+MquAncZeXjNdmncT3Jf5avdrMIWHYnmjWqcHtIX61NM3N32k2vcfoGfiHZgMGN4BCYmlmp0Q=="], + "@typescript/native-preview": ["@typescript/native-preview@7.0.0-dev.20251029.1", "", { 
"optionalDependencies": { "@typescript/native-preview-darwin-arm64": "7.0.0-dev.20251029.1", "@typescript/native-preview-darwin-x64": "7.0.0-dev.20251029.1", "@typescript/native-preview-linux-arm": "7.0.0-dev.20251029.1", "@typescript/native-preview-linux-arm64": "7.0.0-dev.20251029.1", "@typescript/native-preview-linux-x64": "7.0.0-dev.20251029.1", "@typescript/native-preview-win32-arm64": "7.0.0-dev.20251029.1", "@typescript/native-preview-win32-x64": "7.0.0-dev.20251029.1" }, "bin": { "tsgo": "bin/tsgo.js" } }, "sha512-IRmYCDgwZQEfjy2GNJnQbqoRUrvdCbzLE0sLhwc6TP4I0Hx5TnHv3sJGKAgdmcbHmKHtwJeppXjgTRGtFTWRHQ=="], - "@typescript/native-preview-darwin-arm64": ["@typescript/native-preview-darwin-arm64@7.0.0-dev.20251023.1", "", { "os": "darwin", "cpu": "arm64" }, "sha512-Qe8KKzhe+bEn84+c90DPBYMkLZ1Q6709DmxStlhdSJycO4GAXlURcLyFAegbLGUPen2oU1NISFlCuOoGUDufvw=="], + "@typescript/native-preview-darwin-arm64": ["@typescript/native-preview-darwin-arm64@7.0.0-dev.20251029.1", "", { "os": "darwin", "cpu": "arm64" }, "sha512-DBJ3jFP6/MaQj/43LN1TC7tjR4SXZUNDnREiVjtFzpOG4Q71D1LB6QryskkRZsNtxLaTuVV57l2ubCE8tNmz0w=="], - "@typescript/native-preview-darwin-x64": ["@typescript/native-preview-darwin-x64@7.0.0-dev.20251023.1", "", { "os": "darwin", "cpu": "x64" }, "sha512-1WDzpaluw8y4qfOGTyAFHRskEcg/qPSYQwkDj3jw9lLpYwhXo6uqZ7TmPEX9QhzjtUvmMCnqq4hvwPN/0e3h8Q=="], + "@typescript/native-preview-darwin-x64": ["@typescript/native-preview-darwin-x64@7.0.0-dev.20251029.1", "", { "os": "darwin", "cpu": "x64" }, "sha512-fnxZZtlXeud6f3bev3q50QMR+FrnuTyVr5akp5G2/o4jfkqLV6cKzseGnY6so+ftwfwP/PX3GOkfL6Ag8NzR0Q=="], - "@typescript/native-preview-linux-arm": ["@typescript/native-preview-linux-arm@7.0.0-dev.20251023.1", "", { "os": "linux", "cpu": "arm" }, "sha512-Q/GxNqqqN3LNVayrWrcdV8aB1tzDbAPWeYqpvAeJpaeioIPXpcA+nqmw9yLkgCQbWMD/YA2Dum8otWtYP6sUyQ=="], + "@typescript/native-preview-linux-arm": ["@typescript/native-preview-linux-arm@7.0.0-dev.20251029.1", "", { "os": "linux", "cpu": "arm" }, "sha512-1ok8pxcIlwMTMggySPIVt926lymLWNhCgPTzO751zKFTDTJcmpzmpmSWbiFQQ3fcPzO8LocsLXRfBwYDd/uqQA=="], - "@typescript/native-preview-linux-arm64": ["@typescript/native-preview-linux-arm64@7.0.0-dev.20251023.1", "", { "os": "linux", "cpu": "arm64" }, "sha512-Q4jcLjgP6GyUBFNgM9bQX5Scsq+RYFVEXkwC1a0f7Jpz8u3qzWz9VRJNzubHcXqFzCGbru0YPN5bZMylNOlP+g=="], + "@typescript/native-preview-linux-arm64": ["@typescript/native-preview-linux-arm64@7.0.0-dev.20251029.1", "", { "os": "linux", "cpu": "arm64" }, "sha512-WK/N4Tk9nxI+k6AwJ7d80Gnd4+8kbBwmryIgOGPQNNvNJticYg6QiQsFGgC+HnCqvWDQ0fAyW+wdcPG6fwn/EA=="], - "@typescript/native-preview-linux-x64": ["@typescript/native-preview-linux-x64@7.0.0-dev.20251023.1", "", { "os": "linux", "cpu": "x64" }, "sha512-JH5LJMcUPWuCBPgrGybSSKoM4ktpBgxIBCLhunpL0z9vMxHOAXMbfLFu8cdM8X+rr6H+C0IDi/mEvUqMNOvlsA=="], + "@typescript/native-preview-linux-x64": ["@typescript/native-preview-linux-x64@7.0.0-dev.20251029.1", "", { "os": "linux", "cpu": "x64" }, "sha512-GvTl9BeItX0Ox0wXiMIHkktl9sCTkTPBe6f6hEs4XfJlAKm+JHbYtB9UEs62QyPYBFMx2phCytVNejpaUZRJmQ=="], - "@typescript/native-preview-win32-arm64": ["@typescript/native-preview-win32-arm64@7.0.0-dev.20251023.1", "", { "os": "win32", "cpu": "arm64" }, "sha512-8n/uGR9pwkf3VO8Pok/0TOo0SUyDRlFdE7WWgundGz+X3rlSZYdi7fI9mFYmnSSFOOB7gKbiE0fFFSTIcDY36Q=="], + "@typescript/native-preview-win32-arm64": ["@typescript/native-preview-win32-arm64@7.0.0-dev.20251029.1", "", { "os": "win32", "cpu": "arm64" }, 
"sha512-BUEC+M6gViaa/zDzOjAOEqpOZeUJxuwrjwOokqxXyUavX+mC6zb6ALqx4r7GAWrfY9sSvGUacW4ZbqDTXe8KAg=="], - "@typescript/native-preview-win32-x64": ["@typescript/native-preview-win32-x64@7.0.0-dev.20251023.1", "", { "os": "win32", "cpu": "x64" }, "sha512-GUz7HU6jSUwHEFauwrtdsXdbOVEQ0qv0Jaz3HJeUx+DrmU8Zl+FM1weOyq1GXmFDjw3dzzR5yIxCld3M3SMT6Q=="], + "@typescript/native-preview-win32-x64": ["@typescript/native-preview-win32-x64@7.0.0-dev.20251029.1", "", { "os": "win32", "cpu": "x64" }, "sha512-ODcXFgM62KpXxHqG5NMG+ipBqTbQ1pGkrzSByBwgRx0c/gTUhgML8UT7iK3nTrTtp9OBgPYPLLDNwiSLyzaIxA=="], "@ungap/structured-clone": ["@ungap/structured-clone@1.3.0", "", {}, "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g=="], @@ -982,7 +980,7 @@ "aggregate-error": ["aggregate-error@3.1.0", "", { "dependencies": { "clean-stack": "^2.0.0", "indent-string": "^4.0.0" } }, "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA=="], - "ai": ["ai@5.0.77", "", { "dependencies": { "@ai-sdk/gateway": "2.0.0", "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.12", "@opentelemetry/api": "1.9.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-w0xP/guV27qLUR+60ru7dSDfF1Wlk6lPEHtXPBLfa8TNQ8Qc4FZ1RE9UGAdZmZU396FA6lKtP9P89Jzb5Z+Hnw=="], + "ai": ["ai@5.0.81", "", { "dependencies": { "@ai-sdk/gateway": "2.0.2", "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.13", "@opentelemetry/api": "1.9.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-SB7oMC9QSpIu1VLswFTZuhhpfQfrGtFBUbWLtHBkhjWZIQskjtcdEhB+N4yO9hscdc2wYtjw/tacgoxX93QWFw=="], "ai-tokenizer": ["ai-tokenizer@1.0.3", "", { "peerDependencies": { "ai": "^5.0.0" }, "optionalPeers": ["ai"] }, "sha512-S2AQmQclsFVo79cu6FRGXwFQ0/0g+uqiEHLDvK7KLTUt8BdBE1Sf9oMnH5xBw2zxUmFWRx91GndvwyW6pw+hHw=="], @@ -1054,11 +1052,11 @@ "available-typed-arrays": ["available-typed-arrays@1.0.7", "", { "dependencies": { "possible-typed-array-names": "^1.0.0" } }, "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ=="], - "axios": ["axios@1.12.2", "", { "dependencies": { "follow-redirects": "^1.15.6", "form-data": "^4.0.4", "proxy-from-env": "^1.1.0" } }, "sha512-vMJzPewAlRyOgxV2dU0Cuz2O8zzzx9VYtbJOaBgXFeLc4IV/Eg50n4LowmehOOR61S8ZMpc2K5Sa7g6A4jfkUw=="], + "axios": ["axios@1.13.1", "", { "dependencies": { "follow-redirects": "^1.15.6", "form-data": "^4.0.4", "proxy-from-env": "^1.1.0" } }, "sha512-hU4EGxxt+j7TQijx1oYdAjw4xuIp1wRQSsbMFwSthCWeBQur1eF+qJ5iQ5sN3Tw8YRzQNKb8jszgBdMDVqwJcw=="], "babel-jest": ["babel-jest@30.2.0", "", { "dependencies": { "@jest/transform": "30.2.0", "@types/babel__core": "^7.20.5", "babel-plugin-istanbul": "^7.0.1", "babel-preset-jest": "30.2.0", "chalk": "^4.1.2", "graceful-fs": "^4.2.11", "slash": "^3.0.0" }, "peerDependencies": { "@babel/core": "^7.11.0 || ^8.0.0-0" } }, "sha512-0YiBEOxWqKkSQWL9nNGGEgndoeL0ZpWrbLMNL5u/Kaxrli3Eaxlt3ZtIDktEvXt4L/R9r3ODr2zKwGM/2BjxVw=="], - "babel-plugin-istanbul": ["babel-plugin-istanbul@6.1.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.0.0", "@istanbuljs/load-nyc-config": "^1.0.0", "@istanbuljs/schema": "^0.1.2", "istanbul-lib-instrument": "^5.0.4", "test-exclude": "^6.0.0" } }, "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA=="], + "babel-plugin-istanbul": ["babel-plugin-istanbul@7.0.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.0.0", "@istanbuljs/load-nyc-config": "^1.0.0", 
"@istanbuljs/schema": "^0.1.3", "istanbul-lib-instrument": "^6.0.2", "test-exclude": "^6.0.0" } }, "sha512-D8Z6Qm8jCvVXtIRkBnqNHX0zJ37rQcFJ9u8WOS6tkYOsRdHBzypCstaxWiu5ZIlqQtviRYbgnRLSoCEvjqcqbA=="], "babel-plugin-jest-hoist": ["babel-plugin-jest-hoist@30.2.0", "", { "dependencies": { "@types/babel__core": "^7.20.5" } }, "sha512-ftzhzSGMUnOzcCXd6WHdBGMyuwy15Wnn0iyyWGKgBDLxf9/s5ABuraCSpBX2uG0jUg4rqJnxsLc5+oYBqoxVaA=="], @@ -1110,7 +1108,7 @@ "builder-util-runtime": ["builder-util-runtime@9.2.4", "", { "dependencies": { "debug": "^4.3.4", "sax": "^1.2.4" } }, "sha512-upp+biKpN/XZMLim7aguUyW8s0FUpDvOtK6sbanMFDAMBzpHDqdhgVYm6zc9HJ6nWo7u2Lxk60i2M6Jd3aiNrA=="], - "bun-types": ["bun-types@1.3.0", "", { "dependencies": { "@types/node": "*" }, "peerDependencies": { "@types/react": "^19" } }, "sha512-u8X0thhx+yJ0KmkxuEo9HAtdfgCBaM/aI9K90VQcQioAmkVp3SG3FkwWGibUFz3WdXAdcsqOcbU40lK7tbHdkQ=="], + "bun-types": ["bun-types@1.3.1", "", { "dependencies": { "@types/node": "*" }, "peerDependencies": { "@types/react": "^19" } }, "sha512-NMrcy7smratanWJ2mMXdpatalovtxVggkj11bScuWuiOoXTiKIu2eVS1/7qbyI/4yHedtsn175n4Sm4JcdHLXw=="], "bytes": ["bytes@3.1.2", "", {}, "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg=="], @@ -1310,7 +1308,7 @@ "d3-zoom": ["d3-zoom@3.0.0", "", { "dependencies": { "d3-dispatch": "1 - 3", "d3-drag": "2 - 3", "d3-interpolate": "1 - 3", "d3-selection": "2 - 3", "d3-transition": "2 - 3" } }, "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw=="], - "dagre-d3-es": ["dagre-d3-es@7.0.11", "", { "dependencies": { "d3": "^7.9.0", "lodash-es": "^4.17.21" } }, "sha512-tvlJLyQf834SylNKax8Wkzco/1ias1OPw8DcUMDE7oUIoSEW25riQVuiu/0OWEFqT0cxHT3Pa9/D82Jr47IONw=="], + "dagre-d3-es": ["dagre-d3-es@7.0.13", "", { "dependencies": { "d3": "^7.9.0", "lodash-es": "^4.17.21" } }, "sha512-efEhnxpSuwpYOKRm/L5KbqoZmNNukHa/Flty4Wp62JRvgH2ojwVgPgdYyr4twpieZnyRDdIH7PY2mopX26+j2Q=="], "data-view-buffer": ["data-view-buffer@1.0.2", "", { "dependencies": { "call-bound": "^1.0.3", "es-errors": "^1.3.0", "is-data-view": "^1.0.2" } }, "sha512-EmKO5V3OLXh1rtK2wgXRansaK1/mtVdTUEiEI0W8RkvgT05kfxaH29PliLnpLP73yYO6142Q72QNa8Wx/A5CqQ=="], @@ -1420,7 +1418,7 @@ "electron-publish": ["electron-publish@24.13.1", "", { "dependencies": { "@types/fs-extra": "^9.0.11", "builder-util": "24.13.1", "builder-util-runtime": "9.2.4", "chalk": "^4.1.2", "fs-extra": "^10.1.0", "lazy-val": "^1.0.5", "mime": "^2.5.2" } }, "sha512-2ZgdEqJ8e9D17Hwp5LEq5mLQPjqU3lv/IALvgp+4W8VeNhryfGhYEQC/PgDPMrnWUp+l60Ou5SJLsu+k4mhQ8A=="], - "electron-to-chromium": ["electron-to-chromium@1.5.239", "", {}, "sha512-1y5w0Zsq39MSPmEjHjbizvhYoTaulVtivpxkp5q5kaPmQtsK6/2nvAzGRxNMS9DoYySp9PkW0MAQDwU1m764mg=="], + "electron-to-chromium": ["electron-to-chromium@1.5.243", "", {}, "sha512-ZCphxFW3Q1TVhcgS9blfut1PX8lusVi2SvXQgmEEnK4TCmE1JhH2JkjJN+DNt0pJJwfBri5AROBnz2b/C+YU9g=="], "electron-updater": ["electron-updater@6.6.2", "", { "dependencies": { "builder-util-runtime": "9.3.1", "fs-extra": "^10.1.0", "js-yaml": "^4.1.0", "lazy-val": "^1.0.5", "lodash.escaperegexp": "^4.1.2", "lodash.isequal": "^4.5.0", "semver": "^7.6.3", "tiny-typed-emitter": "^2.1.0" } }, "sha512-Cr4GDOkbAUqRHP5/oeOmH/L2Bn6+FQPxVLZtPbcmKZC63a1F3uu5EefYOssgZXG3u/zBlubbJ5PJdITdMVggbw=="], @@ -1466,8 +1464,6 @@ "esbuild": ["esbuild@0.25.11", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.25.11", "@esbuild/android-arm": "0.25.11", "@esbuild/android-arm64": "0.25.11", "@esbuild/android-x64": "0.25.11", 
"@esbuild/darwin-arm64": "0.25.11", "@esbuild/darwin-x64": "0.25.11", "@esbuild/freebsd-arm64": "0.25.11", "@esbuild/freebsd-x64": "0.25.11", "@esbuild/linux-arm": "0.25.11", "@esbuild/linux-arm64": "0.25.11", "@esbuild/linux-ia32": "0.25.11", "@esbuild/linux-loong64": "0.25.11", "@esbuild/linux-mips64el": "0.25.11", "@esbuild/linux-ppc64": "0.25.11", "@esbuild/linux-riscv64": "0.25.11", "@esbuild/linux-s390x": "0.25.11", "@esbuild/linux-x64": "0.25.11", "@esbuild/netbsd-arm64": "0.25.11", "@esbuild/netbsd-x64": "0.25.11", "@esbuild/openbsd-arm64": "0.25.11", "@esbuild/openbsd-x64": "0.25.11", "@esbuild/openharmony-arm64": "0.25.11", "@esbuild/sunos-x64": "0.25.11", "@esbuild/win32-arm64": "0.25.11", "@esbuild/win32-ia32": "0.25.11", "@esbuild/win32-x64": "0.25.11" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-KohQwyzrKTQmhXDW1PjCv3Tyspn9n5GcY2RTDqeORIdIJY8yKIF7sTSopFmn/wpMPW4rdPXI0UE5LJLuq3bx0Q=="], - "esbuild-register": ["esbuild-register@3.6.0", "", { "dependencies": { "debug": "^4.3.4" }, "peerDependencies": { "esbuild": ">=0.12 <1" } }, "sha512-H2/S7Pm8a9CL1uhp9OvjwrBh5Pvx0H8qVOxNu8Wed9Y7qv56MPtq+GGM8RJpq6glYJn9Wspr8uw7l55uyinNeg=="], - "escalade": ["escalade@3.2.0", "", {}, "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA=="], "escape-html": ["escape-html@1.0.3", "", {}, "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow=="], @@ -1906,7 +1902,7 @@ "jest-snapshot": ["jest-snapshot@30.2.0", "", { "dependencies": { "@babel/core": "^7.27.4", "@babel/generator": "^7.27.5", "@babel/plugin-syntax-jsx": "^7.27.1", "@babel/plugin-syntax-typescript": "^7.27.1", "@babel/types": "^7.27.3", "@jest/expect-utils": "30.2.0", "@jest/get-type": "30.1.0", "@jest/snapshot-utils": "30.2.0", "@jest/transform": "30.2.0", "@jest/types": "30.2.0", "babel-preset-current-node-syntax": "^1.2.0", "chalk": "^4.1.2", "expect": "30.2.0", "graceful-fs": "^4.2.11", "jest-diff": "30.2.0", "jest-matcher-utils": "30.2.0", "jest-message-util": "30.2.0", "jest-util": "30.2.0", "pretty-format": "30.2.0", "semver": "^7.7.2", "synckit": "^0.11.8" } }, "sha512-5WEtTy2jXPFypadKNpbNkZ72puZCa6UjSr/7djeecHWOu7iYhSXSnHScT8wBz3Rn8Ena5d5RYRcsyKIeqG1IyA=="], - "jest-util": ["jest-util@29.7.0", "", { "dependencies": { "@jest/types": "^29.6.3", "@types/node": "*", "chalk": "^4.0.0", "ci-info": "^3.2.0", "graceful-fs": "^4.2.9", "picomatch": "^2.2.3" } }, "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA=="], + "jest-util": ["jest-util@30.2.0", "", { "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", "chalk": "^4.1.2", "ci-info": "^4.2.0", "graceful-fs": "^4.2.11", "picomatch": "^4.0.2" } }, "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA=="], "jest-validate": ["jest-validate@30.2.0", "", { "dependencies": { "@jest/get-type": "30.1.0", "@jest/types": "30.2.0", "camelcase": "^6.3.0", "chalk": "^4.1.2", "leven": "^3.1.0", "pretty-format": "30.2.0" } }, "sha512-FBGWi7dP2hpdi8nBoWxSsLvBFewKAg0+uSQwBaof4Y4DPgBabXgpSYC5/lR7VmnIlSpASmCi/ntRWPbv7089Pw=="], @@ -2046,7 +2042,7 @@ "lz-string": ["lz-string@1.5.0", "", { "bin": { "lz-string": "bin/bin.js" } }, "sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ=="], - "magic-string": ["magic-string@0.30.19", "", { "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.5" } }, 
"sha512-2N21sPY9Ws53PZvsEpVtNuSW+ScYbQdp4b9qUaL+9QkHUrGFKo56Lg9Emg5s9V/qrtNBmiR01sYhUOwu3H+VOw=="], + "magic-string": ["magic-string@0.30.21", "", { "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.5" } }, "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ=="], "make-dir": ["make-dir@3.1.0", "", { "dependencies": { "semver": "^6.0.0" } }, "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw=="], @@ -2106,7 +2102,7 @@ "merge2": ["merge2@1.4.1", "", {}, "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg=="], - "mermaid": ["mermaid@11.12.0", "", { "dependencies": { "@braintree/sanitize-url": "^7.1.1", "@iconify/utils": "^3.0.1", "@mermaid-js/parser": "^0.6.2", "@types/d3": "^7.4.3", "cytoscape": "^3.29.3", "cytoscape-cose-bilkent": "^4.1.0", "cytoscape-fcose": "^2.2.0", "d3": "^7.9.0", "d3-sankey": "^0.12.3", "dagre-d3-es": "7.0.11", "dayjs": "^1.11.18", "dompurify": "^3.2.5", "katex": "^0.16.22", "khroma": "^2.1.0", "lodash-es": "^4.17.21", "marked": "^16.2.1", "roughjs": "^4.6.6", "stylis": "^4.3.6", "ts-dedent": "^2.2.0", "uuid": "^11.1.0" } }, "sha512-ZudVx73BwrMJfCFmSSJT84y6u5brEoV8DOItdHomNLz32uBjNrelm7mg95X7g+C6UoQH/W6mBLGDEDv73JdxBg=="], + "mermaid": ["mermaid@11.12.1", "", { "dependencies": { "@braintree/sanitize-url": "^7.1.1", "@iconify/utils": "^3.0.1", "@mermaid-js/parser": "^0.6.3", "@types/d3": "^7.4.3", "cytoscape": "^3.29.3", "cytoscape-cose-bilkent": "^4.1.0", "cytoscape-fcose": "^2.2.0", "d3": "^7.9.0", "d3-sankey": "^0.12.3", "dagre-d3-es": "7.0.13", "dayjs": "^1.11.18", "dompurify": "^3.2.5", "katex": "^0.16.22", "khroma": "^2.1.0", "lodash-es": "^4.17.21", "marked": "^16.2.1", "roughjs": "^4.6.6", "stylis": "^4.3.6", "ts-dedent": "^2.2.0", "uuid": "^11.1.0" } }, "sha512-UlIZrRariB11TY1RtTgUWp65tphtBv4CSq7vyS2ZZ2TgoMjs2nloq+wFqxiwcxlhHUvs7DPGgMjs2aeQxz5h9g=="], "micromark": ["micromark@4.0.2", "", { "dependencies": { "@types/debug": "^4.0.0", "debug": "^4.0.0", "decode-named-character-reference": "^1.0.0", "devlop": "^1.0.0", "micromark-core-commonmark": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-combine-extensions": "^2.0.0", "micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-encode": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-resolve-all": "^2.0.0", "micromark-util-sanitize-uri": "^2.0.0", "micromark-util-subtokenize": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA=="], @@ -2338,7 +2334,7 @@ "postcss-value-parser": ["postcss-value-parser@4.2.0", "", {}, "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ=="], - "posthog-js": ["posthog-js@1.279.3", "", { "dependencies": { "@posthog/core": "1.3.1", "core-js": "^3.38.1", "fflate": "^0.4.8", "preact": "^10.19.3", "web-vitals": "^4.2.4" } }, "sha512-09+hUgwY4W/+yTHk2mbxNiuu6NBCFzgaAcYkio1zphKZYcoQIehHOQsS1C8MHoyl3o8diZ98gAl2VJ6rS4GHaQ=="], + "posthog-js": ["posthog-js@1.281.0", "", { "dependencies": { "@posthog/core": "1.4.0", "core-js": "^3.38.1", "fflate": "^0.4.8", "preact": "^10.19.3", "web-vitals": "^4.2.4" } }, "sha512-t3sAlgVozpU1W1ppiF5zLG6eBRPUs0hmtxN8R1V7P0qZFmnECshAAk2cBxCsxEanadT3iUpS8Z7crBytATqWQQ=="], "preact": ["preact@10.27.2", "", {}, 
"sha512-5SYSgFKSyhCbk6SrXyMpqjb5+MQBgfvEKE/OC+PujcY34sOpqtr+0AZQtPYx5IA6VxynQ7rUPCtKzyovpj9Bpg=="], @@ -2544,7 +2540,7 @@ "shescape": ["shescape@2.1.6", "", { "dependencies": { "which": "^3.0.0 || ^4.0.0 || ^5.0.0" } }, "sha512-c9Ns1I+Tl0TC+cpsOT1FeZcvFalfd0WfHeD/CMccJH20xwochmJzq6AqtenndlyAw/BUi3BMcv92dYLVrqX+dw=="], - "shiki": ["shiki@3.13.0", "", { "dependencies": { "@shikijs/core": "3.13.0", "@shikijs/engine-javascript": "3.13.0", "@shikijs/engine-oniguruma": "3.13.0", "@shikijs/langs": "3.13.0", "@shikijs/themes": "3.13.0", "@shikijs/types": "3.13.0", "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4" } }, "sha512-aZW4l8Og16CokuCLf8CF8kq+KK2yOygapU5m3+hoGw0Mdosc6fPitjM+ujYarppj5ZIKGyPDPP1vqmQhr+5/0g=="], + "shiki": ["shiki@3.14.0", "", { "dependencies": { "@shikijs/core": "3.14.0", "@shikijs/engine-javascript": "3.14.0", "@shikijs/engine-oniguruma": "3.14.0", "@shikijs/langs": "3.14.0", "@shikijs/themes": "3.14.0", "@shikijs/types": "3.14.0", "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4" } }, "sha512-J0yvpLI7LSig3Z3acIuDLouV5UCKQqu8qOArwMx+/yPVC3WRMgrP67beaG8F+j4xfEWE0eVC4GeBCIXeOPra1g=="], "side-channel": ["side-channel@1.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "object-inspect": "^1.13.3", "side-channel-list": "^1.0.0", "side-channel-map": "^1.0.1", "side-channel-weakmap": "^1.0.2" } }, "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw=="], @@ -2910,43 +2906,25 @@ "@jest/console/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - "@jest/console/jest-util": ["jest-util@30.2.0", "", { "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", "chalk": "^4.1.2", "ci-info": "^4.2.0", "graceful-fs": "^4.2.11", "picomatch": "^4.0.2" } }, "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA=="], - - "@jest/core/@jest/transform": ["@jest/transform@30.2.0", "", { "dependencies": { "@babel/core": "^7.27.4", "@jest/types": "30.2.0", "@jridgewell/trace-mapping": "^0.3.25", "babel-plugin-istanbul": "^7.0.1", "chalk": "^4.1.2", "convert-source-map": "^2.0.0", "fast-json-stable-stringify": "^2.1.0", "graceful-fs": "^4.2.11", "jest-haste-map": "30.2.0", "jest-regex-util": "30.0.1", "jest-util": "30.2.0", "micromatch": "^4.0.8", "pirates": "^4.0.7", "slash": "^3.0.0", "write-file-atomic": "^5.0.1" } }, "sha512-XsauDV82o5qXbhalKxD7p4TZYYdwcaEXC77PPD2HixEFF+6YGppjrAAQurTl2ECWcEomHBMMNS9AH3kcCFx8jA=="], - "@jest/core/ansi-escapes": ["ansi-escapes@4.3.2", "", { "dependencies": { "type-fest": "^0.21.3" } }, "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ=="], "@jest/core/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], "@jest/core/ci-info": ["ci-info@4.3.1", "", {}, "sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA=="], - "@jest/core/jest-util": ["jest-util@30.2.0", "", { "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", "chalk": "^4.1.2", "ci-info": "^4.2.0", "graceful-fs": "^4.2.11", "picomatch": "^4.0.2" } }, "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA=="], - - "@jest/fake-timers/jest-util": ["jest-util@30.2.0", 
"", { "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", "chalk": "^4.1.2", "ci-info": "^4.2.0", "graceful-fs": "^4.2.11", "picomatch": "^4.0.2" } }, "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA=="], - - "@jest/reporters/@jest/transform": ["@jest/transform@30.2.0", "", { "dependencies": { "@babel/core": "^7.27.4", "@jest/types": "30.2.0", "@jridgewell/trace-mapping": "^0.3.25", "babel-plugin-istanbul": "^7.0.1", "chalk": "^4.1.2", "convert-source-map": "^2.0.0", "fast-json-stable-stringify": "^2.1.0", "graceful-fs": "^4.2.11", "jest-haste-map": "30.2.0", "jest-regex-util": "30.0.1", "jest-util": "30.2.0", "micromatch": "^4.0.8", "pirates": "^4.0.7", "slash": "^3.0.0", "write-file-atomic": "^5.0.1" } }, "sha512-XsauDV82o5qXbhalKxD7p4TZYYdwcaEXC77PPD2HixEFF+6YGppjrAAQurTl2ECWcEomHBMMNS9AH3kcCFx8jA=="], - "@jest/reporters/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], "@jest/reporters/istanbul-lib-instrument": ["istanbul-lib-instrument@6.0.3", "", { "dependencies": { "@babel/core": "^7.23.9", "@babel/parser": "^7.23.9", "@istanbuljs/schema": "^0.1.3", "istanbul-lib-coverage": "^3.2.0", "semver": "^7.5.4" } }, "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q=="], "@jest/reporters/istanbul-lib-source-maps": ["istanbul-lib-source-maps@5.0.6", "", { "dependencies": { "@jridgewell/trace-mapping": "^0.3.23", "debug": "^4.1.1", "istanbul-lib-coverage": "^3.0.0" } }, "sha512-yg2d+Em4KizZC5niWhQaIomgf5WlL4vOOjZ5xGCmF8SnPE/mDWWXgvRExdcpCgh9lLRRa1/fSYp2ymmbJ1pI+A=="], - "@jest/reporters/jest-util": ["jest-util@30.2.0", "", { "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", "chalk": "^4.1.2", "ci-info": "^4.2.0", "graceful-fs": "^4.2.11", "picomatch": "^4.0.2" } }, "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA=="], - "@jest/reporters/string-length": ["string-length@4.0.2", "", { "dependencies": { "char-regex": "^1.0.2", "strip-ansi": "^6.0.0" } }, "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ=="], "@jest/snapshot-utils/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - "@jest/transform/@jest/types": ["@jest/types@29.6.3", "", { "dependencies": { "@jest/schemas": "^29.6.3", "@types/istanbul-lib-coverage": "^2.0.0", "@types/istanbul-reports": "^3.0.0", "@types/node": "*", "@types/yargs": "^17.0.8", "chalk": "^4.0.0" } }, "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw=="], - "@jest/transform/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - "@jest/transform/jest-haste-map": ["jest-haste-map@29.7.0", "", { "dependencies": { "@jest/types": "^29.6.3", "@types/graceful-fs": "^4.1.3", "@types/node": "*", "anymatch": "^3.0.3", "fb-watchman": "^2.0.0", "graceful-fs": "^4.2.9", "jest-regex-util": "^29.6.3", "jest-util": "^29.7.0", "jest-worker": "^29.7.0", "micromatch": "^4.0.4", "walker": "^1.0.8" }, "optionalDependencies": { "fsevents": "^2.3.2" } }, 
"sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA=="], - - "@jest/transform/jest-regex-util": ["jest-regex-util@29.6.3", "", {}, "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg=="], - - "@jest/transform/write-file-atomic": ["write-file-atomic@4.0.2", "", { "dependencies": { "imurmurhash": "^0.1.4", "signal-exit": "^3.0.7" } }, "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg=="], + "@jest/transform/write-file-atomic": ["write-file-atomic@5.0.1", "", { "dependencies": { "imurmurhash": "^0.1.4", "signal-exit": "^4.0.1" } }, "sha512-+QU2zd6OTD8XWIJCbffaiQeH9U73qIqafo1x6V1snCWYGJf6cVE0cDR4D8xRzcEnfI21IFrUPzPGtcPf8AC+Rw=="], "@jest/types/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], @@ -2964,10 +2942,10 @@ "@tailwindcss/oxide-wasm32-wasi/tslib": ["tslib@2.8.1", "", { "bundled": true }, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="], - "@testing-library/dom/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - "@testing-library/dom/pretty-format": ["pretty-format@27.5.1", "", { "dependencies": { "ansi-regex": "^5.0.1", "ansi-styles": "^5.0.0", "react-is": "^17.0.1" } }, "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ=="], + "@testing-library/jest-dom/aria-query": ["aria-query@5.3.2", "", {}, "sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw=="], + "@testing-library/jest-dom/dom-accessibility-api": ["dom-accessibility-api@0.6.3", "", {}, "sha512-7ZgogeTnjuHbo+ct10G9Ffp0mif17idi0IyWNVA/wcwcm7NPOD/WEHVP3n7n3MhXqxoIYm8d6MuZohYWIZ4T3w=="], "@typescript-eslint/typescript-estree/minimatch": ["minimatch@9.0.5", "", { "dependencies": { "brace-expansion": "^2.0.1" } }, "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow=="], @@ -2986,13 +2964,9 @@ "archiver-utils/glob": ["glob@7.2.3", "", { "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", "inherits": "2", "minimatch": "^3.1.1", "once": "^1.3.0", "path-is-absolute": "^1.0.0" } }, "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q=="], - "babel-jest/@jest/transform": ["@jest/transform@30.2.0", "", { "dependencies": { "@babel/core": "^7.27.4", "@jest/types": "30.2.0", "@jridgewell/trace-mapping": "^0.3.25", "babel-plugin-istanbul": "^7.0.1", "chalk": "^4.1.2", "convert-source-map": "^2.0.0", "fast-json-stable-stringify": "^2.1.0", "graceful-fs": "^4.2.11", "jest-haste-map": "30.2.0", "jest-regex-util": "30.0.1", "jest-util": "30.2.0", "micromatch": "^4.0.8", "pirates": "^4.0.7", "slash": "^3.0.0", "write-file-atomic": "^5.0.1" } }, "sha512-XsauDV82o5qXbhalKxD7p4TZYYdwcaEXC77PPD2HixEFF+6YGppjrAAQurTl2ECWcEomHBMMNS9AH3kcCFx8jA=="], - - "babel-jest/babel-plugin-istanbul": ["babel-plugin-istanbul@7.0.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.0.0", "@istanbuljs/load-nyc-config": "^1.0.0", "@istanbuljs/schema": "^0.1.3", "istanbul-lib-instrument": "^6.0.2", "test-exclude": "^6.0.0" } }, 
"sha512-D8Z6Qm8jCvVXtIRkBnqNHX0zJ37rQcFJ9u8WOS6tkYOsRdHBzypCstaxWiu5ZIlqQtviRYbgnRLSoCEvjqcqbA=="], - "babel-jest/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - "babel-plugin-istanbul/istanbul-lib-instrument": ["istanbul-lib-instrument@5.2.1", "", { "dependencies": { "@babel/core": "^7.12.3", "@babel/parser": "^7.14.7", "@istanbuljs/schema": "^0.1.2", "istanbul-lib-coverage": "^3.2.0", "semver": "^6.3.0" } }, "sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg=="], + "babel-plugin-istanbul/istanbul-lib-instrument": ["istanbul-lib-instrument@6.0.3", "", { "dependencies": { "@babel/core": "^7.23.9", "@babel/parser": "^7.23.9", "@istanbuljs/schema": "^0.1.3", "istanbul-lib-coverage": "^3.2.0", "semver": "^7.5.4" } }, "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q=="], "bl/readable-stream": ["readable-stream@3.6.2", "", { "dependencies": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", "util-deprecate": "^1.0.1" } }, "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA=="], @@ -3028,7 +3002,7 @@ "dom-serializer/entities": ["entities@2.2.0", "", {}, "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A=="], - "electron/@types/node": ["@types/node@22.18.12", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-BICHQ67iqxQGFSzfCFTT7MRQ5XcBjG5aeKh5Ok38UBbPe5fxTyE+aHFxwVrGyr8GNlqFMLKD1D3P2K/1ks8tog=="], + "electron/@types/node": ["@types/node@22.18.13", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-Bo45YKIjnmFtv6I1TuC8AaHBbqXtIo+Om5fE4QiU1Tj8QR/qt+8O3BAtOimG5IFmwaWiPmB3Mv3jtYzBA4Us2A=="], "electron-builder/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], @@ -3048,8 +3022,6 @@ "execa/signal-exit": ["signal-exit@3.0.7", "", {}, "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ=="], - "expect/jest-util": ["jest-util@30.2.0", "", { "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", "chalk": "^4.1.2", "ci-info": "^4.2.0", "graceful-fs": "^4.2.11", "picomatch": "^4.0.2" } }, "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA=="], - "fast-glob/glob-parent": ["glob-parent@5.1.2", "", { "dependencies": { "is-glob": "^4.0.1" } }, "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow=="], "filelist/minimatch": ["minimatch@5.1.6", "", { "dependencies": { "brace-expansion": "^2.0.1" } }, "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g=="], @@ -3096,77 +3068,45 @@ "istanbul-lib-report/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - "jest-changed-files/jest-util": ["jest-util@30.2.0", "", { "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", "chalk": "^4.1.2", "ci-info": "^4.2.0", "graceful-fs": "^4.2.11", "picomatch": "^4.0.2" } }, "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA=="], - "jest-circus/chalk": 
["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - "jest-circus/jest-util": ["jest-util@30.2.0", "", { "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", "chalk": "^4.1.2", "ci-info": "^4.2.0", "graceful-fs": "^4.2.11", "picomatch": "^4.0.2" } }, "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA=="], - "jest-cli/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - "jest-cli/jest-util": ["jest-util@30.2.0", "", { "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", "chalk": "^4.1.2", "ci-info": "^4.2.0", "graceful-fs": "^4.2.11", "picomatch": "^4.0.2" } }, "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA=="], - "jest-config/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], "jest-config/ci-info": ["ci-info@4.3.1", "", {}, "sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA=="], - "jest-config/jest-util": ["jest-util@30.2.0", "", { "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", "chalk": "^4.1.2", "ci-info": "^4.2.0", "graceful-fs": "^4.2.11", "picomatch": "^4.0.2" } }, "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA=="], - "jest-diff/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], "jest-each/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - "jest-each/jest-util": ["jest-util@30.2.0", "", { "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", "chalk": "^4.1.2", "ci-info": "^4.2.0", "graceful-fs": "^4.2.11", "picomatch": "^4.0.2" } }, "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA=="], - - "jest-environment-node/jest-util": ["jest-util@30.2.0", "", { "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", "chalk": "^4.1.2", "ci-info": "^4.2.0", "graceful-fs": "^4.2.11", "picomatch": "^4.0.2" } }, "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA=="], - "jest-haste-map/fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="], - "jest-haste-map/jest-util": ["jest-util@30.2.0", "", { "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", "chalk": "^4.1.2", "ci-info": "^4.2.0", "graceful-fs": "^4.2.11", "picomatch": "^4.0.2" } }, "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA=="], - "jest-matcher-utils/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], "jest-message-util/chalk": ["chalk@4.1.2", "", { "dependencies": { 
"ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - "jest-mock/jest-util": ["jest-util@30.2.0", "", { "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", "chalk": "^4.1.2", "ci-info": "^4.2.0", "graceful-fs": "^4.2.11", "picomatch": "^4.0.2" } }, "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA=="], - "jest-process-manager/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], "jest-process-manager/signal-exit": ["signal-exit@3.0.7", "", {}, "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ=="], "jest-resolve/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - "jest-resolve/jest-util": ["jest-util@30.2.0", "", { "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", "chalk": "^4.1.2", "ci-info": "^4.2.0", "graceful-fs": "^4.2.11", "picomatch": "^4.0.2" } }, "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA=="], - - "jest-runner/@jest/transform": ["@jest/transform@30.2.0", "", { "dependencies": { "@babel/core": "^7.27.4", "@jest/types": "30.2.0", "@jridgewell/trace-mapping": "^0.3.25", "babel-plugin-istanbul": "^7.0.1", "chalk": "^4.1.2", "convert-source-map": "^2.0.0", "fast-json-stable-stringify": "^2.1.0", "graceful-fs": "^4.2.11", "jest-haste-map": "30.2.0", "jest-regex-util": "30.0.1", "jest-util": "30.2.0", "micromatch": "^4.0.8", "pirates": "^4.0.7", "slash": "^3.0.0", "write-file-atomic": "^5.0.1" } }, "sha512-XsauDV82o5qXbhalKxD7p4TZYYdwcaEXC77PPD2HixEFF+6YGppjrAAQurTl2ECWcEomHBMMNS9AH3kcCFx8jA=="], - "jest-runner/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - "jest-runner/jest-util": ["jest-util@30.2.0", "", { "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", "chalk": "^4.1.2", "ci-info": "^4.2.0", "graceful-fs": "^4.2.11", "picomatch": "^4.0.2" } }, "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA=="], - "jest-runner/source-map-support": ["source-map-support@0.5.13", "", { "dependencies": { "buffer-from": "^1.0.0", "source-map": "^0.6.0" } }, "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w=="], - "jest-runtime/@jest/transform": ["@jest/transform@30.2.0", "", { "dependencies": { "@babel/core": "^7.27.4", "@jest/types": "30.2.0", "@jridgewell/trace-mapping": "^0.3.25", "babel-plugin-istanbul": "^7.0.1", "chalk": "^4.1.2", "convert-source-map": "^2.0.0", "fast-json-stable-stringify": "^2.1.0", "graceful-fs": "^4.2.11", "jest-haste-map": "30.2.0", "jest-regex-util": "30.0.1", "jest-util": "30.2.0", "micromatch": "^4.0.8", "pirates": "^4.0.7", "slash": "^3.0.0", "write-file-atomic": "^5.0.1" } }, "sha512-XsauDV82o5qXbhalKxD7p4TZYYdwcaEXC77PPD2HixEFF+6YGppjrAAQurTl2ECWcEomHBMMNS9AH3kcCFx8jA=="], - "jest-runtime/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, 
"sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - "jest-runtime/jest-util": ["jest-util@30.2.0", "", { "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", "chalk": "^4.1.2", "ci-info": "^4.2.0", "graceful-fs": "^4.2.11", "picomatch": "^4.0.2" } }, "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA=="], - "jest-runtime/strip-bom": ["strip-bom@4.0.0", "", {}, "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w=="], - "jest-snapshot/@jest/transform": ["@jest/transform@30.2.0", "", { "dependencies": { "@babel/core": "^7.27.4", "@jest/types": "30.2.0", "@jridgewell/trace-mapping": "^0.3.25", "babel-plugin-istanbul": "^7.0.1", "chalk": "^4.1.2", "convert-source-map": "^2.0.0", "fast-json-stable-stringify": "^2.1.0", "graceful-fs": "^4.2.11", "jest-haste-map": "30.2.0", "jest-regex-util": "30.0.1", "jest-util": "30.2.0", "micromatch": "^4.0.8", "pirates": "^4.0.7", "slash": "^3.0.0", "write-file-atomic": "^5.0.1" } }, "sha512-XsauDV82o5qXbhalKxD7p4TZYYdwcaEXC77PPD2HixEFF+6YGppjrAAQurTl2ECWcEomHBMMNS9AH3kcCFx8jA=="], - "jest-snapshot/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - "jest-snapshot/jest-util": ["jest-util@30.2.0", "", { "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", "chalk": "^4.1.2", "ci-info": "^4.2.0", "graceful-fs": "^4.2.11", "picomatch": "^4.0.2" } }, "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA=="], - "jest-snapshot/semver": ["semver@7.7.3", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q=="], - "jest-util/@jest/types": ["@jest/types@29.6.3", "", { "dependencies": { "@jest/schemas": "^29.6.3", "@types/istanbul-lib-coverage": "^2.0.0", "@types/istanbul-reports": "^3.0.0", "@types/node": "*", "@types/yargs": "^17.0.8", "chalk": "^4.0.0" } }, "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw=="], - "jest-util/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - "jest-util/picomatch": ["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="], + "jest-util/ci-info": ["ci-info@4.3.1", "", {}, "sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA=="], "jest-validate/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], @@ -3178,12 +3118,8 @@ "jest-watcher/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - "jest-watcher/jest-util": ["jest-util@30.2.0", "", { "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", "chalk": "^4.1.2", "ci-info": "^4.2.0", "graceful-fs": "^4.2.11", "picomatch": "^4.0.2" } }, "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA=="], - 
"jest-watcher/string-length": ["string-length@4.0.2", "", { "dependencies": { "char-regex": "^1.0.2", "strip-ansi": "^6.0.0" } }, "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ=="], - "jest-worker/jest-util": ["jest-util@30.2.0", "", { "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", "chalk": "^4.1.2", "ci-info": "^4.2.0", "graceful-fs": "^4.2.11", "picomatch": "^4.0.2" } }, "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA=="], - "katex/commander": ["commander@8.3.0", "", {}, "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww=="], "mdast-util-find-and-replace/escape-string-regexp": ["escape-string-regexp@5.0.0", "", {}, "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw=="], @@ -3308,74 +3244,42 @@ "@jest/console/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - "@jest/console/jest-util/ci-info": ["ci-info@4.3.1", "", {}, "sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA=="], - - "@jest/core/@jest/transform/babel-plugin-istanbul": ["babel-plugin-istanbul@7.0.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.0.0", "@istanbuljs/load-nyc-config": "^1.0.0", "@istanbuljs/schema": "^0.1.3", "istanbul-lib-instrument": "^6.0.2", "test-exclude": "^6.0.0" } }, "sha512-D8Z6Qm8jCvVXtIRkBnqNHX0zJ37rQcFJ9u8WOS6tkYOsRdHBzypCstaxWiu5ZIlqQtviRYbgnRLSoCEvjqcqbA=="], - - "@jest/core/@jest/transform/write-file-atomic": ["write-file-atomic@5.0.1", "", { "dependencies": { "imurmurhash": "^0.1.4", "signal-exit": "^4.0.1" } }, "sha512-+QU2zd6OTD8XWIJCbffaiQeH9U73qIqafo1x6V1snCWYGJf6cVE0cDR4D8xRzcEnfI21IFrUPzPGtcPf8AC+Rw=="], - "@jest/core/ansi-escapes/type-fest": ["type-fest@0.21.3", "", {}, "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w=="], "@jest/core/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], "@jest/core/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - "@jest/fake-timers/jest-util/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - - "@jest/fake-timers/jest-util/ci-info": ["ci-info@4.3.1", "", {}, "sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA=="], - - "@jest/reporters/@jest/transform/babel-plugin-istanbul": ["babel-plugin-istanbul@7.0.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.0.0", "@istanbuljs/load-nyc-config": "^1.0.0", "@istanbuljs/schema": "^0.1.3", "istanbul-lib-instrument": "^6.0.2", "test-exclude": "^6.0.0" } }, "sha512-D8Z6Qm8jCvVXtIRkBnqNHX0zJ37rQcFJ9u8WOS6tkYOsRdHBzypCstaxWiu5ZIlqQtviRYbgnRLSoCEvjqcqbA=="], - - "@jest/reporters/@jest/transform/write-file-atomic": ["write-file-atomic@5.0.1", "", { "dependencies": { "imurmurhash": "^0.1.4", "signal-exit": "^4.0.1" } }, 
"sha512-+QU2zd6OTD8XWIJCbffaiQeH9U73qIqafo1x6V1snCWYGJf6cVE0cDR4D8xRzcEnfI21IFrUPzPGtcPf8AC+Rw=="], - "@jest/reporters/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], "@jest/reporters/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], "@jest/reporters/istanbul-lib-instrument/semver": ["semver@7.7.3", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q=="], - "@jest/reporters/jest-util/ci-info": ["ci-info@4.3.1", "", {}, "sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA=="], - "@jest/snapshot-utils/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], "@jest/snapshot-utils/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - "@jest/transform/@jest/types/@jest/schemas": ["@jest/schemas@29.6.3", "", { "dependencies": { "@sinclair/typebox": "^0.27.8" } }, "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA=="], - "@jest/transform/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], "@jest/transform/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - "@jest/transform/jest-haste-map/fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="], - - "@jest/transform/jest-haste-map/jest-worker": ["jest-worker@29.7.0", "", { "dependencies": { "@types/node": "*", "jest-util": "^29.7.0", "merge-stream": "^2.0.0", "supports-color": "^8.0.0" } }, "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw=="], - - "@jest/transform/write-file-atomic/signal-exit": ["signal-exit@3.0.7", "", {}, "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ=="], - "@jest/types/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], "@jest/types/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - "@testing-library/dom/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], - - "@testing-library/dom/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - 
"@testing-library/dom/pretty-format/react-is": ["react-is@17.0.2", "", {}, "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w=="], "@typescript-eslint/typescript-estree/minimatch/brace-expansion": ["brace-expansion@2.0.2", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ=="], "app-builder-lib/minimatch/brace-expansion": ["brace-expansion@2.0.2", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ=="], - "babel-jest/@jest/transform/jest-util": ["jest-util@30.2.0", "", { "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", "chalk": "^4.1.2", "ci-info": "^4.2.0", "graceful-fs": "^4.2.11", "picomatch": "^4.0.2" } }, "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA=="], - - "babel-jest/@jest/transform/write-file-atomic": ["write-file-atomic@5.0.1", "", { "dependencies": { "imurmurhash": "^0.1.4", "signal-exit": "^4.0.1" } }, "sha512-+QU2zd6OTD8XWIJCbffaiQeH9U73qIqafo1x6V1snCWYGJf6cVE0cDR4D8xRzcEnfI21IFrUPzPGtcPf8AC+Rw=="], - - "babel-jest/babel-plugin-istanbul/istanbul-lib-instrument": ["istanbul-lib-instrument@6.0.3", "", { "dependencies": { "@babel/core": "^7.23.9", "@babel/parser": "^7.23.9", "@istanbuljs/schema": "^0.1.3", "istanbul-lib-coverage": "^3.2.0", "semver": "^7.5.4" } }, "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q=="], - "babel-jest/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], "babel-jest/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], + "babel-plugin-istanbul/istanbul-lib-instrument/semver": ["semver@7.7.3", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q=="], + "builder-util/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], "builder-util/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], @@ -3408,10 +3312,6 @@ "eslint/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - "expect/jest-util/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - - "expect/jest-util/ci-info": ["ci-info@4.3.1", "", {}, "sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA=="], - "filelist/minimatch/brace-expansion": ["brace-expansion@2.0.2", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ=="], "find-process/chalk/ansi-styles": 
["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], @@ -3426,22 +3326,14 @@ "istanbul-lib-report/make-dir/semver": ["semver@7.7.3", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q=="], - "jest-changed-files/jest-util/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - - "jest-changed-files/jest-util/ci-info": ["ci-info@4.3.1", "", {}, "sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA=="], - "jest-circus/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], "jest-circus/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - "jest-circus/jest-util/ci-info": ["ci-info@4.3.1", "", {}, "sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA=="], - "jest-cli/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], "jest-cli/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - "jest-cli/jest-util/ci-info": ["ci-info@4.3.1", "", {}, "sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA=="], - "jest-config/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], "jest-config/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], @@ -3454,16 +3346,6 @@ "jest-each/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - "jest-each/jest-util/ci-info": ["ci-info@4.3.1", "", {}, "sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA=="], - - "jest-environment-node/jest-util/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - - "jest-environment-node/jest-util/ci-info": ["ci-info@4.3.1", "", {}, "sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA=="], - - "jest-haste-map/jest-util/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - - "jest-haste-map/jest-util/ci-info": ["ci-info@4.3.1", "", {}, 
"sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA=="], - "jest-matcher-utils/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], "jest-matcher-utils/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], @@ -3472,10 +3354,6 @@ "jest-message-util/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - "jest-mock/jest-util/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - - "jest-mock/jest-util/ci-info": ["ci-info@4.3.1", "", {}, "sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA=="], - "jest-process-manager/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], "jest-process-manager/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], @@ -3484,40 +3362,18 @@ "jest-resolve/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - "jest-resolve/jest-util/ci-info": ["ci-info@4.3.1", "", {}, "sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA=="], - - "jest-runner/@jest/transform/babel-plugin-istanbul": ["babel-plugin-istanbul@7.0.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.0.0", "@istanbuljs/load-nyc-config": "^1.0.0", "@istanbuljs/schema": "^0.1.3", "istanbul-lib-instrument": "^6.0.2", "test-exclude": "^6.0.0" } }, "sha512-D8Z6Qm8jCvVXtIRkBnqNHX0zJ37rQcFJ9u8WOS6tkYOsRdHBzypCstaxWiu5ZIlqQtviRYbgnRLSoCEvjqcqbA=="], - - "jest-runner/@jest/transform/write-file-atomic": ["write-file-atomic@5.0.1", "", { "dependencies": { "imurmurhash": "^0.1.4", "signal-exit": "^4.0.1" } }, "sha512-+QU2zd6OTD8XWIJCbffaiQeH9U73qIqafo1x6V1snCWYGJf6cVE0cDR4D8xRzcEnfI21IFrUPzPGtcPf8AC+Rw=="], - "jest-runner/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], "jest-runner/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - "jest-runner/jest-util/ci-info": ["ci-info@4.3.1", "", {}, "sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA=="], - - "jest-runtime/@jest/transform/babel-plugin-istanbul": ["babel-plugin-istanbul@7.0.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.0.0", "@istanbuljs/load-nyc-config": "^1.0.0", "@istanbuljs/schema": "^0.1.3", "istanbul-lib-instrument": "^6.0.2", "test-exclude": "^6.0.0" } }, 
"sha512-D8Z6Qm8jCvVXtIRkBnqNHX0zJ37rQcFJ9u8WOS6tkYOsRdHBzypCstaxWiu5ZIlqQtviRYbgnRLSoCEvjqcqbA=="], - - "jest-runtime/@jest/transform/write-file-atomic": ["write-file-atomic@5.0.1", "", { "dependencies": { "imurmurhash": "^0.1.4", "signal-exit": "^4.0.1" } }, "sha512-+QU2zd6OTD8XWIJCbffaiQeH9U73qIqafo1x6V1snCWYGJf6cVE0cDR4D8xRzcEnfI21IFrUPzPGtcPf8AC+Rw=="], - "jest-runtime/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], "jest-runtime/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - "jest-runtime/jest-util/ci-info": ["ci-info@4.3.1", "", {}, "sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA=="], - - "jest-snapshot/@jest/transform/babel-plugin-istanbul": ["babel-plugin-istanbul@7.0.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.0.0", "@istanbuljs/load-nyc-config": "^1.0.0", "@istanbuljs/schema": "^0.1.3", "istanbul-lib-instrument": "^6.0.2", "test-exclude": "^6.0.0" } }, "sha512-D8Z6Qm8jCvVXtIRkBnqNHX0zJ37rQcFJ9u8WOS6tkYOsRdHBzypCstaxWiu5ZIlqQtviRYbgnRLSoCEvjqcqbA=="], - - "jest-snapshot/@jest/transform/write-file-atomic": ["write-file-atomic@5.0.1", "", { "dependencies": { "imurmurhash": "^0.1.4", "signal-exit": "^4.0.1" } }, "sha512-+QU2zd6OTD8XWIJCbffaiQeH9U73qIqafo1x6V1snCWYGJf6cVE0cDR4D8xRzcEnfI21IFrUPzPGtcPf8AC+Rw=="], - "jest-snapshot/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], "jest-snapshot/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - "jest-snapshot/jest-util/ci-info": ["ci-info@4.3.1", "", {}, "sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA=="], - - "jest-util/@jest/types/@jest/schemas": ["@jest/schemas@29.6.3", "", { "dependencies": { "@sinclair/typebox": "^0.27.8" } }, "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA=="], - "jest-util/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], "jest-util/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], @@ -3534,12 +3390,6 @@ "jest-watcher/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - "jest-watcher/jest-util/ci-info": ["ci-info@4.3.1", "", {}, "sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA=="], - - "jest-worker/jest-util/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - - "jest-worker/jest-util/ci-info": ["ci-info@4.3.1", "", {}, 
"sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA=="], - "mlly/pkg-types/confbox": ["confbox@0.1.8", "", {}, "sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w=="], "nodemon/supports-color/has-flag": ["has-flag@3.0.0", "", {}, "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw=="], @@ -3572,50 +3422,6 @@ "@istanbuljs/load-nyc-config/js-yaml/argparse/sprintf-js": ["sprintf-js@1.0.3", "", {}, "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g=="], - "@jest/core/@jest/transform/babel-plugin-istanbul/istanbul-lib-instrument": ["istanbul-lib-instrument@6.0.3", "", { "dependencies": { "@babel/core": "^7.23.9", "@babel/parser": "^7.23.9", "@istanbuljs/schema": "^0.1.3", "istanbul-lib-coverage": "^3.2.0", "semver": "^7.5.4" } }, "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q=="], - - "@jest/fake-timers/jest-util/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], - - "@jest/fake-timers/jest-util/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - - "@jest/transform/@jest/types/@jest/schemas/@sinclair/typebox": ["@sinclair/typebox@0.27.8", "", {}, "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA=="], - - "babel-jest/@jest/transform/jest-util/ci-info": ["ci-info@4.3.1", "", {}, "sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA=="], - - "babel-jest/babel-plugin-istanbul/istanbul-lib-instrument/semver": ["semver@7.7.3", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q=="], - - "expect/jest-util/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], - - "expect/jest-util/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - - "jest-changed-files/jest-util/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], - - "jest-changed-files/jest-util/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - - "jest-environment-node/jest-util/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], - - "jest-environment-node/jest-util/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - - "jest-haste-map/jest-util/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { 
"color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], - - "jest-haste-map/jest-util/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - - "jest-mock/jest-util/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], - - "jest-mock/jest-util/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - - "jest-runner/@jest/transform/babel-plugin-istanbul/istanbul-lib-instrument": ["istanbul-lib-instrument@6.0.3", "", { "dependencies": { "@babel/core": "^7.23.9", "@babel/parser": "^7.23.9", "@istanbuljs/schema": "^0.1.3", "istanbul-lib-coverage": "^3.2.0", "semver": "^7.5.4" } }, "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q=="], - - "jest-runtime/@jest/transform/babel-plugin-istanbul/istanbul-lib-instrument": ["istanbul-lib-instrument@6.0.3", "", { "dependencies": { "@babel/core": "^7.23.9", "@babel/parser": "^7.23.9", "@istanbuljs/schema": "^0.1.3", "istanbul-lib-coverage": "^3.2.0", "semver": "^7.5.4" } }, "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q=="], - - "jest-snapshot/@jest/transform/babel-plugin-istanbul/istanbul-lib-instrument": ["istanbul-lib-instrument@6.0.3", "", { "dependencies": { "@babel/core": "^7.23.9", "@babel/parser": "^7.23.9", "@istanbuljs/schema": "^0.1.3", "istanbul-lib-coverage": "^3.2.0", "semver": "^7.5.4" } }, "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q=="], - - "jest-util/@jest/types/@jest/schemas/@sinclair/typebox": ["@sinclair/typebox@0.27.8", "", {}, "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA=="], - - "jest-worker/jest-util/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], - - "jest-worker/jest-util/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - "nyc/find-up/locate-path/p-locate": ["p-locate@4.1.0", "", { "dependencies": { "p-limit": "^2.2.0" } }, "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A=="], "nyc/yargs/cliui/wrap-ansi": ["wrap-ansi@6.2.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA=="], @@ -3630,12 +3436,6 @@ "@istanbuljs/load-nyc-config/find-up/locate-path/p-locate/p-limit": ["p-limit@2.3.0", "", { "dependencies": { "p-try": "^2.0.0" } }, "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w=="], - "@jest/core/@jest/transform/babel-plugin-istanbul/istanbul-lib-instrument/semver": ["semver@7.7.3", "", { "bin": { "semver": "bin/semver.js" } }, 
"sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q=="], - - "jest-runner/@jest/transform/babel-plugin-istanbul/istanbul-lib-instrument/semver": ["semver@7.7.3", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q=="], - - "jest-runtime/@jest/transform/babel-plugin-istanbul/istanbul-lib-instrument/semver": ["semver@7.7.3", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q=="], - "nyc/find-up/locate-path/p-locate/p-limit": ["p-limit@2.3.0", "", { "dependencies": { "p-try": "^2.0.0" } }, "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w=="], "nyc/yargs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], diff --git a/chromatic.config.json b/chromatic.config.json index 89522bddf..a7be7bbab 100644 --- a/chromatic.config.json +++ b/chromatic.config.json @@ -6,4 +6,3 @@ "exitOnceUploaded": true, "autoAcceptChanges": false } - diff --git a/codecov.yml b/codecov.yml index 85cac0a9d..a7957ebf3 100644 --- a/codecov.yml +++ b/codecov.yml @@ -14,16 +14,16 @@ coverage: # Project coverage status - informational only project: default: - target: auto # Compare to base commit - threshold: 5% # Allow 5% drop without changing status - informational: true # Won't block PRs + target: auto # Compare to base commit + threshold: 5% # Allow 5% drop without changing status + informational: true # Won't block PRs - # Patch coverage status - informational only + # Patch coverage status - informational only patch: default: - target: auto # Compare to base commit - threshold: 5% # Allow 5% drop without changing status - informational: true # Won't block PRs + target: auto # Compare to base commit + threshold: 5% # Allow 5% drop without changing status + informational: true # Won't block PRs # Flag configuration to track unit vs integration test coverage flags: diff --git a/components.json b/components.json index f13465264..26216a973 100644 --- a/components.json +++ b/components.json @@ -15,4 +15,3 @@ "utils": "@/lib/utils" } } - diff --git a/docs/theme/custom.css b/docs/theme/custom.css index 68c8149fa..d3b3a1ef2 100644 --- a/docs/theme/custom.css +++ b/docs/theme/custom.css @@ -350,7 +350,9 @@ blockquote:has(p:first-child > strong:first-child:is([class*="Note"], [class*="n blockquote > p:first-child:has(strong:first-child:is([class*="Note"], [class*="note"])) { border-left-color: hsl(210 70% 50%); } -blockquote:has(p:first-child > strong:first-child:is([class*="Note"], [class*="note"])) p:first-child strong:first-child, +blockquote:has(p:first-child > strong:first-child:is([class*="Note"], [class*="note"])) + p:first-child + strong:first-child, blockquote > p:first-child strong:first-child:is([class*="Note"], [class*="note"]) { color: hsl(210 70% 60%) !important; } @@ -360,7 +362,9 @@ blockquote:has(p:first-child > strong:first-child:is([class*="Tip"], [class*="TI blockquote > p:first-child:has(strong:first-child:is([class*="Tip"], [class*="TIP"])) { border-left-color: hsl(120 50% 45%); } -blockquote:has(p:first-child > strong:first-child:is([class*="Tip"], [class*="TIP"])) p:first-child strong:first-child, +blockquote:has(p:first-child > strong:first-child:is([class*="Tip"], [class*="TIP"])) + 
p:first-child + strong:first-child, blockquote > p:first-child strong:first-child:is([class*="Tip"], [class*="TIP"]) { color: hsl(120 50% 55%) !important; } @@ -370,7 +374,9 @@ blockquote:has(p:first-child > strong:first-child:is([class*="Important"], [clas blockquote > p:first-child:has(strong:first-child:is([class*="Important"], [class*="IMPORTANT"])) { border-left-color: hsl(268 94% 60%); } -blockquote:has(p:first-child > strong:first-child:is([class*="Important"], [class*="IMPORTANT"])) p:first-child strong:first-child, +blockquote:has(p:first-child > strong:first-child:is([class*="Important"], [class*="IMPORTANT"])) + p:first-child + strong:first-child, blockquote > p:first-child strong:first-child:is([class*="Important"], [class*="IMPORTANT"]) { color: hsl(268 94% 65%) !important; } @@ -380,7 +386,9 @@ blockquote:has(p:first-child > strong:first-child:is([class*="Warning"], [class* blockquote > p:first-child:has(strong:first-child:is([class*="Warning"], [class*="WARNING"])) { border-left-color: hsl(38 92% 50%); } -blockquote:has(p:first-child > strong:first-child:is([class*="Warning"], [class*="WARNING"])) p:first-child strong:first-child, +blockquote:has(p:first-child > strong:first-child:is([class*="Warning"], [class*="WARNING"])) + p:first-child + strong:first-child, blockquote > p:first-child strong:first-child:is([class*="Warning"], [class*="WARNING"]) { color: hsl(38 92% 60%) !important; } @@ -390,7 +398,9 @@ blockquote:has(p:first-child > strong:first-child:is([class*="Caution"], [class* blockquote > p:first-child:has(strong:first-child:is([class*="Caution"], [class*="CAUTION"])) { border-left-color: hsl(0 70% 55%); } -blockquote:has(p:first-child > strong:first-child:is([class*="Caution"], [class*="CAUTION"])) p:first-child strong:first-child, +blockquote:has(p:first-child > strong:first-child:is([class*="Caution"], [class*="CAUTION"])) + p:first-child + strong:first-child, blockquote > p:first-child strong:first-child:is([class*="Caution"], [class*="CAUTION"]) { color: hsl(0 70% 60%) !important; } diff --git a/playwright.config.ts b/playwright.config.ts index b79725fc2..7459d4f88 100644 --- a/playwright.config.ts +++ b/playwright.config.ts @@ -6,7 +6,7 @@ export default defineConfig({ testDir: "./tests/e2e", timeout: 120_000, expect: { - timeout: 5_000, + timeout: 15_000, // Increased to allow worker thread encoding import (~10s) }, fullyParallel: true, forbidOnly: isCI, diff --git a/public/manifest.json b/public/manifest.json index 5fa3912cf..c49a24ef3 100644 --- a/public/manifest.json +++ b/public/manifest.json @@ -32,4 +32,3 @@ } ] } - diff --git a/public/service-worker.js b/public/service-worker.js index e88aae780..7d8eba2d1 100644 --- a/public/service-worker.js +++ b/public/service-worker.js @@ -1,23 +1,22 @@ // cmux Service Worker for PWA support -const CACHE_NAME = 'cmux-v1'; -const urlsToCache = [ - '/', - '/index.html', -]; +const CACHE_NAME = "cmux-v1"; +const urlsToCache = ["/", "/index.html"]; // Install event - cache core assets -self.addEventListener('install', (event) => { +self.addEventListener("install", (event) => { event.waitUntil( - caches.open(CACHE_NAME) + caches + .open(CACHE_NAME) .then((cache) => cache.addAll(urlsToCache)) .then(() => self.skipWaiting()) ); }); // Activate event - clean up old caches -self.addEventListener('activate', (event) => { +self.addEventListener("activate", (event) => { event.waitUntil( - caches.keys() + caches + .keys() .then((cacheNames) => { return Promise.all( cacheNames.map((cacheName) => { @@ -32,10 +31,10 @@ 
self.addEventListener('activate', (event) => { }); // Fetch event - network first, fallback to cache -self.addEventListener('fetch', (event) => { +self.addEventListener("fetch", (event) => { // Skip caching for non-GET requests (POST, PUT, DELETE, etc.) // The Cache API only supports GET requests - if (event.request.method !== 'GET') { + if (event.request.method !== "GET") { event.respondWith(fetch(event.request)); return; } @@ -45,10 +44,9 @@ self.addEventListener('fetch', (event) => { .then((response) => { // Clone the response before caching const responseToCache = response.clone(); - caches.open(CACHE_NAME) - .then((cache) => { - cache.put(event.request, responseToCache); - }); + caches.open(CACHE_NAME).then((cache) => { + cache.put(event.request, responseToCache); + }); return response; }) .catch(() => { @@ -57,4 +55,3 @@ self.addEventListener('fetch', (event) => { }) ); }); - diff --git a/src/App.stories.tsx b/src/App.stories.tsx index 799620dde..baacef3a9 100644 --- a/src/App.stories.tsx +++ b/src/App.stories.tsx @@ -4,6 +4,7 @@ import { AppLoader } from "./components/AppLoader"; import type { ProjectConfig } from "./config"; import type { FrontendWorkspaceMetadata } from "./types/workspace"; import type { IPCApi } from "./types/ipc"; +import type { ChatStats } from "./types/chatStats"; // Stable timestamp for visual testing (Apple demo time: Jan 24, 2024, 9:41 AM PST) const STABLE_TIMESTAMP = new Date("2024-01-24T09:41:00-08:00").getTime(); @@ -17,8 +18,20 @@ function setupMockAPI(options: { }) { const mockProjects = options.projects ?? new Map(); const mockWorkspaces = options.workspaces ?? []; + const mockStats: ChatStats = { + consumers: [], + totalTokens: 0, + model: "mock-model", + tokenizerName: "mock-tokenizer", + usageHistory: [], + }; const mockApi: IPCApi = { + tokenizer: { + countTokens: () => Promise.resolve(0), + countTokensBatch: (_model, texts) => Promise.resolve(texts.map(() => 0)), + calculateStats: () => Promise.resolve(mockStats), + }, providers: { setProviderConfig: () => Promise.resolve({ success: true, data: undefined }), list: () => Promise.resolve([]), diff --git a/src/browser/api.ts b/src/browser/api.ts index 6a77917ab..9f1cc2c8a 100644 --- a/src/browser/api.ts +++ b/src/browser/api.ts @@ -186,6 +186,13 @@ const wsManager = new WebSocketManager(); // Create the Web API implementation const webApi: IPCApi = { + tokenizer: { + countTokens: (model, text) => invokeIPC(IPC_CHANNELS.TOKENIZER_COUNT_TOKENS, model, text), + countTokensBatch: (model, texts) => + invokeIPC(IPC_CHANNELS.TOKENIZER_COUNT_TOKENS_BATCH, model, texts), + calculateStats: (messages, model) => + invokeIPC(IPC_CHANNELS.TOKENIZER_CALCULATE_STATS, messages, model), + }, providers: { setProviderConfig: (provider, keyPath, value) => invokeIPC(IPC_CHANNELS.PROVIDERS_SET_CONFIG, provider, keyPath, value), diff --git a/src/components/ChatInput.tsx b/src/components/ChatInput.tsx index f42675942..26f71e2e7 100644 --- a/src/components/ChatInput.tsx +++ b/src/components/ChatInput.tsx @@ -1,4 +1,13 @@ -import React, { useState, useRef, useCallback, useEffect, useId } from "react"; +import React, { + Suspense, + useState, + useRef, + useCallback, + useEffect, + useId, + useMemo, + useDeferredValue, +} from "react"; import { cn } from "@/lib/utils"; import { CommandSuggestions, COMMAND_SUGGESTION_KEYS } from "./CommandSuggestions"; import type { Toast } from "./ChatInputToast"; @@ -41,6 +50,37 @@ import type { ThinkingLevel } from "@/types/thinking"; import type { CmuxFrontendMetadata } from 
"@/types/message"; import { useTelemetry } from "@/hooks/useTelemetry"; import { setTelemetryEnabled } from "@/telemetry"; +import { getTokenCountPromise } from "@/utils/tokenizer/rendererClient"; + +type TokenCountReader = () => number; + +function createTokenCountResource(promise: Promise): TokenCountReader { + let status: "pending" | "success" | "error" = "pending"; + let value = 0; + let error: Error | null = null; + + const suspender = promise.then( + (resolved) => { + status = "success"; + value = resolved; + }, + (reason: unknown) => { + status = "error"; + error = reason instanceof Error ? reason : new Error(String(reason)); + } + ); + + return () => { + if (status === "pending") { + // eslint-disable-next-line @typescript-eslint/only-throw-error + throw suspender; + } + if (status === "error") { + throw error ?? new Error("Unknown tokenizer error"); + } + return value; + }; +} export interface ChatInputAPI { focus: () => void; @@ -103,6 +143,19 @@ export const ChatInput: React.FC = ({ const sendMessageOptions = useSendMessageOptions(workspaceId); // Extract model for convenience (don't create separate state - use hook as single source of truth) const preferredModel = sendMessageOptions.model; + const deferredModel = useDeferredValue(preferredModel); + const deferredInput = useDeferredValue(input); + const tokenCountPromise = useMemo(() => { + if (!deferredModel || deferredInput.trim().length === 0 || deferredInput.startsWith("/")) { + return Promise.resolve(0); + } + return getTokenCountPromise(deferredModel, deferredInput); + }, [deferredModel, deferredInput]); + const tokenCountReader = useMemo( + () => createTokenCountResource(tokenCountPromise), + [tokenCountPromise] + ); + const hasTypedText = input.trim().length > 0; // Setter for model - updates localStorage directly so useSendMessageOptions picks it up const setPreferredModel = useCallback( (model: string) => { @@ -807,6 +860,22 @@ export const ChatInput: React.FC = ({
+        {preferredModel && (
+          <Suspense
+            fallback={
+              <span>Calculating tokens…</span>
+            }
+          >
+            <TokenCountDisplay reader={tokenCountReader} />
+          </Suspense>
+        )}
= ({
   );
 };
+
+const TokenCountDisplay: React.FC<{ reader: TokenCountReader }> = ({ reader }) => {
+  const tokens = reader();
+  if (!tokens) {
+    return null;
+  }
+  return (
+    <span>
+      {tokens.toLocaleString()} tokens
+    </span>
+ ); +}; diff --git a/src/components/Messages/MarkdownComponents.tsx b/src/components/Messages/MarkdownComponents.tsx index 8b46c49d6..8d6b45818 100644 --- a/src/components/Messages/MarkdownComponents.tsx +++ b/src/components/Messages/MarkdownComponents.tsx @@ -77,11 +77,22 @@ const CodeBlock: React.FC = ({ code, language }) => { const highlighter = await getShikiHighlighter(); const shikiLang = mapToShikiLang(language); - // Load language on-demand + // Load language on-demand if not already loaded + // This is race-safe: concurrent loads of the same language are idempotent const loadedLangs = highlighter.getLoadedLanguages(); if (!loadedLangs.includes(shikiLang)) { - // eslint-disable-next-line @typescript-eslint/no-explicit-any, @typescript-eslint/no-unsafe-argument - await highlighter.loadLanguage(shikiLang as any); + try { + // TypeScript doesn't know shikiLang is valid, but we handle errors gracefully + // eslint-disable-next-line @typescript-eslint/no-explicit-any, @typescript-eslint/no-unsafe-argument + await highlighter.loadLanguage(shikiLang as any); + } catch { + // Language not available in Shiki bundle - fall back to plain text + console.warn(`Language '${shikiLang}' not available in Shiki, using plain text`); + if (!cancelled) { + setHighlightedLines(null); + } + return; + } } const html = highlighter.codeToHtml(code, { diff --git a/src/constants/ipc-constants.ts b/src/constants/ipc-constants.ts index b5d35489f..dab77a1b6 100644 --- a/src/constants/ipc-constants.ts +++ b/src/constants/ipc-constants.ts @@ -45,6 +45,11 @@ export const IPC_CHANNELS = { UPDATE_STATUS: "update:status", UPDATE_STATUS_SUBSCRIBE: "update:status:subscribe", + // Tokenizer channels + TOKENIZER_CALCULATE_STATS: "tokenizer:calculateStats", + TOKENIZER_COUNT_TOKENS: "tokenizer:countTokens", + TOKENIZER_COUNT_TOKENS_BATCH: "tokenizer:countTokensBatch", + // Dynamic channel prefixes WORKSPACE_CHAT_PREFIX: "workspace:chat:", WORKSPACE_METADATA: "workspace:metadata", diff --git a/src/debug/costs.ts b/src/debug/costs.ts index 967c0852d..ce34d63e4 100644 --- a/src/debug/costs.ts +++ b/src/debug/costs.ts @@ -9,7 +9,7 @@ import { getDefaultModelFromLRU } from "@/hooks/useModelLRU"; * Debug command to display cost/token statistics for a workspace * Usage: bun debug costs */ -export function costsCommand(workspaceId: string) { +export async function costsCommand(workspaceId: string) { console.log(`\n=== Cost Statistics for workspace: ${workspaceId} ===\n`); // Load chat history @@ -38,7 +38,7 @@ export function costsCommand(workspaceId: string) { const model = firstAssistantMessage?.metadata?.model ?? 
getDefaultModelFromLRU(); // Calculate stats using shared logic (now synchronous) - const stats = calculateTokenStats(messages, model); + const stats = await calculateTokenStats(messages, model); // Display results console.log(`Model: ${stats.model}`); diff --git a/src/debug/index.ts b/src/debug/index.ts index 046c72673..1cd498604 100644 --- a/src/debug/index.ts +++ b/src/debug/index.ts @@ -32,7 +32,7 @@ switch (command) { process.exit(1); } console.profile("costs"); - costsCommand(workspaceId); + await costsCommand(workspaceId); console.profileEnd("costs"); break; } diff --git a/src/git.test.ts b/src/git.test.ts index b511215c0..86e18d165 100644 --- a/src/git.test.ts +++ b/src/git.test.ts @@ -21,6 +21,7 @@ describe("createWorktree", () => { await execAsync(`git init`, { cwd: tempGitRepo }); await execAsync(`git config user.email "test@example.com"`, { cwd: tempGitRepo }); await execAsync(`git config user.name "Test User"`, { cwd: tempGitRepo }); + await execAsync(`git config commit.gpgsign false`, { cwd: tempGitRepo }); await execAsync(`echo "test" > README.md`, { cwd: tempGitRepo }); await execAsync(`git add .`, { cwd: tempGitRepo }); await execAsync(`git commit -m "Initial commit"`, { cwd: tempGitRepo }); diff --git a/src/main-desktop.ts b/src/main-desktop.ts index 7209ddebb..6a0760b81 100644 --- a/src/main-desktop.ts +++ b/src/main-desktop.ts @@ -17,10 +17,11 @@ import * as path from "path"; import type { Config } from "./config"; import type { IpcMain } from "./services/ipcMain"; import { VERSION } from "./version"; -import type { loadTokenizerModules } from "./utils/main/tokenizer"; import { IPC_CHANNELS } from "./constants/ipc-constants"; import { log } from "./services/log"; import { parseDebugUpdater } from "./utils/env"; +import assert from "./utils/assert"; +import { loadTokenizerModules } from "./utils/main/tokenizer"; // React DevTools for development profiling // Using require() instead of import since it's dev-only and conditionally loaded @@ -66,7 +67,6 @@ if (!app.isPackaged) { // These will be loaded on-demand when createWindow() is called let config: Config | null = null; let ipcMain: IpcMain | null = null; -let loadTokenizerModulesFn: typeof loadTokenizerModules | null = null; // eslint-disable-next-line @typescript-eslint/consistent-type-imports let updaterService: typeof import("./services/updater").UpdaterService.prototype | null = null; const isE2ETest = process.env.CMUX_E2E === "1"; @@ -294,7 +294,7 @@ function closeSplashScreen() { * the splash still provides visual feedback that the app is loading. 
*/ async function loadServices(): Promise { - if (config && ipcMain && loadTokenizerModulesFn) return; // Already loaded + if (config && ipcMain) return; // Already loaded const startTime = Date.now(); console.log(`[${timestamp()}] Loading services...`); @@ -307,18 +307,19 @@ async function loadServices(): Promise { const [ { Config: ConfigClass }, { IpcMain: IpcMainClass }, - { loadTokenizerModules: loadTokenizerFn }, { UpdaterService: UpdaterServiceClass }, ] = await Promise.all([ import("./config"), import("./services/ipcMain"), - import("./utils/main/tokenizer"), import("./services/updater"), ]); /* eslint-enable no-restricted-syntax */ config = new ConfigClass(); ipcMain = new IpcMainClass(config); - loadTokenizerModulesFn = loadTokenizerFn; + + loadTokenizerModules().catch((error) => { + console.error("Failed to preload tokenizer modules:", error); + }); // Initialize updater service in packaged builds or when DEBUG_UPDATER is set const debugConfig = parseDebugUpdater(process.env.DEBUG_UPDATER); @@ -342,9 +343,7 @@ async function loadServices(): Promise { } function createWindow() { - if (!ipcMain) { - throw new Error("Services must be loaded before creating window"); - } + assert(ipcMain, "Services must be loaded before creating window"); // Calculate window size based on screen dimensions (80% of available space) const primaryDisplay = screen.getPrimaryDisplay(); @@ -354,7 +353,6 @@ function createWindow() { const windowHeight = Math.max(800, Math.floor(screenHeight * 0.8)); console.log(`[${timestamp()}] [window] Creating BrowserWindow...`); - console.time("[window] BrowserWindow creation"); mainWindow = new BrowserWindow({ width: windowWidth, @@ -371,13 +369,9 @@ function createWindow() { show: false, // Don't show until ready-to-show event }); - console.timeEnd("[window] BrowserWindow creation"); - // Register IPC handlers with the main window console.log(`[${timestamp()}] [window] Registering IPC handlers...`); - console.time("[window] IPC registration"); ipcMain.register(electronIpcMain, mainWindow); - console.timeEnd("[window] IPC registration"); // Register updater IPC handlers (available in both dev and prod) electronIpcMain.handle(IPC_CHANNELS.UPDATE_CHECK, () => { diff --git a/src/preload.ts b/src/preload.ts index c767ced5f..7d1531786 100644 --- a/src/preload.ts +++ b/src/preload.ts @@ -26,6 +26,14 @@ import { IPC_CHANNELS, getChatChannel } from "./constants/ipc-constants"; // Build the API implementation using the shared interface const api: IPCApi = { + tokenizer: { + countTokens: (model, text) => + ipcRenderer.invoke(IPC_CHANNELS.TOKENIZER_COUNT_TOKENS, model, text), + countTokensBatch: (model, texts) => + ipcRenderer.invoke(IPC_CHANNELS.TOKENIZER_COUNT_TOKENS_BATCH, model, texts), + calculateStats: (messages, model) => + ipcRenderer.invoke(IPC_CHANNELS.TOKENIZER_CALCULATE_STATS, messages, model), + }, providers: { setProviderConfig: (provider, keyPath, value) => ipcRenderer.invoke(IPC_CHANNELS.PROVIDERS_SET_CONFIG, provider, keyPath, value), diff --git a/src/runtime/sshConnectionPool.test.ts b/src/runtime/sshConnectionPool.test.ts index 811715084..9b3536ce9 100644 --- a/src/runtime/sshConnectionPool.test.ts +++ b/src/runtime/sshConnectionPool.test.ts @@ -1,4 +1,5 @@ import * as os from "os"; +import * as path from "path"; import { getControlPath } from "./sshConnectionPool"; import type { SSHRuntimeConfig } from "./SSHRuntime"; @@ -127,6 +128,9 @@ describe("username isolation", () => { // The path should be deterministic for this user 
expect(controlPath).toBe(getControlPath(config)); - expect(controlPath).toMatch(/^\/tmp\/cmux-ssh-[a-f0-9]{12}$/); + + const expectedPrefix = path.join(os.tmpdir(), "cmux-ssh-"); + expect(controlPath.startsWith(expectedPrefix)).toBe(true); + expect(controlPath).toMatch(/cmux-ssh-[a-f0-9]{12}$/); }); }); diff --git a/src/services/agentSession.ts b/src/services/agentSession.ts index 15706a2b1..f4c0feb95 100644 --- a/src/services/agentSession.ts +++ b/src/services/agentSession.ts @@ -14,7 +14,6 @@ import { createUnknownSendMessageError } from "@/services/utils/sendMessageError import type { Result } from "@/types/result"; import { Ok, Err } from "@/types/result"; import { enforceThinkingPolicy } from "@/utils/thinking/policy"; -import { loadTokenizerForModel } from "@/utils/main/tokenizer"; import { createRuntime } from "@/runtime/runtimeFactory"; interface ImagePart { @@ -147,7 +146,7 @@ export class AgentSession { const partial = await this.partialService.readPartial(this.workspaceId); if (streamInfo) { - this.aiService.replayStream(this.workspaceId); + await this.aiService.replayStream(this.workspaceId); } else if (partial) { listener({ workspaceId: this.workspaceId, message: partial }); } @@ -352,19 +351,6 @@ export class AgentSession { modelString: string, options?: SendMessageOptions ): Promise> { - try { - assert( - typeof modelString === "string" && modelString.trim().length > 0, - "modelString must be a non-empty string" - ); - await loadTokenizerForModel(modelString); - } catch (error) { - const reason = error instanceof Error ? error.message : String(error); - return Err( - createUnknownSendMessageError(`Failed to preload tokenizer for ${modelString}: ${reason}`) - ); - } - const commitResult = await this.partialService.commitToHistory(this.workspaceId); if (!commitResult.success) { return Err(createUnknownSendMessageError(commitResult.error)); diff --git a/src/services/aiService.ts b/src/services/aiService.ts index dfb528b6e..e5cc098f8 100644 --- a/src/services/aiService.ts +++ b/src/services/aiService.ts @@ -540,8 +540,8 @@ export class AIService extends EventEmitter { ); // Count system message tokens for cost tracking - const tokenizer = getTokenizerForModel(modelString); - const systemMessageTokens = tokenizer.countTokens(systemMessage); + const tokenizer = await getTokenizerForModel(modelString); + const systemMessageTokens = await tokenizer.countTokens(systemMessage); // Load project secrets const projectSecrets = this.config.getProjectSecrets(metadata.projectPath); @@ -798,12 +798,12 @@ export class AIService extends EventEmitter { * Replay stream events * Emits the same events that would be emitted during live streaming */ - replayStream(workspaceId: string): void { + async replayStream(workspaceId: string): Promise { if (this.mockModeEnabled && this.mockScenarioPlayer) { - this.mockScenarioPlayer.replayStream(workspaceId); + await this.mockScenarioPlayer.replayStream(workspaceId); return; } - this.streamManager.replayStream(workspaceId); + await this.streamManager.replayStream(workspaceId); } async deleteWorkspace(workspaceId: string): Promise> { diff --git a/src/services/historyService.ts b/src/services/historyService.ts index 61a6b88d9..87c2f12bc 100644 --- a/src/services/historyService.ts +++ b/src/services/historyService.ts @@ -340,15 +340,15 @@ export class HistoryService { } // Get tokenizer for counting (use a default model) - const tokenizer = getTokenizerForModel("anthropic:claude-sonnet-4-5"); + const tokenizer = await 
getTokenizerForModel("anthropic:claude-sonnet-4-5"); // Count tokens for each message // We stringify the entire message for simplicity - only relative weights matter - const messageTokens: Array<{ message: CmuxMessage; tokens: number }> = messages.map( - (msg) => { - const tokens = tokenizer.countTokens(JSON.stringify(msg)); + const messageTokens: Array<{ message: CmuxMessage; tokens: number }> = await Promise.all( + messages.map(async (msg) => { + const tokens = await tokenizer.countTokens(JSON.stringify(msg)); return { message: msg, tokens }; - } + }) ); // Calculate total tokens and target to remove diff --git a/src/services/ipcMain.ts b/src/services/ipcMain.ts index 440bb7009..4c27fbf80 100644 --- a/src/services/ipcMain.ts +++ b/src/services/ipcMain.ts @@ -1,7 +1,6 @@ import assert from "@/utils/assert"; import type { BrowserWindow, IpcMain as ElectronIpcMain } from "electron"; import { spawn, spawnSync } from "child_process"; -import * as fs from "fs"; import * as fsPromises from "fs/promises"; import * as path from "path"; import type { Config, ProjectConfig } from "@/config"; @@ -12,6 +11,8 @@ import { PartialService } from "@/services/partialService"; import { AgentSession } from "@/services/agentSession"; import type { CmuxMessage } from "@/types/message"; import { log } from "@/services/log"; +import { countTokens, countTokensBatch } from "@/utils/main/tokenizer"; +import { calculateTokenStats } from "@/utils/tokens/tokenStatsCalculator"; import { IPC_CHANNELS, getChatChannel } from "@/constants/ipc-constants"; import type { SendMessageError } from "@/types/errors"; import type { SendMessageOptions, DeleteMessage } from "@/types/ipc"; @@ -22,7 +23,6 @@ import { createBashTool } from "@/services/tools/bash"; import type { BashToolResult } from "@/types/tools"; import { secretsToRecord } from "@/types/secrets"; import { DisposableTempDir } from "@/services/tempDir"; -import { BashExecutionService } from "@/services/bashExecutionService"; import { InitStateManager } from "@/services/initStateManager"; import { createRuntime } from "@/runtime/runtimeFactory"; import type { RuntimeConfig } from "@/types/runtime"; @@ -45,7 +45,6 @@ export class IpcMain { private readonly historyService: HistoryService; private readonly partialService: PartialService; private readonly aiService: AIService; - private readonly bashService: BashExecutionService; private readonly initStateManager: InitStateManager; private readonly sessions = new Map(); private readonly sessionSubscriptions = new Map< @@ -54,75 +53,6 @@ export class IpcMain { >(); private mainWindow: BrowserWindow | null = null; - // Run optional .cmux/init hook for a newly created workspace and stream its output - private async startWorkspaceInitHook(params: { - projectPath: string; - worktreePath: string; - workspaceId: string; - }): Promise { - const { projectPath, worktreePath, workspaceId } = params; - const hookPath = path.join(projectPath, ".cmux", "init"); - - // Check if hook exists and is executable - const exists = await fsPromises - .access(hookPath, fs.constants.X_OK) - .then(() => true) - .catch(() => false); - - if (!exists) { - log.debug(`No init hook found at ${hookPath}`); - return; // Nothing to do - } - - log.info(`Starting init hook for workspace ${workspaceId}: ${hookPath}`); - - // Start init hook tracking (creates in-memory state + emits init-start event) - // This MUST complete before we return so replayInit() finds state - this.initStateManager.startInit(workspaceId, hookPath); - - // Launch the hook process 
(don't await completion) - void (() => { - try { - const startTime = Date.now(); - - // Execute init hook through centralized bash service - // Quote path to handle spaces and special characters - this.bashService.executeStreaming( - `"${hookPath}"`, - { - cwd: worktreePath, - detached: false, // Don't need process group for simple script execution - }, - { - onStdout: (line) => { - this.initStateManager.appendOutput(workspaceId, line, false); - }, - onStderr: (line) => { - this.initStateManager.appendOutput(workspaceId, line, true); - }, - onExit: (exitCode) => { - const duration = Date.now() - startTime; - const status = exitCode === 0 ? "success" : "error"; - log.info( - `Init hook ${status} for workspace ${workspaceId} (exit code ${exitCode}, duration ${duration}ms)` - ); - // Finalize init state (automatically emits init-end event and persists to disk) - void this.initStateManager.endInit(workspaceId, exitCode); - }, - } - ); - } catch (error) { - log.error(`Failed to run init hook for workspace ${workspaceId}:`, error); - // Report error through init state manager - this.initStateManager.appendOutput( - workspaceId, - error instanceof Error ? error.message : String(error), - true - ); - void this.initStateManager.endInit(workspaceId, -1); - } - })(); - } private registered = false; constructor(config: Config) { @@ -136,7 +66,6 @@ export class IpcMain { this.partialService, this.initStateManager ); - this.bashService = new BashExecutionService(); } private getOrCreateSession(workspaceId: string): AgentSession { @@ -218,6 +147,7 @@ export class IpcMain { } this.registerWindowHandlers(ipcMain); + this.registerTokenizerHandlers(ipcMain); this.registerWorkspaceHandlers(ipcMain); this.registerProviderHandlers(ipcMain); this.registerProjectHandlers(ipcMain); @@ -232,6 +162,47 @@ export class IpcMain { }); } + private registerTokenizerHandlers(ipcMain: ElectronIpcMain): void { + ipcMain.handle( + IPC_CHANNELS.TOKENIZER_COUNT_TOKENS, + async (_event, model: string, input: string) => { + assert( + typeof model === "string" && model.length > 0, + "Tokenizer countTokens requires model name" + ); + assert(typeof input === "string", "Tokenizer countTokens requires text"); + return countTokens(model, input); + } + ); + + ipcMain.handle( + IPC_CHANNELS.TOKENIZER_COUNT_TOKENS_BATCH, + async (_event, model: string, texts: unknown[]) => { + assert( + typeof model === "string" && model.length > 0, + "Tokenizer countTokensBatch requires model name" + ); + assert(Array.isArray(texts), "Tokenizer countTokensBatch requires an array of strings"); + return countTokensBatch(model, texts as string[]); + } + ); + + ipcMain.handle( + IPC_CHANNELS.TOKENIZER_CALCULATE_STATS, + async (_event, messages: CmuxMessage[], model: string) => { + assert(Array.isArray(messages), "Tokenizer IPC requires an array of messages"); + assert(typeof model === "string" && model.length > 0, "Tokenizer IPC requires model name"); + + try { + return await calculateTokenStats(messages, model); + } catch (error) { + log.error("[IpcMain] Token stats calculation failed", error); + throw error; + } + } + ); + } + private registerWorkspaceHandlers(ipcMain: ElectronIpcMain): void { ipcMain.handle( IPC_CHANNELS.WORKSPACE_CREATE, @@ -692,7 +663,8 @@ export class IpcMain { } return result; } catch (error) { - const errorMessage = error instanceof Error ? error.message : String(error); + const errorMessage = + error instanceof Error ? 
error.message : JSON.stringify(error, null, 2); log.error("Unexpected error in sendMessage handler:", error); const sendError: SendMessageError = { type: "unknown", diff --git a/src/services/mock/mockScenarioPlayer.ts b/src/services/mock/mockScenarioPlayer.ts index a4a854d48..f9481e6fa 100644 --- a/src/services/mock/mockScenarioPlayer.ts +++ b/src/services/mock/mockScenarioPlayer.ts @@ -1,3 +1,4 @@ +import assert from "@/utils/assert"; import type { CmuxMessage } from "@/types/message"; import { createCmuxMessage } from "@/types/message"; import type { HistoryService } from "@/services/historyService"; @@ -5,6 +6,7 @@ import type { Result } from "@/types/result"; import { Ok, Err } from "@/types/result"; import type { SendMessageError } from "@/types/errors"; import type { AIService } from "@/services/aiService"; +import { log } from "@/services/log"; import type { MockAssistantEvent, MockStreamErrorEvent, @@ -17,6 +19,91 @@ import type { ToolCallStartEvent, ToolCallEndEvent } from "@/types/stream"; import type { ReasoningDeltaEvent } from "@/types/stream"; import { getTokenizerForModel } from "@/utils/main/tokenizer"; +const MOCK_TOKENIZER_MODEL = "openai:gpt-5"; +const TOKENIZE_TIMEOUT_MS = 150; +let tokenizerFallbackLogged = false; + +function approximateTokenCount(text: string): number { + const normalizedLength = text.trim().length; + if (normalizedLength === 0) { + return 0; + } + return Math.max(1, Math.ceil(normalizedLength / 4)); +} + +async function tokenizeWithMockModel(text: string, context: string): Promise { + assert(typeof text === "string", `Mock scenario ${context} expects string input`); + const approximateTokens = approximateTokenCount(text); + let fallbackUsed = false; + let timeoutId: NodeJS.Timeout | undefined; + + const fallbackPromise = new Promise((resolve) => { + timeoutId = setTimeout(() => { + fallbackUsed = true; + resolve(approximateTokens); + }, TOKENIZE_TIMEOUT_MS); + }); + + const actualPromise = (async () => { + const tokenizer = await getTokenizerForModel(MOCK_TOKENIZER_MODEL); + assert( + typeof tokenizer.encoding === "string" && tokenizer.encoding.length > 0, + `Tokenizer for ${MOCK_TOKENIZER_MODEL} must expose a non-empty encoding` + ); + const tokens = await tokenizer.countTokens(text); + assert( + Number.isFinite(tokens) && tokens >= 0, + `Tokenizer for ${MOCK_TOKENIZER_MODEL} returned invalid token count` + ); + return tokens; + })(); + + let tokens: number; + try { + tokens = await Promise.race([actualPromise, fallbackPromise]); + } catch (error) { + if (timeoutId !== undefined) { + clearTimeout(timeoutId); + } + const errorMessage = error instanceof Error ? error.message : String(error); + throw new Error( + `[MockScenarioPlayer] Failed to tokenize ${context} with ${MOCK_TOKENIZER_MODEL}: ${errorMessage}` + ); + } + + if (!fallbackUsed && timeoutId !== undefined) { + clearTimeout(timeoutId); + } + + actualPromise + .then((resolvedTokens) => { + if (fallbackUsed && !tokenizerFallbackLogged) { + tokenizerFallbackLogged = true; + log.debug( + `[MockScenarioPlayer] Tokenizer fallback used for ${context}; emitted ${approximateTokens}, background tokenizer returned ${resolvedTokens}` + ); + } + }) + .catch((error) => { + if (fallbackUsed && !tokenizerFallbackLogged) { + tokenizerFallbackLogged = true; + const errorMessage = error instanceof Error ? 
error.message : String(error); + log.debug( + `[MockScenarioPlayer] Tokenizer fallback used for ${context}; background error: ${errorMessage}` + ); + } + }); + + if (fallbackUsed) { + assert( + Number.isFinite(tokens) && tokens >= 0, + `Token fallback produced invalid count for ${context}` + ); + } + + return tokens; +} + interface MockPlayerDeps { aiService: AIService; historyService: HistoryService; @@ -25,6 +112,8 @@ interface MockPlayerDeps { interface ActiveStream { timers: NodeJS.Timeout[]; messageId: string; + eventQueue: Array<() => Promise>; + isProcessing: boolean; } export class MockScenarioPlayer { @@ -120,7 +209,7 @@ export class MockScenarioPlayer { return Ok(undefined); } - replayStream(_workspaceId: string): void { + async replayStream(_workspaceId: string): Promise { // No-op for mock scenario; events are deterministic and do not support mid-stream replay } @@ -129,16 +218,48 @@ export class MockScenarioPlayer { this.activeStreams.set(workspaceId, { timers, messageId: turn.assistant.messageId, + eventQueue: [], + isProcessing: false, }); for (const event of turn.assistant.events) { const timer = setTimeout(() => { - void this.dispatchEvent(workspaceId, event, turn.assistant.messageId, historySequence); + this.enqueueEvent(workspaceId, () => + this.dispatchEvent(workspaceId, event, turn.assistant.messageId, historySequence) + ); }, event.delay); timers.push(timer); } } + private enqueueEvent(workspaceId: string, handler: () => Promise): void { + const active = this.activeStreams.get(workspaceId); + if (!active) return; + + active.eventQueue.push(handler); + void this.processQueue(workspaceId); + } + + private async processQueue(workspaceId: string): Promise { + const active = this.activeStreams.get(workspaceId); + if (!active || active.isProcessing) return; + + active.isProcessing = true; + + while (active.eventQueue.length > 0) { + const handler = active.eventQueue.shift(); + if (!handler) break; + + try { + await handler(); + } catch (error) { + console.error(`[MockScenarioPlayer] Event handler error for ${workspaceId}:`, error); + } + } + + active.isProcessing = false; + } + private async dispatchEvent( workspaceId: string, event: MockAssistantEvent, @@ -159,8 +280,7 @@ export class MockScenarioPlayer { } case "reasoning-delta": { // Mock scenarios use the same tokenization logic as real streams for consistency - const tokenizer = getTokenizerForModel("gpt-4"); // Mock uses GPT-4 tokenizer - const tokens = tokenizer.countTokens(event.text); + const tokens = await tokenizeWithMockModel(event.text, "reasoning-delta text"); const payload: ReasoningDeltaEvent = { type: "reasoning-delta", workspaceId, @@ -175,8 +295,7 @@ export class MockScenarioPlayer { case "tool-start": { // Mock scenarios use the same tokenization logic as real streams for consistency const inputText = JSON.stringify(event.args); - const tokenizer = getTokenizerForModel("gpt-4"); // Mock uses GPT-4 tokenizer - const tokens = tokenizer.countTokens(inputText); + const tokens = await tokenizeWithMockModel(inputText, "tool-call args"); const payload: ToolCallStartEvent = { type: "tool-call-start", workspaceId, @@ -204,8 +323,13 @@ export class MockScenarioPlayer { } case "stream-delta": { // Mock scenarios use the same tokenization logic as real streams for consistency - const tokenizer = getTokenizerForModel("gpt-4"); // Mock uses GPT-4 tokenizer - const tokens = tokenizer.countTokens(event.text); + let tokens: number; + try { + tokens = await tokenizeWithMockModel(event.text, "stream-delta text"); + } catch 
(error) { + console.error("[MockScenarioPlayer] tokenize failed for stream-delta", error); + throw error; + } const payload: StreamDeltaEvent = { type: "stream-delta", workspaceId, @@ -278,9 +402,15 @@ export class MockScenarioPlayer { private cleanup(workspaceId: string): void { const active = this.activeStreams.get(workspaceId); if (!active) return; + + // Clear all pending timers for (const timer of active.timers) { clearTimeout(timer); } + + // Clear event queue to prevent any pending events from processing + active.eventQueue = []; + this.activeStreams.delete(workspaceId); } diff --git a/src/services/mock/scenarios/basicChat.ts b/src/services/mock/scenarios/basicChat.ts index ce83946a4..3127b634e 100644 --- a/src/services/mock/scenarios/basicChat.ts +++ b/src/services/mock/scenarios/basicChat.ts @@ -12,7 +12,7 @@ const listProgrammingLanguagesTurn: ScenarioTurn = { assistant: { messageId: "msg-basic-1", events: [ - { kind: "stream-start", delay: 0, messageId: "msg-basic-1", model: "mock:planner" }, + { kind: "stream-start", delay: 0, messageId: "msg-basic-1", model: "openai:gpt-5" }, { kind: "stream-delta", delay: STREAM_BASE_DELAY, @@ -37,7 +37,7 @@ const listProgrammingLanguagesTurn: ScenarioTurn = { kind: "stream-end", delay: STREAM_BASE_DELAY * 5, metadata: { - model: "mock:planner", + model: "openai:gpt-5", inputTokens: 64, outputTokens: 48, systemMessageTokens: 12, diff --git a/src/services/mock/scenarios/permissionModes.ts b/src/services/mock/scenarios/permissionModes.ts index ce7df76ab..7dab116d0 100644 --- a/src/services/mock/scenarios/permissionModes.ts +++ b/src/services/mock/scenarios/permissionModes.ts @@ -19,7 +19,7 @@ const planRefactorTurn: ScenarioTurn = { kind: "stream-start", delay: 0, messageId: "msg-plan-refactor", - model: "mock:planner", + model: "openai:gpt-5", }, { kind: "stream-delta", @@ -45,7 +45,7 @@ const planRefactorTurn: ScenarioTurn = { kind: "stream-end", delay: STREAM_BASE_DELAY * 5, metadata: { - model: "mock:planner", + model: "openai:gpt-5", inputTokens: 180, outputTokens: 130, systemMessageTokens: 24, @@ -74,7 +74,7 @@ const executePlanTurn: ScenarioTurn = { kind: "stream-start", delay: 0, messageId: "msg-exec-refactor", - model: "mock:executor", + model: "openai:gpt-5", }, { kind: "tool-start", @@ -118,7 +118,7 @@ const executePlanTurn: ScenarioTurn = { kind: "stream-end", delay: STREAM_BASE_DELAY * 3, metadata: { - model: "mock:executor", + model: "openai:gpt-5", inputTokens: 220, outputTokens: 110, systemMessageTokens: 18, diff --git a/src/services/mock/scenarios/review.ts b/src/services/mock/scenarios/review.ts index f77f48aa7..0015c4f58 100644 --- a/src/services/mock/scenarios/review.ts +++ b/src/services/mock/scenarios/review.ts @@ -16,7 +16,7 @@ const summarizeBranchesTurn: ScenarioTurn = { assistant: { messageId: "msg-plan-1", events: [ - { kind: "stream-start", delay: 0, messageId: "msg-plan-1", model: "mock:planner" }, + { kind: "stream-start", delay: 0, messageId: "msg-plan-1", model: "openai:gpt-5" }, { kind: "reasoning-delta", delay: STREAM_BASE_DELAY, @@ -61,7 +61,7 @@ const summarizeBranchesTurn: ScenarioTurn = { kind: "stream-end", delay: STREAM_BASE_DELAY * 6, metadata: { - model: "mock:planner", + model: "openai:gpt-5", inputTokens: 128, outputTokens: 85, systemMessageTokens: 32, @@ -86,7 +86,7 @@ const openOnboardingDocTurn: ScenarioTurn = { assistant: { messageId: "msg-exec-1", events: [ - { kind: "stream-start", delay: 0, messageId: "msg-exec-1", model: "mock:executor" }, + { kind: "stream-start", delay: 0, messageId: 
"msg-exec-1", model: "openai:gpt-5" }, { kind: "tool-start", delay: STREAM_BASE_DELAY, @@ -114,7 +114,7 @@ const showOnboardingDocTurn: ScenarioTurn = { assistant: { messageId: "msg-exec-2", events: [ - { kind: "stream-start", delay: 0, messageId: "msg-exec-2", model: "mock:executor" }, + { kind: "stream-start", delay: 0, messageId: "msg-exec-2", model: "openai:gpt-5" }, { kind: "tool-start", delay: STREAM_BASE_DELAY, @@ -153,7 +153,7 @@ const showOnboardingDocTurn: ScenarioTurn = { kind: "stream-end", delay: STREAM_BASE_DELAY * 3, metadata: { - model: "mock:executor", + model: "openai:gpt-5", inputTokens: 96, outputTokens: 142, systemMessageTokens: 32, diff --git a/src/services/mock/scenarios/slashCommands.ts b/src/services/mock/scenarios/slashCommands.ts index 3dd6ca4ac..087ac2ebb 100644 --- a/src/services/mock/scenarios/slashCommands.ts +++ b/src/services/mock/scenarios/slashCommands.ts @@ -24,7 +24,7 @@ const compactConversationTurn: ScenarioTurn = { kind: "stream-start", delay: 0, messageId: "msg-slash-compact-1", - model: "anthropic:claude-sonnet-4-5", + model: "openai:gpt-5", }, { kind: "stream-delta", @@ -35,7 +35,7 @@ const compactConversationTurn: ScenarioTurn = { kind: "stream-end", delay: STREAM_BASE_DELAY * 2, metadata: { - model: "anthropic:claude-sonnet-4-5", + model: "openai:gpt-5", inputTokens: 220, outputTokens: 96, systemMessageTokens: 18, diff --git a/src/services/mock/scenarios/toolFlows.ts b/src/services/mock/scenarios/toolFlows.ts index 80fcc1cb2..01ca24ae7 100644 --- a/src/services/mock/scenarios/toolFlows.ts +++ b/src/services/mock/scenarios/toolFlows.ts @@ -19,7 +19,7 @@ const fileReadTurn: ScenarioTurn = { assistant: { messageId: "msg-tool-file-read", events: [ - { kind: "stream-start", delay: 0, messageId: "msg-tool-file-read", model: "mock:executor" }, + { kind: "stream-start", delay: 0, messageId: "msg-tool-file-read", model: "openai:gpt-5" }, { kind: "tool-start", delay: STREAM_BASE_DELAY, @@ -55,7 +55,7 @@ const fileReadTurn: ScenarioTurn = { kind: "stream-end", delay: STREAM_BASE_DELAY * 3, metadata: { - model: "mock:executor", + model: "openai:gpt-5", inputTokens: 92, outputTokens: 64, systemMessageTokens: 18, @@ -78,7 +78,7 @@ const listDirectoryTurn: ScenarioTurn = { assistant: { messageId: "msg-tool-bash-ls", events: [ - { kind: "stream-start", delay: 0, messageId: "msg-tool-bash-ls", model: "mock:executor" }, + { kind: "stream-start", delay: 0, messageId: "msg-tool-bash-ls", model: "openai:gpt-5" }, { kind: "tool-start", delay: STREAM_BASE_DELAY, @@ -110,19 +110,19 @@ const listDirectoryTurn: ScenarioTurn = { }, { kind: "stream-delta", - delay: STREAM_BASE_DELAY * 2 + 300, + delay: STREAM_BASE_DELAY * 3 + 50, text: "- package.json\n", }, { kind: "stream-delta", - delay: STREAM_BASE_DELAY * 2 + 400, + delay: STREAM_BASE_DELAY * 3 + 150, text: "- src", }, { kind: "stream-end", - delay: STREAM_BASE_DELAY * 3, + delay: STREAM_BASE_DELAY * 3 + 500, metadata: { - model: "mock:executor", + model: "openai:gpt-5", inputTokens: 74, outputTokens: 58, systemMessageTokens: 16, @@ -151,7 +151,7 @@ const createTestFileTurn: ScenarioTurn = { kind: "stream-start", delay: 0, messageId: "msg-tool-create-test-file", - model: "mock:executor", + model: "openai:gpt-5", }, { kind: "tool-start", @@ -181,7 +181,7 @@ const createTestFileTurn: ScenarioTurn = { kind: "stream-end", delay: STREAM_BASE_DELAY * 3, metadata: { - model: "mock:executor", + model: "openai:gpt-5", inputTokens: 80, outputTokens: 40, systemMessageTokens: 12, @@ -205,7 +205,7 @@ const readTestFileTurn: 
ScenarioTurn = { kind: "stream-start", delay: 0, messageId: "msg-tool-read-test-file", - model: "mock:executor", + model: "openai:gpt-5", }, { kind: "tool-start", @@ -242,7 +242,7 @@ const readTestFileTurn: ScenarioTurn = { kind: "stream-end", delay: STREAM_BASE_DELAY * 3, metadata: { - model: "mock:executor", + model: "openai:gpt-5", inputTokens: 76, outputTokens: 52, systemMessageTokens: 12, @@ -269,7 +269,7 @@ const recallTestFileTurn: ScenarioTurn = { kind: "stream-start", delay: 0, messageId: "msg-tool-recall-test-file", - model: "mock:planner", + model: "openai:gpt-5", }, { kind: "stream-delta", @@ -280,7 +280,7 @@ const recallTestFileTurn: ScenarioTurn = { kind: "stream-end", delay: STREAM_BASE_DELAY * 2, metadata: { - model: "mock:planner", + model: "openai:gpt-5", inputTokens: 60, outputTokens: 34, systemMessageTokens: 10, diff --git a/src/services/streamManager.test.ts b/src/services/streamManager.test.ts index 0ceb575a7..fd4b49e54 100644 --- a/src/services/streamManager.test.ts +++ b/src/services/streamManager.test.ts @@ -379,7 +379,7 @@ describe("StreamManager - Unavailable Tool Handling", () => { messageId: "test-message-1", token: "test-token", startTime: Date.now(), - model: "test-model", + model: "anthropic:claude-sonnet-4-5", historySequence: 1, parts: [], lastPartialWriteTime: 0, diff --git a/src/services/streamManager.ts b/src/services/streamManager.ts index 10a5fe23e..56668342d 100644 --- a/src/services/streamManager.ts +++ b/src/services/streamManager.ts @@ -272,11 +272,11 @@ export class StreamManager extends EventEmitter { * Usage is only available after stream completes naturally. * On abort, the usage promise may hang - we use a timeout to return quickly. */ - private emitToolCallDeltaIfPresent( + private async emitToolCallDeltaIfPresent( workspaceId: WorkspaceId, streamInfo: WorkspaceStreamInfo, part: unknown - ): boolean { + ): Promise { const maybeDelta = part as { type?: string } | undefined; if (maybeDelta?.type !== "tool-call-delta") { return false; @@ -293,7 +293,7 @@ export class StreamManager extends EventEmitter { return true; } - const tokens = this.tokenTracker.countTokens(deltaText); + const tokens = await this.tokenTracker.countTokens(deltaText); const timestamp = Date.now(); this.emit("tool-call-delta", { @@ -347,15 +347,15 @@ export class StreamManager extends EventEmitter { * @param messageId - Message identifier * @param part - The part to emit (text, reasoning, or tool) */ - private emitPartAsEvent( + private async emitPartAsEvent( workspaceId: WorkspaceId, messageId: string, part: CompletedMessagePart - ): void { + ): Promise { const timestamp = part.timestamp ?? 
Date.now(); if (part.type === "text") { - const tokens = this.tokenTracker.countTokens(part.text); + const tokens = await this.tokenTracker.countTokens(part.text); this.emit("stream-delta", { type: "stream-delta", workspaceId: workspaceId as string, @@ -365,7 +365,7 @@ export class StreamManager extends EventEmitter { timestamp, }); } else if (part.type === "reasoning") { - const tokens = this.tokenTracker.countTokens(part.text); + const tokens = await this.tokenTracker.countTokens(part.text); this.emit("reasoning-delta", { type: "reasoning-delta", workspaceId: workspaceId as string, @@ -376,7 +376,7 @@ export class StreamManager extends EventEmitter { }); } else if (part.type === "dynamic-tool") { const inputText = JSON.stringify(part.input); - const tokens = this.tokenTracker.countTokens(inputText); + const tokens = await this.tokenTracker.countTokens(inputText); this.emit("tool-call-start", { type: "tool-call-start", workspaceId: workspaceId as string, @@ -615,7 +615,7 @@ export class StreamManager extends EventEmitter { } as StreamStartEvent); // Initialize token tracker for this model - this.tokenTracker.setModel(streamInfo.model); + await this.tokenTracker.setModel(streamInfo.model); // Use fullStream to capture all events including tool calls const toolCalls = new Map< @@ -649,7 +649,7 @@ export class StreamManager extends EventEmitter { streamInfo.parts.push(textPart); // Emit using shared logic (ensures replay consistency) - this.emitPartAsEvent(workspaceId, streamInfo.messageId, textPart); + await this.emitPartAsEvent(workspaceId, streamInfo.messageId, textPart); // Schedule partial write (throttled, fire-and-forget to not block stream) void this.schedulePartialWrite(workspaceId, streamInfo); @@ -657,7 +657,7 @@ export class StreamManager extends EventEmitter { } default: { - if (this.emitToolCallDeltaIfPresent(workspaceId, streamInfo, part)) { + if (await this.emitToolCallDeltaIfPresent(workspaceId, streamInfo, part)) { break; } break; @@ -676,7 +676,7 @@ export class StreamManager extends EventEmitter { streamInfo.parts.push(reasoningPart); // Emit using shared logic (ensures replay consistency) - this.emitPartAsEvent(workspaceId, streamInfo.messageId, reasoningPart); + await this.emitPartAsEvent(workspaceId, streamInfo.messageId, reasoningPart); void this.schedulePartialWrite(workspaceId, streamInfo); break; @@ -721,7 +721,7 @@ export class StreamManager extends EventEmitter { log.debug( `[StreamManager] tool-call: toolName=${part.toolName}, input length=${inputText.length}` ); - this.emitPartAsEvent(workspaceId, streamInfo.messageId, toolPart); + await this.emitPartAsEvent(workspaceId, streamInfo.messageId, toolPart); break; } @@ -1271,7 +1271,7 @@ export class StreamManager extends EventEmitter { * Emits the same events (stream-start, stream-delta, etc.) 
that would be emitted during live streaming * This allows replay to flow through the same event path as live streaming (no duplication) */ - replayStream(workspaceId: string): void { + async replayStream(workspaceId: string): Promise { const typedWorkspaceId = workspaceId as WorkspaceId; const streamInfo = this.workspaceStreams.get(typedWorkspaceId); @@ -1284,7 +1284,7 @@ export class StreamManager extends EventEmitter { } // Initialize token tracker for this model (required for tokenization) - this.tokenTracker.setModel(streamInfo.model); + await this.tokenTracker.setModel(streamInfo.model); // Emit stream-start event this.emit("stream-start", { @@ -1298,7 +1298,7 @@ export class StreamManager extends EventEmitter { // Replay accumulated parts as events using shared emission logic // This guarantees replay produces identical events to the original stream for (const part of streamInfo.parts) { - this.emitPartAsEvent(typedWorkspaceId, streamInfo.messageId, part); + await this.emitPartAsEvent(typedWorkspaceId, streamInfo.messageId, part); } } diff --git a/src/stores/WorkspaceConsumerManager.ts b/src/stores/WorkspaceConsumerManager.ts index dcbb48063..2ec2c0160 100644 --- a/src/stores/WorkspaceConsumerManager.ts +++ b/src/stores/WorkspaceConsumerManager.ts @@ -1,10 +1,49 @@ -import assert from "@/utils/assert"; import type { WorkspaceConsumersState } from "./WorkspaceStore"; -import { TokenStatsWorker } from "@/utils/tokens/TokenStatsWorker"; import type { StreamingMessageAggregator } from "@/utils/messages/StreamingMessageAggregator"; +import type { ChatStats } from "@/types/chatStats"; +import type { CmuxMessage } from "@/types/message"; +import assert from "@/utils/assert"; + +const TOKENIZER_CANCELLED_MESSAGE = "Cancelled by newer request"; + +let globalTokenStatsRequestId = 0; +const latestRequestByWorkspace = new Map(); + +function getTokenizerApi() { + if (typeof window === "undefined") { + return null; + } + return window.api?.tokenizer ?? null; +} + +async function calculateTokenStatsLatest( + workspaceId: string, + messages: CmuxMessage[], + model: string +): Promise { + const tokenizer = getTokenizerApi(); + assert(tokenizer, "Tokenizer IPC bridge unavailable"); + + const requestId = ++globalTokenStatsRequestId; + latestRequestByWorkspace.set(workspaceId, requestId); + + try { + const stats = await tokenizer.calculateStats(messages, model); + const latestRequestId = latestRequestByWorkspace.get(workspaceId); + if (latestRequestId !== requestId) { + throw new Error(TOKENIZER_CANCELLED_MESSAGE); + } + return stats; + } catch (error) { + if (error instanceof Error) { + throw error; + } + throw new Error(String(error)); + } +} -// Timeout for Web Worker calculations (10 seconds - generous but responsive) -const CALCULATION_TIMEOUT_MS = 10_000; +// Timeout for Web Worker calculations (60 seconds - generous but responsive) +const CALCULATION_TIMEOUT_MS = 60_000; /** * Manages consumer token calculations for workspaces. 
@@ -28,9 +67,6 @@ const CALCULATION_TIMEOUT_MS = 10_000; * (components subscribe to workspace changes, delegates to manager for state) */ export class WorkspaceConsumerManager { - // Web Worker for tokenization (shared across workspaces) - private readonly tokenWorker: TokenStatsWorker; - // Track scheduled calculations (in debounce window, not yet executing) private scheduledCalcs = new Set(); @@ -53,20 +89,9 @@ export class WorkspaceConsumerManager { private pendingNotifications = new Set(); constructor(onCalculationComplete: (workspaceId: string) => void) { - this.tokenWorker = new TokenStatsWorker(); this.onCalculationComplete = onCalculationComplete; } - onTokenizerReady(listener: () => void): () => void { - assert(typeof listener === "function", "Tokenizer ready listener must be a function"); - return this.tokenWorker.onTokenizerReady(listener); - } - - onTokenizerEncodingLoaded(listener: (encodingName: string) => void): () => void { - assert(typeof listener === "function", "Tokenizer encoding listener must be a function"); - return this.tokenWorker.onEncodingLoaded(listener); - } - /** * Get cached state without side effects. * Returns null if no cache exists. @@ -165,13 +190,13 @@ export class WorkspaceConsumerManager { const messages = aggregator.getAllMessages(); const model = aggregator.getCurrentModel() ?? "unknown"; - // Calculate in Web Worker with timeout protection + // Calculate in piscina pool with timeout protection const timeoutPromise = new Promise((_, reject) => setTimeout(() => reject(new Error("Calculation timeout")), CALCULATION_TIMEOUT_MS) ); const fullStats = await Promise.race([ - this.tokenWorker.calculate(messages, model), + calculateTokenStatsLatest(workspaceId, messages, model), timeoutPromise, ]); @@ -188,7 +213,7 @@ export class WorkspaceConsumerManager { } catch (error) { // Cancellations are expected during rapid events - don't cache, don't log // This allows lazy trigger to retry on next access - if (error instanceof Error && error.message === "Cancelled by newer request") { + if (error instanceof Error && error.message === TOKENIZER_CANCELLED_MESSAGE) { return; } @@ -263,9 +288,6 @@ export class WorkspaceConsumerManager { } this.debounceTimers.clear(); - // Terminate worker - this.tokenWorker.terminate(); - // Clear state this.cache.clear(); this.scheduledCalcs.clear(); diff --git a/src/stores/WorkspaceStore.ts b/src/stores/WorkspaceStore.ts index 6c899455e..4442f4fbc 100644 --- a/src/stores/WorkspaceStore.ts +++ b/src/stores/WorkspaceStore.ts @@ -44,18 +44,6 @@ export interface WorkspaceSidebarState { agentStatus: { emoji: string; message: string; url?: string } | undefined; } -/** - * Helper to extract sidebar state from aggregator. - */ -function extractSidebarState(aggregator: StreamingMessageAggregator): WorkspaceSidebarState { - return { - canInterrupt: aggregator.getActiveStreams().length > 0, - currentModel: aggregator.getCurrentModel() ?? null, - recencyTimestamp: aggregator.getRecencyTimestamp(), - agentStatus: aggregator.getAgentStatus(), - }; -} - /** * Derived state values stored in the derived MapStore. * Currently only recency timestamps for workspace sorting. 
@@ -105,7 +93,6 @@ export class WorkspaceStore { // Architecture: WorkspaceStore orchestrates (decides when), manager executes (performs calculations) // Dual-cache: consumersStore (MapStore) handles subscriptions, manager owns data cache private readonly consumerManager: WorkspaceConsumerManager; - private readonly cleanupTokenizerReady: () => void; // Supporting data structures private aggregators = new Map(); @@ -228,31 +215,6 @@ export class WorkspaceStore { this.consumersStore.bump(workspaceId); }); - const rescheduleConsumers = () => { - for (const [workspaceId, aggregator] of this.aggregators.entries()) { - assert( - workspaceId.length > 0, - "Workspace ID must be non-empty when rescheduling consumers" - ); - if (!this.caughtUp.get(workspaceId)) { - continue; - } - if (aggregator.getAllMessages().length === 0) { - continue; - } - this.consumerManager.scheduleCalculation(workspaceId, aggregator); - } - }; - - const cleanupReady = this.consumerManager.onTokenizerReady(rescheduleConsumers); - const cleanupEncoding = this.consumerManager.onTokenizerEncodingLoaded(() => { - rescheduleConsumers(); - }); - this.cleanupTokenizerReady = () => { - cleanupReady(); - cleanupEncoding(); - }; - // Note: We DON'T auto-check recency on every state bump. // Instead, checkAndBumpRecencyIfChanged() is called explicitly after // message completion events (not on deltas) to prevent App.tsx re-renders. @@ -289,30 +251,6 @@ export class WorkspaceStore { } } - /** - * Only bump workspace state if sidebar-relevant fields changed. - * Prevents unnecessary re-renders during stream deltas. - */ - private bumpIfSidebarChanged(workspaceId: string): void { - const aggregator = this.aggregators.get(workspaceId); - if (!aggregator) return; - - const current = extractSidebarState(aggregator); - const previous = this.previousSidebarValues.get(workspaceId); - - // First time or any relevant field changed - if ( - !previous || - previous.canInterrupt !== current.canInterrupt || - previous.currentModel !== current.currentModel || - previous.recencyTimestamp !== current.recencyTimestamp || - previous.agentStatus !== current.agentStatus - ) { - this.previousSidebarValues.set(workspaceId, current); - this.states.bump(workspaceId); - } - } - /** * Subscribe to store changes (any workspace). * Delegates to MapStore's subscribeAny. 
@@ -854,7 +792,6 @@ export class WorkspaceStore { dispose(): void { // Clean up consumer manager this.consumerManager.dispose(); - this.cleanupTokenizerReady(); for (const unsubscribe of this.ipcUnsubscribers.values()) { unsubscribe(); diff --git a/src/styles/globals.css b/src/styles/globals.css index fe9226437..c90651b0b 100644 --- a/src/styles/globals.css +++ b/src/styles/globals.css @@ -6,49 +6,49 @@ --color-plan-mode: hsl(210 70% 40%); --color-plan-mode-hover: hsl(210 70% 52%); --color-plan-mode-light: hsl(210 70% 68%); - + --color-exec-mode: hsl(268.56 94.04% 55.19%); --color-exec-mode-hover: hsl(268.56 94.04% 67%); --color-exec-mode-light: hsl(268.56 94.04% 78%); - + --color-edit-mode: hsl(120 50% 35%); --color-edit-mode-hover: hsl(120 50% 47%); --color-edit-mode-light: hsl(120 50% 62%); - + --color-read: hsl(210 70% 40%); --color-editing-mode: hsl(30 100% 50%); --color-pending: hsl(30 100% 70%); - + --color-debug-mode: hsl(214 100% 64%); --color-debug-light: hsl(214 100% 76%); --color-debug-text: hsl(214 100% 80%); - + --color-thinking-mode: hsl(271 76% 53%); --color-thinking-mode-light: hsl(271 76% 65%); --color-thinking-border: hsl(271 76% 53%); - + /* Background & Layout */ --color-background: hsl(0 0% 12%); --color-background-secondary: hsl(60 1% 15%); --color-border: hsl(240 2% 25%); --color-foreground: hsl(0 0% 83%); --color-secondary: hsl(0 0% 42%); - + /* Code */ --color-code-bg: hsl(0 6.43% 8.04%); - + /* Buttons */ --color-button-bg: hsl(0 0% 24%); --color-button-text: hsl(0 0% 80%); --color-button-hover: hsl(0 0% 29%); - + /* Messages */ --color-user-border: hsl(0 0% 38%); --color-user-border-hover: hsl(0 0% 44%); --color-assistant-border: hsl(207 45% 40%); --color-assistant-border-hover: hsl(207 45% 50%); --color-message-header: hsl(0 0% 80%); - + /* Tokens */ --color-token-prompt: hsl(0 0% 40%); --color-token-completion: hsl(207 100% 40%); @@ -57,7 +57,7 @@ --color-token-input: hsl(120 40% 35%); --color-token-output: hsl(207 100% 40%); --color-token-cached: hsl(0 0% 50%); - + /* Toggle */ --color-toggle-bg: hsl(0 0% 16.5%); --color-toggle-active: hsl(0 0% 22.7%); @@ -65,112 +65,112 @@ --color-toggle-text: hsl(0 0% 53.3%); --color-toggle-text-active: hsl(0 0% 100%); --color-toggle-text-hover: hsl(0 0% 66.7%); - + /* Status */ --color-interrupted: hsl(38 92% 50%); --color-review-accent: hsl(48 70% 50%); --color-git-dirty: hsl(38 92% 50%); --color-error: hsl(0 70% 50%); --color-error-bg: hsl(0 32% 18%); - + /* Input */ --color-input-bg: hsl(0 0% 12%); --color-input-text: hsl(0 0% 80%); --color-input-border: hsl(207 51% 59%); --color-input-border-focus: hsl(193 91% 64%); - + /* Scrollbar */ --color-scrollbar-track: hsl(0 0% 18%); --color-scrollbar-thumb: hsl(0 0% 32%); --color-scrollbar-thumb-hover: hsl(0 0% 42%); - + /* Additional Semantic Colors */ - --color-muted: hsl(0 0% 53%); /* #888 - muted text */ - --color-muted-light: hsl(0 0% 50%); /* #808080 - muted light */ - --color-muted-dark: hsl(0 0% 43%); /* #6e6e6e - muted darker */ - --color-placeholder: hsl(0 0% 42%); /* #6b6b6b - placeholder */ - --color-subtle: hsl(0 0% 60%); /* #999 - subtle */ - --color-dim: hsl(0 0% 42%); /* #666 - dimmed */ - --color-light: hsl(0 0% 83%); /* #d4d4d4 - light */ - --color-lighter: hsl(0 0% 90%); /* #e5e5e5 - lighter */ - --color-bright: hsl(0 0% 80%); /* #cccccc - bright */ - --color-subdued: hsl(0 0% 60%); /* #9a9a9a - subdued */ - --color-label: hsl(0 0% 67%); /* #aaa - label */ - --color-gray: hsl(0 0% 48%); /* #7a7a7a - gray */ - --color-medium: hsl(0 0% 59%); /* #969696 - 
medium */ - - --color-border-light: hsl(240 3% 25%); /* #3e3e42 - lighter borders */ - --color-border-medium: hsl(0 0% 27%); /* #444 - medium borders */ - --color-border-darker: hsl(0 0% 33%); /* #555 - darker borders */ - --color-border-subtle: hsl(0 0% 40%); /* #666 - subtle border */ - --color-border-gray: hsl(240 1% 31%); /* #4e4e52 - gray border */ - - --color-dark: hsl(0 0% 11.5%); /* #1e1e1e - dark backgrounds */ - --color-darker: hsl(0 0% 8.6%); /* #161616 - darker backgrounds */ - --color-hover: hsl(0 0% 16.5%); /* #2a2a2b - hover states */ - --color-bg-medium: hsl(0 0% 27%); /* #454545 - medium bg */ - --color-bg-light: hsl(0 0% 30%); /* #4c4c4c - light bg */ - --color-bg-subtle: hsl(240 3% 22%); /* #37373d - subtle bg */ - - --color-separator: hsl(0 0% 15%); /* #252526 - separators */ - --color-separator-light: hsl(0 0% 27%); /* #464647 - lighter separator */ - --color-modal-bg: hsl(0 0% 18%); /* #2d2d30 - modal backgrounds */ - - --color-accent: hsl(207 100% 40%); /* #007acc - VS Code blue */ - --color-accent-hover: hsl(207 100% 45%); /* #1177bb - accent hover */ - --color-accent-dark: hsl(207 100% 37%); /* #0e639c - darker accent */ - --color-accent-darker: hsl(202 100% 23%); /* #094771 - even darker accent */ - --color-accent-light: hsl(198 100% 65%); /* #4db8ff - lighter accent */ - - --color-success: hsl(122 39% 49%); /* #4caf50 - success green */ - --color-success-light: hsl(123 46% 64%); /* #4ade80 - lighter success */ - - --color-danger: hsl(4 90% 58%); /* #f44336 - error red */ - --color-danger-light: hsl(0 91% 71%); /* #ff5555 - lighter error red */ - --color-danger-soft: hsl(6 93% 71%); /* #f48771 - softer error */ - - --color-warning: hsl(45 100% 51%); /* #ffc107 - warning yellow */ - --color-warning-light: hsl(0 91% 71%); /* #f87171 - lighter warning/error */ - + --color-muted: hsl(0 0% 53%); /* #888 - muted text */ + --color-muted-light: hsl(0 0% 50%); /* #808080 - muted light */ + --color-muted-dark: hsl(0 0% 43%); /* #6e6e6e - muted darker */ + --color-placeholder: hsl(0 0% 42%); /* #6b6b6b - placeholder */ + --color-subtle: hsl(0 0% 60%); /* #999 - subtle */ + --color-dim: hsl(0 0% 42%); /* #666 - dimmed */ + --color-light: hsl(0 0% 83%); /* #d4d4d4 - light */ + --color-lighter: hsl(0 0% 90%); /* #e5e5e5 - lighter */ + --color-bright: hsl(0 0% 80%); /* #cccccc - bright */ + --color-subdued: hsl(0 0% 60%); /* #9a9a9a - subdued */ + --color-label: hsl(0 0% 67%); /* #aaa - label */ + --color-gray: hsl(0 0% 48%); /* #7a7a7a - gray */ + --color-medium: hsl(0 0% 59%); /* #969696 - medium */ + + --color-border-light: hsl(240 3% 25%); /* #3e3e42 - lighter borders */ + --color-border-medium: hsl(0 0% 27%); /* #444 - medium borders */ + --color-border-darker: hsl(0 0% 33%); /* #555 - darker borders */ + --color-border-subtle: hsl(0 0% 40%); /* #666 - subtle border */ + --color-border-gray: hsl(240 1% 31%); /* #4e4e52 - gray border */ + + --color-dark: hsl(0 0% 11.5%); /* #1e1e1e - dark backgrounds */ + --color-darker: hsl(0 0% 8.6%); /* #161616 - darker backgrounds */ + --color-hover: hsl(0 0% 16.5%); /* #2a2a2b - hover states */ + --color-bg-medium: hsl(0 0% 27%); /* #454545 - medium bg */ + --color-bg-light: hsl(0 0% 30%); /* #4c4c4c - light bg */ + --color-bg-subtle: hsl(240 3% 22%); /* #37373d - subtle bg */ + + --color-separator: hsl(0 0% 15%); /* #252526 - separators */ + --color-separator-light: hsl(0 0% 27%); /* #464647 - lighter separator */ + --color-modal-bg: hsl(0 0% 18%); /* #2d2d30 - modal backgrounds */ + + --color-accent: hsl(207 100% 40%); /* #007acc - VS 
Code blue */ + --color-accent-hover: hsl(207 100% 45%); /* #1177bb - accent hover */ + --color-accent-dark: hsl(207 100% 37%); /* #0e639c - darker accent */ + --color-accent-darker: hsl(202 100% 23%); /* #094771 - even darker accent */ + --color-accent-light: hsl(198 100% 65%); /* #4db8ff - lighter accent */ + + --color-success: hsl(122 39% 49%); /* #4caf50 - success green */ + --color-success-light: hsl(123 46% 64%); /* #4ade80 - lighter success */ + + --color-danger: hsl(4 90% 58%); /* #f44336 - error red */ + --color-danger-light: hsl(0 91% 71%); /* #ff5555 - lighter error red */ + --color-danger-soft: hsl(6 93% 71%); /* #f48771 - softer error */ + + --color-warning: hsl(45 100% 51%); /* #ffc107 - warning yellow */ + --color-warning-light: hsl(0 91% 71%); /* #f87171 - lighter warning/error */ + /* Code syntax highlighting */ - --color-code-type: hsl(197 71% 73%); /* #9cdcfe - type annotations */ - --color-code-keyword: hsl(210 59% 63%); /* #6496ff - keywords */ - + --color-code-type: hsl(197 71% 73%); /* #9cdcfe - type annotations */ + --color-code-keyword: hsl(210 59% 63%); /* #6496ff - keywords */ + /* Toast and notification backgrounds */ - --color-toast-success-bg: hsl(207 100% 37% / 0.13); /* #0e639c with 20% opacity */ - --color-toast-success-text: hsl(207 100% 60%); /* #3794ff */ - --color-toast-error-bg: hsl(5 89% 60% / 0.15); /* #f14836 with 15% opacity */ - --color-toast-error-text: hsl(5 89% 60%); /* #f14836 */ - --color-toast-error-border: hsl(5 89% 60%); /* #f14836 */ - --color-toast-fatal-bg: hsl(0 33% 18%); /* #2d1f1f - fatal error bg */ - --color-toast-fatal-border: hsl(0 36% 26%); /* #5a2c2c - fatal error border */ - + --color-toast-success-bg: hsl(207 100% 37% / 0.13); /* #0e639c with 20% opacity */ + --color-toast-success-text: hsl(207 100% 60%); /* #3794ff */ + --color-toast-error-bg: hsl(5 89% 60% / 0.15); /* #f14836 with 15% opacity */ + --color-toast-error-text: hsl(5 89% 60%); /* #f14836 */ + --color-toast-error-border: hsl(5 89% 60%); /* #f14836 */ + --color-toast-fatal-bg: hsl(0 33% 18%); /* #2d1f1f - fatal error bg */ + --color-toast-fatal-border: hsl(0 36% 26%); /* #5a2c2c - fatal error border */ + /* Semi-transparent overlays */ - --color-danger-overlay: hsl(4 90% 58% / 0.1); /* danger with 10% opacity */ - --color-warning-overlay: hsl(45 100% 51% / 0.1); /* warning with 10% opacity */ - --color-gray-overlay: hsl(0 0% 39% / 0.05); /* gray with 5% opacity */ - --color-white-overlay-light: hsl(0 0% 100% / 0.05); /* white with 5% opacity */ - --color-white-overlay: hsl(0 0% 100% / 0.1); /* white with 10% opacity */ - --color-selection: hsl(204 100% 60% / 0.5); /* selection blue with 50% opacity */ - --color-vim-status: hsl(0 0% 83% / 0.6); /* status text with 60% opacity */ + --color-danger-overlay: hsl(4 90% 58% / 0.1); /* danger with 10% opacity */ + --color-warning-overlay: hsl(45 100% 51% / 0.1); /* warning with 10% opacity */ + --color-gray-overlay: hsl(0 0% 39% / 0.05); /* gray with 5% opacity */ + --color-white-overlay-light: hsl(0 0% 100% / 0.05); /* white with 5% opacity */ + --color-white-overlay: hsl(0 0% 100% / 0.1); /* white with 10% opacity */ + --color-selection: hsl(204 100% 60% / 0.5); /* selection blue with 50% opacity */ + --color-vim-status: hsl(0 0% 83% / 0.6); /* status text with 60% opacity */ --color-code-keyword-overlay-light: hsl(210 100% 70% / 0.05); /* code keyword with 5% opacity */ --color-code-keyword-overlay: hsl(210 100% 70% / 0.2); /* code keyword with 20% opacity */ - + /* Info/status colors */ - --color-info-light: hsl(5 100% 
75%); /* #ff9980 - light info/error */ - --color-info-yellow: hsl(38 100% 64%); /* #ffb347 - yellow info */ - + --color-info-light: hsl(5 100% 75%); /* #ff9980 - light info/error */ + --color-info-yellow: hsl(38 100% 64%); /* #ffb347 - yellow info */ + /* Review/diff backgrounds */ - --color-review-bg-blue: hsl(201 31% 22%); /* #2a3a4a - review background */ - --color-review-bg-info: hsl(202 33% 24%); /* #2a4050 - info background */ + --color-review-bg-blue: hsl(201 31% 22%); /* #2a3a4a - review background */ + --color-review-bg-info: hsl(202 33% 24%); /* #2a4050 - info background */ --color-review-bg-warning: hsl(40 100% 12%); /* #3e2a00 - warning bg dark */ - --color-review-warning: hsl(38 100% 25%); /* #806000 - warning dark */ + --color-review-warning: hsl(38 100% 25%); /* #806000 - warning dark */ --color-review-warning-medium: hsl(38 100% 32%); /* #a07000 - warning medium */ - --color-review-warning-light: hsl(40 100% 20%); /* #4a3200 - warning light bg */ - + --color-review-warning-light: hsl(40 100% 20%); /* #4a3200 - warning light bg */ + /* Error backgrounds */ - --color-error-bg-dark: hsl(0 33% 13%); /* #3c1f1f - dark error bg */ - + --color-error-bg-dark: hsl(0 33% 13%); /* #3c1f1f - dark error bg */ + /* Radius */ --radius: 0.5rem; } @@ -178,7 +178,7 @@ :root { /* Legacy RGB for special uses */ --plan-mode-rgb: 31, 107, 184; - + /* Legacy CSS var format - keep for gradual migration */ --color-plan-mode: hsl(210 70% 40%); --color-plan-mode-hover: hsl(210 70% 52%); @@ -207,13 +207,13 @@ /* Font Variables */ /* Primary UI Font - System fonts for best native appearance */ --font-primary: - system-ui, -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Helvetica Neue", - Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; - + system-ui, -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Helvetica Neue", Arial, + sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + /* Monospace Font - Code and technical content */ --font-monospace: - "Monaco", "Menlo", "Ubuntu Mono", "Consolas", "Courier New", monospace, - "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + "Monaco", "Menlo", "Ubuntu Mono", "Consolas", "Courier New", monospace, "Apple Color Emoji", + "Segoe UI Emoji", "Segoe UI Symbol"; } body { @@ -468,8 +468,6 @@ code { font-size: 13px; } - - /* Remove default margins on first and last elements */ .markdown-content > :first-child { margin-top: 0; @@ -709,7 +707,10 @@ span.search-highlight { border-radius: 4px; color: rgba(255, 255, 255, 0.6); cursor: pointer; - transition: color 0.2s, background 0.2s, border-color 0.2s; + transition: + color 0.2s, + background 0.2s, + border-color 0.2s; font-size: 12px; display: flex; align-items: center; @@ -830,6 +831,17 @@ pre code { } } +@keyframes toastSlideOut { + from { + transform: translateX(0); + opacity: 1; + } + to { + transform: translateX(20px); + opacity: 0; + } +} + @keyframes ellipsis { 0% { content: ""; @@ -882,4 +894,3 @@ input[type="checkbox"] { .text-dim { color: var(--color-dim); } - diff --git a/src/types/ipc.ts b/src/types/ipc.ts index d0c8c485c..4a0a46c77 100644 --- a/src/types/ipc.ts +++ b/src/types/ipc.ts @@ -1,6 +1,7 @@ import type { Result } from "./result"; import type { FrontendWorkspaceMetadata, WorkspaceMetadata } from "./workspace"; import type { CmuxMessage, CmuxFrontendMetadata } from "./message"; +import type { ChatStats } from "./chatStats"; import type { ProjectConfig } from "@/config"; import type { SendMessageError, StreamErrorType } from 
"./errors"; import type { ThinkingLevel } from "./thinking"; @@ -200,6 +201,11 @@ export interface SendMessageOptions { // Minimize the number of methods - use optional parameters for operation variants // (e.g. remove(id, force?) not remove(id) + removeForce(id)). export interface IPCApi { + tokenizer: { + countTokens(model: string, text: string): Promise; + countTokensBatch(model: string, texts: string[]): Promise; + calculateStats(messages: CmuxMessage[], model: string): Promise; + }; providers: { setProviderConfig( provider: string, diff --git a/src/utils/hasher.ts b/src/utils/hasher.ts new file mode 100644 index 000000000..bc81b1b39 --- /dev/null +++ b/src/utils/hasher.ts @@ -0,0 +1,13 @@ +import crypto from "crypto"; + +export function uniqueSuffix(labels: crypto.BinaryLike[]): string { + const hash = crypto.createHash("sha256"); + + for (const label of labels) { + hash.update(label); + } + + const uniqueSuffix = hash.digest("hex").substring(0, 8); + + return uniqueSuffix; +} diff --git a/src/utils/main/StreamingTokenTracker.test.ts b/src/utils/main/StreamingTokenTracker.test.ts index 9e115c1fa..584e2e2e1 100644 --- a/src/utils/main/StreamingTokenTracker.test.ts +++ b/src/utils/main/StreamingTokenTracker.test.ts @@ -1,6 +1,8 @@ -import { describe, test, expect, beforeEach } from "bun:test"; +import { beforeEach, describe, expect, jest, test } from "@jest/globals"; import { StreamingTokenTracker } from "./StreamingTokenTracker"; +jest.setTimeout(20000); + describe("StreamingTokenTracker", () => { let tracker: StreamingTokenTracker; @@ -9,47 +11,47 @@ describe("StreamingTokenTracker", () => { }); describe("countTokens", () => { - test("returns 0 for empty string", () => { - tracker.setModel("anthropic:claude-sonnet-4-5"); - expect(tracker.countTokens("")).toBe(0); + test("returns 0 for empty string", async () => { + await tracker.setModel("anthropic:claude-sonnet-4-5"); + expect(await tracker.countTokens("")).toBe(0); }); - test("counts tokens in simple text", () => { - tracker.setModel("anthropic:claude-sonnet-4-5"); - const count = tracker.countTokens("Hello world"); + test("counts tokens in simple text", async () => { + await tracker.setModel("anthropic:claude-sonnet-4-5"); + const count = await tracker.countTokens("Hello world"); expect(count).toBeGreaterThan(0); expect(count).toBeLessThan(10); // Reasonable upper bound }); - test("counts tokens in longer text", () => { - tracker.setModel("anthropic:claude-sonnet-4-5"); + test("counts tokens in longer text", async () => { + await tracker.setModel("anthropic:claude-sonnet-4-5"); const text = "This is a longer piece of text with more tokens"; - const count = tracker.countTokens(text); + const count = await tracker.countTokens(text); expect(count).toBeGreaterThan(5); }); - test("handles special characters", () => { - tracker.setModel("anthropic:claude-sonnet-4-5"); - const count = tracker.countTokens("🚀 emoji test"); + test("handles special characters", async () => { + await tracker.setModel("anthropic:claude-sonnet-4-5"); + const count = await tracker.countTokens("🚀 emoji test"); expect(count).toBeGreaterThan(0); }); - test("is consistent for repeated calls", () => { - tracker.setModel("anthropic:claude-sonnet-4-5"); + test("is consistent for repeated calls", async () => { + await tracker.setModel("anthropic:claude-sonnet-4-5"); const text = "Test consistency"; - const count1 = tracker.countTokens(text); - const count2 = tracker.countTokens(text); + const count1 = await tracker.countTokens(text); + const count2 = await 
tracker.countTokens(text); expect(count1).toBe(count2); }); }); describe("setModel", () => { - test("switches tokenizer for different models", () => { - tracker.setModel("anthropic:claude-sonnet-4-5"); - const initial = tracker.countTokens("test"); + test("switches tokenizer for different models", async () => { + await tracker.setModel("anthropic:claude-sonnet-4-5"); + const initial = await tracker.countTokens("test"); - tracker.setModel("openai:gpt-4"); - const switched = tracker.countTokens("test"); + await tracker.setModel("openai:gpt-4"); + const switched = await tracker.countTokens("test"); expect(initial).toBeGreaterThan(0); expect(switched).toBeGreaterThan(0); diff --git a/src/utils/main/StreamingTokenTracker.ts b/src/utils/main/StreamingTokenTracker.ts index bcbd6451f..0b2ded00c 100644 --- a/src/utils/main/StreamingTokenTracker.ts +++ b/src/utils/main/StreamingTokenTracker.ts @@ -17,15 +17,15 @@ export class StreamingTokenTracker { * Initialize tokenizer for the current model * Should be called when model changes or on first stream */ - setModel(model: string): void { - this.tokenizer ??= getTokenizerForModel(model); + async setModel(model: string): Promise { + this.tokenizer ??= await getTokenizerForModel(model); } /** * Count tokens in a text string synchronously * Performance: <1ms per delta with LRU caching */ - countTokens(text: string): number { + async countTokens(text: string): Promise { if (!this.tokenizer || !text) return 0; return this.tokenizer.countTokens(text); } diff --git a/src/utils/main/tokenizer.test.ts b/src/utils/main/tokenizer.test.ts index c93605f1e..0b8512fdd 100644 --- a/src/utils/main/tokenizer.test.ts +++ b/src/utils/main/tokenizer.test.ts @@ -1,78 +1,46 @@ -import { beforeEach, describe, expect, test } from "bun:test"; +import { beforeAll, beforeEach, describe, expect, jest, test } from "@jest/globals"; import { __resetTokenizerForTests, + countTokens, + countTokensBatch, getTokenizerForModel, - loadTokenizerForModel, loadTokenizerModules, - onTokenizerEncodingLoaded, } from "./tokenizer"; +jest.setTimeout(20000); + +const model = "openai:gpt-5"; +beforeAll(async () => { + // warm up the worker_thread and tokenizer before running tests + await expect(loadTokenizerModules([model])).resolves.toHaveLength(1); +}); + beforeEach(() => { __resetTokenizerForTests(); }); -describe("tokenizer caching", () => { - test("does not cache fallback approximations", async () => { - await loadTokenizerModules(); - - const model = "openai:gpt-4-turbo"; - const tokenizer = getTokenizerForModel(model); - const text = "cmux-fallback-check-" + "a".repeat(40); - - const fallbackCount = tokenizer.countTokens(text); - const approximation = Math.ceil(text.length / 4); - expect(fallbackCount).toBe(approximation); - - await loadTokenizerForModel(model); - - const accurateCount = tokenizer.countTokens(text); - - expect(accurateCount).not.toBe(fallbackCount); - expect(accurateCount).toBeGreaterThan(0); +describe("tokenizer", () => { + test("loadTokenizerModules warms known encodings", async () => { + const tokenizer = await getTokenizerForModel(model); + expect(typeof tokenizer.encoding).toBe("string"); + expect(tokenizer.encoding.length).toBeGreaterThan(0); }); - test("replays loaded encodings for late listeners", async () => { - const model = "openai:gpt-4o"; - await loadTokenizerForModel(model); - - const received: string[] = []; - const unsubscribe = onTokenizerEncodingLoaded((encodingName) => { - received.push(encodingName); - }); - unsubscribe(); - - 
expect(received.length).toBeGreaterThan(0); - expect(received).toContain("o200k_base"); + test("countTokens returns stable values", async () => { + const text = "cmux-tokenizer-smoke-test"; + const first = await countTokens(model, text); + const second = await countTokens(model, text); + expect(first).toBeGreaterThan(0); + expect(second).toBe(first); }); - test("accurate counts replace fallback approximations", async () => { - const model = "openai:gpt-4-turbo"; - const tokenizer = getTokenizerForModel(model); - const text = "cmux-accuracy-check-" + "b".repeat(80); - - let unsubscribe: () => void = () => undefined; - const encodingReady = new Promise((resolve) => { - unsubscribe = onTokenizerEncodingLoaded((encodingName) => { - if (encodingName === "cl100k_base") { - unsubscribe(); - resolve(); - } - }); - }); - - const fallbackCount = tokenizer.countTokens(text); - const approximation = Math.ceil(text.length / 4); - expect(fallbackCount).toBe(approximation); - - await encodingReady; - await Promise.resolve(); - - const accurateCount = tokenizer.countTokens(text); - expect(accurateCount).not.toBe(fallbackCount); - expect(accurateCount).toBeGreaterThan(0); + test("countTokensBatch matches individual calls", async () => { + const texts = ["alpha", "beta", "gamma"]; + const batch = await countTokensBatch(model, texts); + expect(batch).toHaveLength(texts.length); - const cachedCount = tokenizer.countTokens(text); - expect(cachedCount).toBe(accurateCount); + const individual = await Promise.all(texts.map((text) => countTokens(model, text))); + expect(batch).toEqual(individual); }); }); diff --git a/src/utils/main/tokenizer.ts b/src/utils/main/tokenizer.ts index 862e5d162..c758e39d7 100644 --- a/src/utils/main/tokenizer.ts +++ b/src/utils/main/tokenizer.ts @@ -1,714 +1,196 @@ -/** - * Token calculation utilities for chat statistics - */ import assert from "@/utils/assert"; -import { LRUCache } from "lru-cache"; import CRC32 from "crc-32"; -import { getToolSchemas, getAvailableTools } from "@/utils/tools/toolDefinitions"; - -export interface Tokenizer { - encoding: string; - countTokens: (text: string) => number; -} - -interface TokenizerBaseModules { - // Base module properties (always required) - // eslint-disable-next-line @typescript-eslint/consistent-type-imports - AITokenizer: typeof import("ai-tokenizer").default; - // eslint-disable-next-line @typescript-eslint/consistent-type-imports - models: typeof import("ai-tokenizer").models; -} - -// eslint-disable-next-line @typescript-eslint/consistent-type-imports -type EncodingModule = import("ai-tokenizer").Encoding; - -const BASE_MODULE_PROPS = ["AITokenizer", "models"] as const satisfies ReadonlyArray< - keyof TokenizerBaseModules ->; - -const KNOWN_ENCODINGS = ["o200k_base", "claude"] as const; - -/** - * Dynamic imports below are deliberate to keep ~2MB encoding bundles out of the initial - * startup path. See eslint.config.mjs for the scoped override that documents this policy. - */ +import { LRUCache } from "lru-cache"; +import { getAvailableTools, getToolSchemas } from "@/utils/tools/toolDefinitions"; +import type { CountTokensInput } from "./tokenizer.worker"; +import { models, type ModelName } from "ai-tokenizer"; +import { run } from "./workerPool"; /** - * Module cache - stores loaded modules + * Public tokenizer interface exposed to callers. + * countTokens is async because the heavy lifting happens in a worker thread. 
 */ -const moduleCache: { - base: TokenizerBaseModules | null; - encodings: Map<string, EncodingModule>; -} = { - base: null, - encodings: new Map(), -}; - -let baseLoadPromise: Promise<TokenizerBaseModules> | null = null; -const encodingLoadPromises = new Map<string, Promise<EncodingModule>>(); - -type TokenizerReadyListener = () => void; -const readyListeners = new Set<TokenizerReadyListener>(); -let tokenizerModulesReady = false; - -type TokenizerEncodingListener = (encodingName: string) => void; -const encodingListeners = new Set<TokenizerEncodingListener>(); - -function isTokenizerReady(): boolean { - return moduleCache.base !== null && moduleCache.encodings.size > 0; -} - -function now(): number { - const perf = globalThis.performance; - if (perf && typeof perf.now === "function") { - return perf.now.call(perf); - } - return Date.now(); -} - -interface Logger { - info: (...args: unknown[]) => void; - error: (...args: unknown[]) => void; - debug: (...args: unknown[]) => void; -} - -const consoleLogger: Logger = { - info: (...args) => console.log(...args), - error: (...args) => console.error(...args), - debug: (...args) => { - if (typeof process !== "undefined" && process.env?.CMUX_DEBUG) { - console.debug(...args); - } - }, -}; - -let activeLogger: Logger = consoleLogger; - -// Lazy-import log.ts in the Electron main process only to keep renderer bundles small. -if (typeof process !== "undefined" && process.type === "browser") { - void import("@/services/log") - .then((module) => { - activeLogger = module.log; - }) - .catch(() => { - // Fallback to console logging when log.ts is unavailable (tests, worker builds). - }); +export interface Tokenizer { + encoding: string; + countTokens: (text: string) => Promise<number>; } -const logger: Logger = { - info: (...args) => activeLogger.info(...args), - error: (...args) => activeLogger.error(...args), - debug: (...args) => activeLogger.debug(...args), +const MODEL_KEY_OVERRIDES: Record<string, ModelName> = { + "anthropic:claude-sonnet-4-5": "anthropic/claude-sonnet-4.5", + // FIXME(ThomasK33): Temporary workaround since ai-tokenizer does not yet + // claude-haiku-4.5 + "anthropic:claude-haiku-4-5": "anthropic/claude-3.5-haiku", }; -function notifyIfTokenizerReady(): void { - if (tokenizerModulesReady || !isTokenizerReady()) { - return; - } - - tokenizerModulesReady = true; - for (const listener of readyListeners) { - try { - listener(); - } catch (error) { - logger.error("[tokenizer] Ready listener threw:", error); - } - } - readyListeners.clear(); -} +const DEFAULT_WARM_MODELS = [ + "openai:gpt-5", + "openai:gpt-5-codex", + "anthropic:claude-sonnet-4-5", +] as const; + +const encodingPromises = new Map<ModelName, Promise<string>>(); +const inFlightCounts = new Map<string, Promise<number>>(); +const tokenCountCache = new LRUCache<string, number>({ + maxSize: 250_000, + sizeCalculation: () => 1, +}); -function notifyEncodingLoaded(encodingName: string): void { +function normalizeModelKey(modelName: string): ModelName | null { assert( - encodingName.length > 0, - "Tokenizer encoding notification requires non-empty encoding name" + typeof modelName === "string" && modelName.length > 0, + "Model name must be a non-empty string" ); - if (encodingListeners.size === 0) { - return; - } - for (const listener of encodingListeners) { - try { - listener(encodingName); - } catch (error) { - logger.error(`[tokenizer] Encoding listener threw for '${encodingName}':`, error); - } + + const override = MODEL_KEY_OVERRIDES[modelName]; + const normalized = + override ?? (modelName.includes(":") ?
modelName.replace(":", "/") : modelName); + + if (!(normalized in models)) { + // Return null for unknown models - caller can decide to fallback or error + return null; } + return normalized as ModelName; } /** - * Registers a listener fired once the tokenizer base and at least one encoding finish loading. - * Prefer `onTokenizerEncodingLoaded` for UI updates that need per-encoding fidelity. + * Resolves a model string to a ModelName, falling back to a similar model if unknown. + * Optionally logs a warning when falling back. */ -export function onTokenizerModulesLoaded(listener: () => void): () => void { - if (tokenizerModulesReady || isTokenizerReady()) { - tokenizerModulesReady = true; - listener(); - return () => undefined; - } +function resolveModelName(modelString: string): ModelName { + let modelName = normalizeModelKey(modelString); - readyListeners.add(listener); - return () => { - readyListeners.delete(listener); - }; -} + if (!modelName) { + const provider = modelString.split(":")[0] || "anthropic"; + const fallbackModel = provider === "anthropic" ? "anthropic/claude-sonnet-4.5" : "openai/gpt-5"; -export function onTokenizerEncodingLoaded(listener: TokenizerEncodingListener): () => void { - assert(typeof listener === "function", "Tokenizer encoding listener must be a function"); - encodingListeners.add(listener); + console.warn( + `[tokenizer] Unknown model '${modelString}', using ${fallbackModel} tokenizer for approximate token counting` + ); - // Immediately notify about already-loaded encodings so listeners can catch up. - for (const encodingName of moduleCache.encodings.keys()) { - try { - listener(encodingName); - } catch (error) { - logger.error( - `[tokenizer] Encoding listener threw for '${encodingName}' during initial replay:`, - error - ); - } + modelName = fallbackModel as ModelName; } - return () => { - encodingListeners.delete(listener); - }; -} - -function getCachedBaseModules(): TokenizerBaseModules | null { - return moduleCache.base; + return modelName; } -async function loadBaseModules(): Promise { - if (moduleCache.base) { - return moduleCache.base; - } - - if (!baseLoadPromise) { - const timerLabel = "[tokenizer] load base module"; - logger.info(`${timerLabel} started`); - baseLoadPromise = (async () => { - const startMs = now(); - try { - const module = await import("ai-tokenizer"); - - assert( - typeof module.default === "function", - "Tokenizer base module default export must be a constructor" - ); +function resolveEncoding(modelName: ModelName): Promise { + let promise = encodingPromises.get(modelName); + if (!promise) { + promise = run("encodingName", modelName) + .then((result: unknown) => { assert( - typeof module.models === "object" && module.models !== null, - "Tokenizer base module must export models metadata" - ); - const baseModules: TokenizerBaseModules = { - AITokenizer: module.default, - models: module.models, - }; - for (const prop of BASE_MODULE_PROPS) { - assert(prop in baseModules, `Tokenizer base modules missing '${String(prop)}' property`); - } - moduleCache.base = baseModules; - notifyIfTokenizerReady(); - return baseModules; - } catch (error) { - logger.error( - "[tokenizer] Failed to load base tokenizer modules; token counts will rely on approximations until retry succeeds", - error + typeof result === "string" && result.length > 0, + "Token encoding name must be a non-empty string" ); + return result; + }) + .catch((error) => { + encodingPromises.delete(modelName); throw error; - } finally { - const durationMs = now() - startMs; - 
logger.info(`${timerLabel} finished in ${durationMs.toFixed(0)}ms`); - } - })(); - } - - try { - const baseModules = await baseLoadPromise; - assert( - moduleCache.base === baseModules, - "Tokenizer base modules cache must contain the loaded modules" - ); - return baseModules; - } catch (error) { - moduleCache.base = null; - baseLoadPromise = null; - throw error; - } finally { - if (moduleCache.base) { - baseLoadPromise = null; - } + }); + encodingPromises.set(modelName, promise); } + return promise; } -function beginLoadBase(): void { - void loadBaseModules().catch(() => { - logger.error( - "[tokenizer] Base tokenizer modules failed to preload; token counts will stay approximate until retry succeeds" - ); - // Error already logged in loadBaseModules(); leave cache unset so callers retry. - }); +function buildCacheKey(modelName: ModelName, text: string): string { + const checksum = CRC32.str(text); + return `${modelName}:${checksum}:${text.length}`; } -function getCachedEncoding(encodingName: string): EncodingModule | undefined { - assert( - typeof encodingName === "string" && encodingName.length > 0, - "Tokenizer encoding name must be a non-empty string" - ); - return moduleCache.encodings.get(encodingName); -} +async function countTokensInternal(modelName: ModelName, text: string): Promise { + assert(typeof text === "string", "Tokenizer countTokens expects string input"); + if (text.length === 0) { + return 0; + } -async function loadEncodingModule(encodingName: string): Promise { - const cached = getCachedEncoding(encodingName); - if (cached) { + const key = buildCacheKey(modelName, text); + const cached = tokenCountCache.get(key); + if (cached !== undefined) { return cached; } - let promise = encodingLoadPromises.get(encodingName); - if (!promise) { - const loader = ENCODING_LOADERS[encodingName]; - assert(loader, `Tokenizer encoding loader missing for '${encodingName}'`); - - const timerLabel = `[tokenizer] load encoding: ${encodingName}`; - logger.info(`${timerLabel} started`); - - promise = (async () => { - const startMs = now(); - try { - const module = await loader(); - moduleCache.encodings.set(encodingName, module); - notifyIfTokenizerReady(); - notifyEncodingLoaded(encodingName); - return module; - } catch (error) { - logger.error( - `[tokenizer] Failed to load tokenizer encoding '${encodingName}'; token counts will fall back to approximations`, - error + let pending = inFlightCounts.get(key); + if (!pending) { + const payload: CountTokensInput = { modelName, input: text }; + pending = run("countTokens", payload) + .then((value: unknown) => { + assert( + typeof value === "number" && Number.isFinite(value) && value >= 0, + "Tokenizer must return a non-negative finite token count" ); + tokenCountCache.set(key, value); + inFlightCounts.delete(key); + return value; + }) + .catch((error) => { + inFlightCounts.delete(key); throw error; - } finally { - const durationMs = now() - startMs; - logger.info(`${timerLabel} finished in ${durationMs.toFixed(0)}ms`); - } - })(); - - encodingLoadPromises.set(encodingName, promise); - } - - try { - const encoding = await promise; - assert( - moduleCache.encodings.get(encodingName) === encoding, - "Tokenizer encoding cache must match the loaded encoding" - ); - return encoding; - } catch (error) { - encodingLoadPromises.delete(encodingName); - throw error; - } finally { - if (moduleCache.encodings.has(encodingName)) { - encodingLoadPromises.delete(encodingName); - } - } -} - -function normalizeEncodingModule( - encodingName: string, - module: 
Record -): EncodingModule { - const candidate = module as Partial; - - if (typeof candidate.name !== "string" || candidate.name.length === 0) { - throw new Error(`Tokenizer encoding '${encodingName}' module missing name field`); - } - - if (candidate.name !== encodingName) { - throw new Error( - `Tokenizer encoding loader mismatch: expected '${encodingName}' but received '${String(candidate.name)}'` - ); - } - - if ( - typeof candidate.pat_str !== "string" || - typeof candidate.special_tokens !== "object" || - candidate.special_tokens === null || - typeof candidate.stringEncoder !== "object" || - candidate.stringEncoder === null || - !Array.isArray(candidate.binaryEncoder) || - typeof candidate.decoder !== "object" || - candidate.decoder === null - ) { - throw new Error(`Tokenizer encoding '${encodingName}' module missing required fields`); - } - - return { - name: candidate.name, - pat_str: candidate.pat_str, - special_tokens: candidate.special_tokens, - stringEncoder: candidate.stringEncoder, - binaryEncoder: candidate.binaryEncoder, - decoder: candidate.decoder, - }; -} - -const ENCODING_LOADERS: Record Promise> = { - o200k_base: async () => - normalizeEncodingModule("o200k_base", await import("ai-tokenizer/encoding/o200k_base")), - claude: async () => - normalizeEncodingModule("claude", await import("ai-tokenizer/encoding/claude")), - cl100k_base: async () => - normalizeEncodingModule("cl100k_base", await import("ai-tokenizer/encoding/cl100k_base")), - p50k_base: async () => - normalizeEncodingModule("p50k_base", await import("ai-tokenizer/encoding/p50k_base")), -}; - -// Track if loadTokenizerModules() is already in progress -let eagerLoadPromise: Promise | null = null; - -/** - * Load tokenizer modules asynchronously (eager mode - loads all known encodings) - * Dynamic imports are intentional here to defer loading heavy tokenizer modules - * until first use, reducing app startup time from ~8.8s to <1s - * - * Idempotent - safe to call multiple times - * - * @returns Promise that resolves when tokenizer modules are loaded - */ -export async function loadTokenizerModules(): Promise { - const allLoaded = - moduleCache.base && KNOWN_ENCODINGS.every((enc) => moduleCache.encodings.has(enc)); - - if (allLoaded) { - return; - } - - if (eagerLoadPromise) { - return eagerLoadPromise; - } - - logger.info("[tokenizer] loadTokenizerModules() called"); - - const timerLabel = "[tokenizer] loadTokenizerModules() total"; - const work = (async () => { - logger.info("[tokenizer] Starting loads for base + encodings:", KNOWN_ENCODINGS); - const startMs = now(); - try { - const basePromise = loadBaseModules(); - const encodingPromises = KNOWN_ENCODINGS.map((enc) => loadEncodingModule(enc)); - await Promise.all([basePromise, ...encodingPromises]); - logger.info("[tokenizer] All modules loaded successfully"); - notifyIfTokenizerReady(); - } finally { - const durationMs = now() - startMs; - logger.info(`${timerLabel} finished in ${durationMs.toFixed(0)}ms`); - } - })(); - - eagerLoadPromise = work - .catch((error) => { - logger.error("[tokenizer] loadTokenizerModules() failed", error); - throw error; - }) - .finally(() => { - eagerLoadPromise = null; - }); - - return eagerLoadPromise; -} - -/** - * Load only the tokenizer modules needed for a specific model - * More efficient than loadTokenizerModules() if you know the model upfront - * - * This loads ~50% faster than loadTokenizerModules() since it only loads - * the base module + one encoding instead of all encodings. 
- * - * @param modelString - Model identifier (e.g., "anthropic:claude-opus-4-1", "openai:gpt-4") - */ -export async function loadTokenizerForModel(modelString: string): Promise { - const baseModules = await loadBaseModules(); - assert(baseModules, "Tokenizer base modules must be loaded before selecting encodings"); - - const encodingName = getTokenizerEncoding(modelString, baseModules); - await loadEncodingModule(encodingName); - notifyIfTokenizerReady(); -} - -/** - * LRU cache for token counts by text checksum - * Avoids re-tokenizing identical strings (system messages, tool definitions, etc.) - * Key: CRC32 checksum of text, Value: token count - */ -const tokenCountCache = new LRUCache({ - max: 500000, // Max entries (safety limit) - maxSize: 16 * 1024 * 1024, // 16MB total cache size - sizeCalculation: () => { - // Each entry: ~8 bytes (key) + ~8 bytes (value) + ~32 bytes (LRU overhead) ≈ 48 bytes - return 48; - }, -}); - -interface TokenCountCacheEntry { - value: number; - cache: boolean; -} - -type TokenCountResult = number | TokenCountCacheEntry; - -function normalizeTokenCountResult(result: TokenCountResult): TokenCountCacheEntry { - if (typeof result === "number") { - assert(Number.isFinite(result), "Token count must be a finite number"); - assert(result >= 0, "Token count cannot be negative"); - return { value: result, cache: true }; + }); + inFlightCounts.set(key, pending); } - - assert(Number.isFinite(result.value), "Token count must be a finite number"); - assert(result.value >= 0, "Token count cannot be negative"); - assert(typeof result.cache === "boolean", "Token count cache flag must be boolean"); - return result; + return pending; } -function isPromiseLike(value: unknown): value is Promise { - return ( - typeof value === "object" && - value !== null && - "then" in (value as Record) && - typeof (value as PromiseLike).then === "function" - ); -} - -function fallbackTokenCount(text: string): TokenCountCacheEntry { - const approximation = Math.ceil(text.length / 4); - assert(Number.isFinite(approximation), "Token count approximation must be finite"); - return { value: approximation, cache: false }; -} - -/** - * Count tokens with caching via CRC32 checksum - * Avoids re-tokenizing identical strings (system messages, tool definitions, etc.) - * - * NOTE: For async tokenization, this returns an approximation immediately and caches - * the accurate count in the background. Subsequent calls will use the cached accurate count. 
- */ -function countTokensCached( - text: string, - tokenizeFn: () => TokenCountResult | Promise -): number { - const checksum = CRC32.str(text); - const cached = tokenCountCache.get(checksum); - if (cached !== undefined) { - return cached; - } - - const result = tokenizeFn(); - - if (!isPromiseLike(result)) { - const normalized = normalizeTokenCountResult(result); - if (normalized.cache) { - tokenCountCache.set(checksum, normalized.value); - } - return normalized.value; - } - - // Async case: return approximation now, cache accurate value when ready - const approximation = Math.ceil(text.length / 4); - void result - .then((resolved) => { - const normalized = normalizeTokenCountResult(resolved); - if (normalized.cache) { - tokenCountCache.set(checksum, normalized.value); +export function loadTokenizerModules( + modelsToWarm: string[] = Array.from(DEFAULT_WARM_MODELS) +): Promise>> { + return Promise.allSettled( + modelsToWarm.map((modelString) => { + const modelName = normalizeModelKey(modelString); + // Skip unknown models during warmup + if (!modelName) { + return Promise.reject(new Error(`Unknown model: ${modelString}`)); } + return resolveEncoding(modelName); }) - .catch((error) => { - logger.error("[tokenizer] Async tokenization failed", error); - }); - return approximation; -} - -type TokenizerModules = TokenizerBaseModules; -type TokenizerModelRecord = Record; - -const FALLBACK_MODEL_KEY = "openai/gpt-4o"; -const FALLBACK_ENCODING = "o200k_base"; -const TOKENIZATION_FALLBACK_MESSAGE = - "[tokenizer] Failed to tokenize with loaded modules; returning fallback approximation"; - -const MODEL_KEY_OVERRIDES: Record = { - "anthropic:claude-sonnet-4-5": "anthropic/claude-sonnet-4.5", -}; - -function normalizeModelKey(modelString: string): string { - return modelString.includes(":") ? modelString.replace(":", "/") : modelString; -} - -function getTokenizerModels(modules: TokenizerModules): TokenizerModelRecord { - return modules.models as TokenizerModelRecord; -} - -function resolveTokenizerEncoding(modelString: string, modules: TokenizerModules): string { - const models = getTokenizerModels(modules); - - const candidates: Array = []; - if (modelString.includes("/")) { - candidates.push(modelString); - } - if (modelString.includes(":")) { - candidates.push(normalizeModelKey(modelString)); - } - candidates.push(MODEL_KEY_OVERRIDES[modelString]); - - for (const key of candidates) { - if (!key) continue; - const entry = models[key]; - if (entry?.encoding) { - return entry.encoding; - } - } - - return models[FALLBACK_MODEL_KEY]?.encoding ?? 
FALLBACK_ENCODING; + ); } -function getTokenizerEncoding(modelString: string, modules: TokenizerModules | null): string { - if (!modules) { - beginLoadBase(); - return FALLBACK_ENCODING; - } +export async function getTokenizerForModel(modelString: string): Promise { + const modelName = resolveModelName(modelString); + const encodingName = await resolveEncoding(modelName); - return resolveTokenizerEncoding(modelString, modules); + return { + encoding: encodingName, + countTokens: (input: string) => countTokensInternal(modelName, input), + }; } -/** - * Count tokens using loaded tokenizer modules - * Assumes base module is loaded; encoding will be loaded on-demand via Proxy if needed - */ -function countTokensWithLoadedModules( - text: string, - modelString: string -): TokenCountResult | Promise { - const cachedBase = getCachedBaseModules(); - if (!cachedBase) { - return (async () => { - const baseModules = await loadBaseModules(); - const encodingName = getTokenizerEncoding(modelString, baseModules); - const encoding = await loadEncodingModule(encodingName); - const tokenizer = new baseModules.AITokenizer(encoding); - const value = tokenizer.count(text); - assert(Number.isFinite(value) && value >= 0, "Tokenizer must return a non-negative number"); - return { value, cache: true } satisfies TokenCountCacheEntry; - })(); - } - - const encodingName = getTokenizerEncoding(modelString, cachedBase); - const cachedEncoding = getCachedEncoding(encodingName); - if (cachedEncoding) { - const tokenizer = new cachedBase.AITokenizer(cachedEncoding); - const value = tokenizer.count(text); - assert(Number.isFinite(value) && value >= 0, "Tokenizer must return a non-negative number"); - return { value, cache: true } satisfies TokenCountCacheEntry; - } - - return (async () => { - const encoding = await loadEncodingModule(encodingName); - const activeBase = getCachedBaseModules(); - assert(activeBase, "Tokenizer base modules must be available after loading encoding"); - const tokenizer = new activeBase.AITokenizer(encoding); - const value = tokenizer.count(text); - assert(Number.isFinite(value) && value >= 0, "Tokenizer must return a non-negative number"); - return { value, cache: true } satisfies TokenCountCacheEntry; - })(); +export function countTokens(modelString: string, text: string): Promise { + const modelName = resolveModelName(modelString); + return countTokensInternal(modelName, text); } -/** - * Get the appropriate tokenizer for a given model string - * - * @param modelString - Model identifier (e.g., "anthropic:claude-opus-4-1", "openai:gpt-4") - * @returns Tokenizer interface with name and countTokens function - */ -export function getTokenizerForModel(modelString: string): Tokenizer { - // Start loading tokenizer modules in background (idempotent) - void loadTokenizerModules().catch((error) => { - logger.error("[tokenizer] Failed to eagerly load tokenizer modules", error); - }); - - return { - get encoding() { - // NOTE: This Proxy-style getter runs before encodings finish loading; callers must tolerate - // fallback values (and potential transient undefined) until onTokenizerEncodingLoaded fires. 
- return getTokenizerEncoding(modelString, moduleCache.base); - }, - countTokens: (text: string) => { - return countTokensCached(text, () => { - try { - const result = countTokensWithLoadedModules(text, modelString); - if (isPromiseLike(result)) { - return result.catch((error) => { - logger.error(TOKENIZATION_FALLBACK_MESSAGE, error); - return fallbackTokenCount(text); - }); - } - return result; - } catch (error) { - logger.error(TOKENIZATION_FALLBACK_MESSAGE, error); - return fallbackTokenCount(text); - } - }); - }, - }; +export function countTokensBatch(modelString: string, texts: string[]): Promise<number[]> { + assert(Array.isArray(texts), "Batch token counting expects an array of strings"); + const modelName = resolveModelName(modelString); + return Promise.all(texts.map((text) => countTokensInternal(modelName, text))); } -/** - * Calculate token counts for serialized data (tool args/results) - */ -export function countTokensForData(data: unknown, tokenizer: Tokenizer): number { +export function countTokensForData(data: unknown, tokenizer: Tokenizer): Promise<number> { const serialized = JSON.stringify(data); return tokenizer.countTokens(serialized); } -/** - * Test helper to fully reset tokenizer state between test cases. - * Do NOT call from production code. - */ -export function __resetTokenizerForTests(): void { - moduleCache.base = null; - moduleCache.encodings.clear(); - baseLoadPromise = null; - encodingLoadPromises.clear(); - readyListeners.clear(); - tokenizerModulesReady = false; - encodingListeners.clear(); - eagerLoadPromise = null; - tokenCountCache.clear(); -} - -/** - * Get estimated token count for tool definitions - * These are the schemas sent to the API for each tool - * - * @param toolName The name of the tool (bash, file_read, web_search, etc.) - * @param modelString The model string to get accurate tool definitions - * @returns Estimated token count for the tool definition - */ -export function getToolDefinitionTokens(toolName: string, modelString: string): number { +export async function getToolDefinitionTokens( + toolName: string, + modelString: string +): Promise<number> { try { - // Check if this tool is available for this model const availableTools = getAvailableTools(modelString); if (!availableTools.includes(toolName)) { - // Tool not available for this model return 0; } - // Get the tool schema const toolSchemas = getToolSchemas(); const toolSchema = toolSchemas[toolName]; - if (!toolSchema) { - // Tool not found, return a default estimate return 40; } - // Serialize the tool definition to estimate tokens - const serialized = JSON.stringify(toolSchema); - const tokenizer = getTokenizerForModel(modelString); - return tokenizer.countTokens(serialized); + return countTokens(modelString, JSON.stringify(toolSchema)); } catch { - // Fallback to estimates if we can't get the actual definition const fallbackSizes: Record<string, number> = { bash: 65, file_read: 45, @@ -718,6 +200,12 @@ export function getToolDefinitionTokens(toolName: string, modelString: string): web_search: 50, google_search: 50, }; - return fallbackSizes[toolName] || 40; + return fallbackSizes[toolName] ?? 
40; } } + +export function __resetTokenizerForTests(): void { + encodingPromises.clear(); + tokenCountCache.clear(); + inFlightCounts.clear(); +} diff --git a/src/utils/main/tokenizer.worker.ts b/src/utils/main/tokenizer.worker.ts new file mode 100644 index 000000000..968c75423 --- /dev/null +++ b/src/utils/main/tokenizer.worker.ts @@ -0,0 +1,74 @@ +import assert from "node:assert"; +import { parentPort } from "node:worker_threads"; +import { Tokenizer, models } from "ai-tokenizer"; +import type { ModelName } from "ai-tokenizer"; +import * as encoding from "ai-tokenizer/encoding"; + +export interface CountTokensInput { + modelName: ModelName; + input: string; +} + +const tokenizerCache = new Map(); + +function getTokenizer(modelName: ModelName): Tokenizer { + const cached = tokenizerCache.get(modelName); + if (cached) { + return cached; + } + + const model = models[modelName]; + assert(model, `Unknown tokenizer model '${modelName}'`); + + const encodingModule = encoding[model.encoding]; + assert(encodingModule, `Unknown tokenizer encoding '${model.encoding}'`); + + const tokenizer = new Tokenizer(encodingModule); + tokenizerCache.set(modelName, tokenizer); + return tokenizer; +} + +export function countTokens({ modelName, input }: CountTokensInput): number { + const tokenizer = getTokenizer(modelName); + const count = tokenizer.count(input); + return count; +} + +export function encodingName(modelName: ModelName): string { + const model = models[modelName]; + assert(model, `Unknown tokenizer model '${modelName}'`); + return model.encoding; +} + +// Handle messages from main thread +if (parentPort) { + parentPort.on("message", (message: { messageId: number; taskName: string; data: unknown }) => { + try { + let result: unknown; + + switch (message.taskName) { + case "countTokens": + result = countTokens(message.data as CountTokensInput); + break; + case "encodingName": + result = encodingName(message.data as ModelName); + break; + default: + throw new Error(`Unknown task: ${message.taskName}`); + } + + parentPort!.postMessage({ + messageId: message.messageId, + result, + }); + } catch (error) { + parentPort!.postMessage({ + messageId: message.messageId, + error: { + message: error instanceof Error ? error.message : String(error), + stack: error instanceof Error ? 
error.stack : undefined, + }, + }); + } + }); +} diff --git a/src/utils/main/workerPool.ts b/src/utils/main/workerPool.ts new file mode 100644 index 000000000..2a55b85af --- /dev/null +++ b/src/utils/main/workerPool.ts @@ -0,0 +1,112 @@ +import { Worker } from "node:worker_threads"; +import { join, dirname, sep } from "node:path"; + +interface WorkerRequest { + messageId: number; + taskName: string; + data: unknown; +} + +interface WorkerSuccessResponse { + messageId: number; + result: unknown; +} + +interface WorkerErrorResponse { + messageId: number; + error: { + message: string; + stack?: string; + }; +} + +type WorkerResponse = WorkerSuccessResponse | WorkerErrorResponse; + +let messageIdCounter = 0; +const pendingPromises = new Map< + number, + { resolve: (value: unknown) => void; reject: (error: Error) => void } +>(); + +// Resolve worker path - explicitly use .js extension as worker threads require compiled files +// When running tests from src/, __filename is src/utils/main/workerPool.ts +// We need to resolve to dist/utils/main/tokenizer.worker.js +// Use platform-aware path component manipulation to handle Windows backslashes +const currentDir = dirname(__filename); +const pathParts = currentDir.split(sep); +const srcIndex = pathParts.indexOf("src"); + +let workerDir: string; +if (srcIndex !== -1) { + // Replace 'src' with 'dist' in the path (works on Windows and Unix) + pathParts[srcIndex] = "dist"; + workerDir = pathParts.join(sep); +} else { + workerDir = currentDir; +} + +const workerPath = join(workerDir, "tokenizer.worker.js"); +const worker = new Worker(workerPath); + +// Handle messages from worker +worker.on("message", (response: WorkerResponse) => { + const pending = pendingPromises.get(response.messageId); + if (!pending) { + console.error(`[workerPool] No pending promise for messageId ${response.messageId}`); + return; + } + + pendingPromises.delete(response.messageId); + + if ("error" in response) { + const error = new Error(response.error.message); + error.stack = response.error.stack; + pending.reject(error); + } else { + pending.resolve(response.result); + } +}); + +// Handle worker errors +worker.on("error", (error) => { + console.error("[workerPool] Worker error:", error); + // Reject all pending promises + for (const pending of pendingPromises.values()) { + pending.reject(error); + } + pendingPromises.clear(); +}); + +// Handle worker exit +worker.on("exit", (code) => { + if (code !== 0) { + console.error(`[workerPool] Worker stopped with exit code ${code}`); + const error = new Error(`Worker stopped with exit code ${code}`); + for (const pending of pendingPromises.values()) { + pending.reject(error); + } + pendingPromises.clear(); + } +}); + +// Don't block process exit +worker.unref(); + +/** + * Run a task on the worker thread + * @param taskName The name of the task to run (e.g., "countTokens", "encodingName") + * @param data The data to pass to the task + * @returns A promise that resolves with the task result + */ +export function run(taskName: string, data: unknown): Promise { + const messageId = messageIdCounter++; + const request: WorkerRequest = { messageId, taskName, data }; + + return new Promise((resolve, reject) => { + pendingPromises.set(messageId, { + resolve: resolve as (value: unknown) => void, + reject, + }); + worker.postMessage(request); + }); +} diff --git a/src/utils/tokenizer/rendererClient.ts b/src/utils/tokenizer/rendererClient.ts new file mode 100644 index 000000000..804cd7e46 --- /dev/null +++ b/src/utils/tokenizer/rendererClient.ts 
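Example usage (illustrative sketch, not part of the diff): counting tokens from the main process with the worker-backed API added in src/utils/main/tokenizer.ts above. The helper name logPromptSize and the warmup call are invented for the example; countTokens, countTokensBatch, and loadTokenizerModules are the exports introduced by this change.

import { countTokens, countTokensBatch, loadTokenizerModules } from "@/utils/main/tokenizer";

// Optionally warm the worker thread and encoding up front; counting also loads lazily.
async function logPromptSize(model: string, prompt: string, toolTexts: string[]): Promise<void> {
  await loadTokenizerModules([model]);
  const promptTokens = await countTokens(model, prompt);
  const toolTokens = await countTokensBatch(model, toolTexts);
  const total = promptTokens + toolTokens.reduce((sum, n) => sum + n, 0);
  console.log(`~${total} tokens for ${model}`);
}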
@@ -0,0 +1,140 @@ +import type { IPCApi } from "@/types/ipc"; + +const MAX_CACHE_ENTRIES = 256; + +type CacheKey = string; + +interface CacheEntry { + promise: Promise<number>; + value: number | null; +} + +const tokenCache = new Map<CacheKey, CacheEntry>(); +const keyOrder: CacheKey[] = []; + +function getTokenizerApi(): IPCApi["tokenizer"] | null { + if (typeof window === "undefined") { + return null; + } + const api = window.api; + return api?.tokenizer ?? null; +} + +function makeKey(model: string, text: string): CacheKey { + return `${model}::${text}`; +} + +function pruneCache(): void { + while (keyOrder.length > MAX_CACHE_ENTRIES) { + const oldestKey = keyOrder.shift(); + if (oldestKey) { + tokenCache.delete(oldestKey); + } + } +} + +export function getTokenCountPromise(model: string, text: string): Promise<number> { + const trimmedModel = model?.trim(); + if (!trimmedModel || text.length === 0) { + return Promise.resolve(0); + } + + const key = makeKey(trimmedModel, text); + const cached = tokenCache.get(key); + if (cached) { + return cached.value !== null ? Promise.resolve(cached.value) : cached.promise; + } + + const tokenizer = getTokenizerApi(); + if (!tokenizer) { + return Promise.resolve(0); + } + + const promise = tokenizer + .countTokens(trimmedModel, text) + .then((tokens) => { + const entry = tokenCache.get(key); + if (entry) { + entry.value = tokens; + } + return tokens; + }) + .catch((error) => { + console.error("[tokenizer] countTokens failed", error); + tokenCache.delete(key); + return 0; + }); + + tokenCache.set(key, { promise, value: null }); + keyOrder.push(key); + pruneCache(); + return promise; +} + +export async function countTokensBatchRenderer(model: string, texts: string[]): Promise<number[]> { + if (!Array.isArray(texts) || texts.length === 0) { + return []; + } + + const trimmedModel = model?.trim(); + if (!trimmedModel) { + return texts.map(() => 0); + } + + const tokenizer = getTokenizerApi(); + if (!tokenizer) { + return texts.map(() => 0); + } + + const results = new Array(texts.length).fill(0); + const missingIndices: number[] = []; + const missingTexts: string[] = []; + + for (let i = 0; i < texts.length; i++) { + const text = texts[i]; + const key = makeKey(trimmedModel, text); + const cached = tokenCache.get(key); + if (cached && cached.value !== null) { + results[i] = cached.value; + } else { + missingIndices.push(i); + missingTexts.push(text); + } + } + + if (missingTexts.length === 0) { + return results; + } + + try { + const rawBatchResult: unknown = await tokenizer.countTokensBatch(trimmedModel, missingTexts); + if (!Array.isArray(rawBatchResult)) { + throw new Error("Tokenizer returned invalid batch result"); + } + const batchResult = rawBatchResult.map((value) => (typeof value === "number" ? value : 0)); + + for (let i = 0; i < missingIndices.length; i++) { + const idx = missingIndices[i]; + const rawCount = batchResult[i]; + const count = typeof rawCount === "number" ?
rawCount : 0; + const text = texts[idx]; + const key = makeKey(trimmedModel, text); + tokenCache.set(key, { promise: Promise.resolve(count), value: count }); + keyOrder.push(key); + results[idx] = count; + } + pruneCache(); + } catch (error) { + console.error("[tokenizer] countTokensBatch failed", error); + for (const idx of missingIndices) { + results[idx] = 0; + } + } + + return results; +} + +export function clearRendererTokenizerCache(): void { + tokenCache.clear(); + keyOrder.length = 0; +} diff --git a/src/utils/tokens/TokenStatsWorker.ts b/src/utils/tokens/TokenStatsWorker.ts deleted file mode 100644 index a399badfc..000000000 --- a/src/utils/tokens/TokenStatsWorker.ts +++ /dev/null @@ -1,207 +0,0 @@ -/** - * Wrapper class for managing the token statistics Web Worker - * Provides a clean async API for calculating stats off the main thread - */ - -import assert from "@/utils/assert"; -import type { CmuxMessage } from "@/types/message"; -import type { ChatStats } from "@/types/chatStats"; -import type { - WorkerRequest, - WorkerResponse, - WorkerError, - WorkerNotification, -} from "./tokenStats.worker"; - -type WorkerMessage = WorkerResponse | WorkerError | WorkerNotification; - -/** - * TokenStatsWorker manages a dedicated Web Worker for calculating token statistics - * Ensures only one calculation runs at a time and provides Promise-based API - */ -export class TokenStatsWorker { - private readonly worker: Worker; - private requestCounter = 0; - private pendingRequest: { - id: string; - resolve: (stats: ChatStats) => void; - reject: (error: Error) => void; - } | null = null; - private readonly tokenizerReadyListeners = new Set<() => void>(); - private readonly encodingListeners = new Set<(encodingName: string) => void>(); - private tokenizerReady = false; - private readonly loadedEncodings = new Set(); - - constructor() { - // Create worker using Vite's Web Worker support - // The ?worker suffix tells Vite to bundle this as a worker - this.worker = new Worker(new URL("./tokenStats.worker.ts", import.meta.url), { - type: "module", - }); - - this.worker.onmessage = this.handleMessage.bind(this); - this.worker.onerror = this.handleError.bind(this); - } - - onTokenizerReady(listener: () => void): () => void { - assert(typeof listener === "function", "Tokenizer ready listener must be a function"); - this.tokenizerReadyListeners.add(listener); - if (this.tokenizerReady) { - try { - listener(); - } catch (error) { - console.error("[TokenStatsWorker] Tokenizer ready listener threw", error); - } - } - return () => { - this.tokenizerReadyListeners.delete(listener); - }; - } - - onEncodingLoaded(listener: (encodingName: string) => void): () => void { - assert(typeof listener === "function", "Tokenizer encoding listener must be a function"); - this.encodingListeners.add(listener); - if (this.loadedEncodings.size > 0) { - for (const encodingName of this.loadedEncodings) { - try { - listener(encodingName); - } catch (error) { - console.error( - `[TokenStatsWorker] Tokenizer encoding listener threw for '${encodingName}' during replay`, - error - ); - } - } - } - return () => { - this.encodingListeners.delete(listener); - }; - } - - /** - * Calculate token statistics for the given messages - * Cancels any pending calculation and starts a new one - * @param messages - Array of CmuxMessages to analyze - * @param model - Model string for tokenizer selection - * @returns Promise that resolves with calculated stats - */ - calculate(messages: CmuxMessage[], model: string): Promise { - // Cancel any 
pending request (latest request wins) - if (this.pendingRequest) { - this.pendingRequest.reject(new Error("Cancelled by newer request")); - this.pendingRequest = null; - } - - // Generate unique request ID - const id = `${Date.now()}-${++this.requestCounter}`; - - // Create promise that will resolve when worker responds - const promise = new Promise((resolve, reject) => { - this.pendingRequest = { id, resolve, reject }; - }); - - // Send calculation request to worker - const request: WorkerRequest = { - id, - messages, - model, - }; - this.worker.postMessage(request); - - return promise; - } - - /** - * Handle successful or error responses from worker - */ - private handleMessage(e: MessageEvent) { - const response = e.data; - - if ("type" in response) { - if (response.type === "tokenizer-ready") { - this.notifyTokenizerReady(); - return; - } - if (response.type === "encoding-loaded") { - this.notifyEncodingLoaded(response.encodingName); - return; - } - assert(false, "Received unknown worker notification type"); - return; - } - - // Ignore responses for cancelled requests - if (!this.pendingRequest || this.pendingRequest.id !== response.id) { - return; - } - - const { resolve, reject } = this.pendingRequest; - this.pendingRequest = null; - - if (response.success) { - resolve(response.stats); - } else { - reject(new Error(response.error)); - } - } - - /** - * Handle worker errors (script errors, not calculation errors) - */ - private handleError(error: ErrorEvent) { - if (this.pendingRequest) { - this.pendingRequest.reject(new Error(`Worker error: ${error.message || "Unknown error"}`)); - this.pendingRequest = null; - } - } - - /** - * Terminate the worker and clean up resources - */ - terminate() { - if (this.pendingRequest) { - this.pendingRequest.reject(new Error("Worker terminated")); - this.pendingRequest = null; - } - this.worker.terminate(); - this.tokenizerReadyListeners.clear(); - this.encodingListeners.clear(); - this.loadedEncodings.clear(); - this.tokenizerReady = false; - } - - private notifyTokenizerReady(): void { - this.tokenizerReady = true; - if (this.tokenizerReadyListeners.size === 0) { - return; - } - for (const listener of this.tokenizerReadyListeners) { - try { - listener(); - } catch (error) { - console.error("[TokenStatsWorker] Tokenizer ready listener threw", error); - } - } - } - - private notifyEncodingLoaded(encodingName: string): void { - assert( - typeof encodingName === "string" && encodingName.length > 0, - "Tokenizer encoding notifications require a non-empty encoding name" - ); - this.loadedEncodings.add(encodingName); - if (this.encodingListeners.size === 0) { - return; - } - for (const listener of this.encodingListeners) { - try { - listener(encodingName); - } catch (error) { - console.error( - `[TokenStatsWorker] Tokenizer encoding listener threw for '${encodingName}'`, - error - ); - } - } - } -} diff --git a/src/utils/tokens/tokenStats.worker.ts b/src/utils/tokens/tokenStats.worker.ts deleted file mode 100644 index 4be5e0b7a..000000000 --- a/src/utils/tokens/tokenStats.worker.ts +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Web Worker for calculating token statistics off the main thread - * This prevents UI blocking during expensive tokenization operations - */ - -import type { CmuxMessage } from "@/types/message"; -import type { ChatStats } from "@/types/chatStats"; -import { onTokenizerEncodingLoaded, onTokenizerModulesLoaded } from "@/utils/main/tokenizer"; -import { calculateTokenStats } from "./tokenStatsCalculator"; - -export interface WorkerRequest { 
- id: string; - messages: CmuxMessage[]; - model: string; -} - -export interface WorkerResponse { - id: string; - success: true; - stats: ChatStats; -} - -export interface WorkerError { - id: string; - success: false; - error: string; -} - -export type WorkerNotification = - | { type: "tokenizer-ready" } - | { type: "encoding-loaded"; encodingName: string }; - -// Handle incoming calculation requests -self.onmessage = (e: MessageEvent) => { - const { id, messages, model } = e.data; - - try { - const stats = calculateTokenStats(messages, model); - const response: WorkerResponse = { - id, - success: true, - stats, - }; - self.postMessage(response); - } catch (error) { - const errorResponse: WorkerError = { - id, - success: false, - error: error instanceof Error ? error.message : String(error), - }; - self.postMessage(errorResponse); - } -}; - -onTokenizerModulesLoaded(() => { - const notification: WorkerNotification = { type: "tokenizer-ready" }; - self.postMessage(notification); -}); - -onTokenizerEncodingLoaded((encodingName) => { - if (typeof encodingName !== "string" || encodingName.length === 0) { - throw new Error("Worker received invalid tokenizer encoding name"); - } - const notification: WorkerNotification = { - type: "encoding-loaded", - encodingName, - }; - self.postMessage(notification); -}); diff --git a/src/utils/tokens/tokenStatsCalculator.test.ts b/src/utils/tokens/tokenStatsCalculator.test.ts index 18b029ad8..563b9ce2e 100644 --- a/src/utils/tokens/tokenStatsCalculator.test.ts +++ b/src/utils/tokens/tokenStatsCalculator.test.ts @@ -1,6 +1,24 @@ -import { describe, test, expect } from "@jest/globals"; -import { createDisplayUsage } from "./tokenStatsCalculator"; +import { describe, test, expect, jest } from "@jest/globals"; + +// Mock the tokenizer module before importing tokenStatsCalculator +jest.mock("@/utils/main/tokenizer", () => ({ + getTokenizerForModel: jest.fn(), + countTokensForData: jest.fn(), + getToolDefinitionTokens: jest.fn(), +})); + +import { + createDisplayUsage, + extractToolOutputData, + isEncryptedWebSearch, + countEncryptedWebSearchTokens, + collectUniqueToolNames, + extractSyncMetadata, + mergeResults, + type TokenCountJob, +} from "./tokenStatsCalculator"; import type { LanguageModelV2Usage } from "@ai-sdk/provider"; +import type { CmuxMessage } from "@/types/message"; describe("createDisplayUsage", () => { test("uses usage.reasoningTokens when available", () => { @@ -106,3 +124,286 @@ describe("createDisplayUsage", () => { expect(result?.cacheCreate.tokens).toBe(50); // Anthropic metadata still works }); }); + +describe("extractToolOutputData", () => { + test("extracts value from nested structure", () => { + const output = { type: "json", value: { foo: "bar" } }; + expect(extractToolOutputData(output)).toEqual({ foo: "bar" }); + }); + + test("returns output as-is if not nested", () => { + const output = { foo: "bar" }; + expect(extractToolOutputData(output)).toEqual({ foo: "bar" }); + }); + + test("handles null", () => { + expect(extractToolOutputData(null)).toBeNull(); + }); + + test("handles primitives", () => { + expect(extractToolOutputData("string")).toBe("string"); + expect(extractToolOutputData(123)).toBe(123); + }); +}); + +describe("isEncryptedWebSearch", () => { + test("returns false for non-web_search tools", () => { + const data = [{ encryptedContent: "abc" }]; + expect(isEncryptedWebSearch("Read", data)).toBe(false); + }); + + test("returns false for non-array data", () => { + expect(isEncryptedWebSearch("web_search", { foo: "bar" 
})).toBe(false); + }); + + test("returns false for web_search without encrypted content", () => { + const data = [{ title: "foo", url: "bar" }]; + expect(isEncryptedWebSearch("web_search", data)).toBe(false); + }); + + test("returns true for web_search with encrypted content", () => { + const data = [{ encryptedContent: "abc123" }]; + expect(isEncryptedWebSearch("web_search", data)).toBe(true); + }); + + test("returns true if at least one item has encrypted content", () => { + const data = [{ title: "foo" }, { encryptedContent: "abc123" }]; + expect(isEncryptedWebSearch("web_search", data)).toBe(true); + }); +}); + +describe("countEncryptedWebSearchTokens", () => { + test("calculates tokens using heuristic", () => { + const data = [{ encryptedContent: "a".repeat(100) }]; + // 100 chars * 0.75 = 75 + expect(countEncryptedWebSearchTokens(data)).toBe(75); + }); + + test("handles multiple items", () => { + const data = [{ encryptedContent: "a".repeat(50) }, { encryptedContent: "b".repeat(50) }]; + // 100 chars * 0.75 = 75 + expect(countEncryptedWebSearchTokens(data)).toBe(75); + }); + + test("ignores items without encryptedContent", () => { + const data = [{ title: "foo" }, { encryptedContent: "a".repeat(100) }]; + // Only counts encrypted content: 100 chars * 0.75 = 75 + expect(countEncryptedWebSearchTokens(data)).toBe(75); + }); + + test("rounds up", () => { + const data = [{ encryptedContent: "abc" }]; + // 3 chars * 0.75 = 2.25, rounded up to 3 + expect(countEncryptedWebSearchTokens(data)).toBe(3); + }); +}); + +describe("collectUniqueToolNames", () => { + test("collects tool names from assistant messages", () => { + const messages: CmuxMessage[] = [ + { + id: "1", + role: "assistant", + parts: [ + { + type: "dynamic-tool", + toolName: "Read", + toolCallId: "1", + state: "input-available", + input: {}, + }, + { + type: "dynamic-tool", + toolName: "Bash", + toolCallId: "2", + state: "input-available", + input: {}, + }, + ], + }, + ]; + + const toolNames = collectUniqueToolNames(messages); + expect(toolNames.size).toBe(2); + expect(toolNames.has("Read")).toBe(true); + expect(toolNames.has("Bash")).toBe(true); + }); + + test("deduplicates tool names", () => { + const messages: CmuxMessage[] = [ + { + id: "1", + role: "assistant", + parts: [ + { + type: "dynamic-tool", + toolName: "Read", + toolCallId: "1", + state: "input-available", + input: {}, + }, + { + type: "dynamic-tool", + toolName: "Read", + toolCallId: "2", + state: "input-available", + input: {}, + }, + ], + }, + ]; + + const toolNames = collectUniqueToolNames(messages); + expect(toolNames.size).toBe(1); + expect(toolNames.has("Read")).toBe(true); + }); + + test("ignores user messages", () => { + const messages: CmuxMessage[] = [ + { + id: "1", + role: "user", + parts: [{ type: "text", text: "hello" }], + }, + ]; + + const toolNames = collectUniqueToolNames(messages); + expect(toolNames.size).toBe(0); + }); + + test("returns empty set for empty messages", () => { + const toolNames = collectUniqueToolNames([]); + expect(toolNames.size).toBe(0); + }); +}); + +describe("extractSyncMetadata", () => { + test("accumulates system message tokens", () => { + const messages: CmuxMessage[] = [ + { + id: "1", + role: "assistant", + parts: [], + metadata: { systemMessageTokens: 100 }, + }, + { + id: "2", + role: "assistant", + parts: [], + metadata: { systemMessageTokens: 200 }, + }, + ]; + + const result = extractSyncMetadata(messages, "anthropic:claude-opus-4-1"); + expect(result.systemMessageTokens).toBe(300); + }); + + test("extracts usage 
history", () => { + const messages: CmuxMessage[] = [ + { + id: "1", + role: "assistant", + parts: [], + metadata: { + usage: { + inputTokens: 100, + outputTokens: 50, + totalTokens: 150, + }, + model: "anthropic:claude-opus-4-1", + }, + }, + ]; + + const result = extractSyncMetadata(messages, "anthropic:claude-opus-4-1"); + expect(result.usageHistory.length).toBe(1); + expect(result.usageHistory[0].input.tokens).toBe(100); + expect(result.usageHistory[0].output.tokens).toBe(50); + }); + + test("ignores user messages", () => { + const messages: CmuxMessage[] = [ + { + id: "1", + role: "user", + parts: [{ type: "text", text: "hello" }], + }, + ]; + + const result = extractSyncMetadata(messages, "anthropic:claude-opus-4-1"); + expect(result.systemMessageTokens).toBe(0); + expect(result.usageHistory.length).toBe(0); + }); +}); + +describe("mergeResults", () => { + test("merges job results into consumer map", () => { + const jobs: TokenCountJob[] = [ + { consumer: "User", promise: Promise.resolve(100) }, + { consumer: "Assistant", promise: Promise.resolve(200) }, + ]; + const results = [100, 200]; + const toolDefinitions = new Map(); + const systemMessageTokens = 0; + + const consumerMap = mergeResults(jobs, results, toolDefinitions, systemMessageTokens); + + expect(consumerMap.get("User")).toEqual({ fixed: 0, variable: 100 }); + expect(consumerMap.get("Assistant")).toEqual({ fixed: 0, variable: 200 }); + }); + + test("accumulates tokens for same consumer", () => { + const jobs: TokenCountJob[] = [ + { consumer: "User", promise: Promise.resolve(100) }, + { consumer: "User", promise: Promise.resolve(50) }, + ]; + const results = [100, 50]; + const toolDefinitions = new Map(); + const systemMessageTokens = 0; + + const consumerMap = mergeResults(jobs, results, toolDefinitions, systemMessageTokens); + + expect(consumerMap.get("User")).toEqual({ fixed: 0, variable: 150 }); + }); + + test("adds tool definition tokens only once", () => { + const jobs: TokenCountJob[] = [ + { consumer: "Read", promise: Promise.resolve(100) }, + { consumer: "Read", promise: Promise.resolve(50) }, + ]; + const results = [100, 50]; + const toolDefinitions = new Map([["Read", 25]]); + const systemMessageTokens = 0; + + const consumerMap = mergeResults(jobs, results, toolDefinitions, systemMessageTokens); + + // Fixed tokens added only once, variable tokens accumulated + expect(consumerMap.get("Read")).toEqual({ fixed: 25, variable: 150 }); + }); + + test("adds system message tokens", () => { + const jobs: TokenCountJob[] = []; + const results: number[] = []; + const toolDefinitions = new Map(); + const systemMessageTokens = 300; + + const consumerMap = mergeResults(jobs, results, toolDefinitions, systemMessageTokens); + + expect(consumerMap.get("System")).toEqual({ fixed: 0, variable: 300 }); + }); + + test("skips zero token results", () => { + const jobs: TokenCountJob[] = [ + { consumer: "User", promise: Promise.resolve(0) }, + { consumer: "Assistant", promise: Promise.resolve(100) }, + ]; + const results = [0, 100]; + const toolDefinitions = new Map(); + const systemMessageTokens = 0; + + const consumerMap = mergeResults(jobs, results, toolDefinitions, systemMessageTokens); + + expect(consumerMap.has("User")).toBe(false); + expect(consumerMap.get("Assistant")).toEqual({ fixed: 0, variable: 100 }); + }); +}); diff --git a/src/utils/tokens/tokenStatsCalculator.ts b/src/utils/tokens/tokenStatsCalculator.ts index 3f1542507..cb34ad8a2 100644 --- a/src/utils/tokens/tokenStatsCalculator.ts +++ 
b/src/utils/tokens/tokenStatsCalculator.ts @@ -12,6 +12,7 @@ import { getTokenizerForModel, countTokensForData, getToolDefinitionTokens, + type Tokenizer, } from "@/utils/main/tokenizer"; import { createDisplayUsage } from "./displayUsage"; import type { ChatUsageDisplay } from "./usageAggregator"; @@ -20,53 +21,212 @@ import type { ChatUsageDisplay } from "./usageAggregator"; export { createDisplayUsage }; /** - * Calculate token statistics from raw CmuxMessages - * This is the single source of truth for token counting - * - * @param messages - Array of CmuxMessages from chat history - * @param model - Model string (e.g., "anthropic:claude-opus-4-1") - * @returns ChatStats with token breakdown by consumer and usage history + * Helper Functions for Token Counting + * (Exported for testing) */ -export function calculateTokenStats(messages: CmuxMessage[], model: string): ChatStats { - if (messages.length === 0) { - return { - consumers: [], - totalTokens: 0, - model, - tokenizerName: "No messages", - usageHistory: [], - }; + +/** + * Extracts the actual data from nested tool output structure + * Tool results have nested structure: { type: "json", value: {...} } + */ +export function extractToolOutputData(output: unknown): unknown { + if (typeof output === "object" && output !== null && "value" in output) { + return (output as { value: unknown }).value; } + return output; +} - performance.mark("calculateTokenStatsStart"); +/** + * Checks if the given data is encrypted web_search results + */ +export function isEncryptedWebSearch(toolName: string, data: unknown): boolean { + if (toolName !== "web_search" || !Array.isArray(data)) { + return false; + } - const tokenizer = getTokenizerForModel(model); - const consumerMap = new Map(); - const toolsWithDefinitions = new Set(); // Track which tools have definitions included - const usageHistory: ChatUsageDisplay[] = []; - let systemMessageTokens = 0; // Accumulate system message tokens across all requests + return data.some( + (item: unknown): item is { encryptedContent: string } => + item !== null && + typeof item === "object" && + "encryptedContent" in item && + typeof (item as Record).encryptedContent === "string" + ); +} + +/** + * Calculates tokens for encrypted web_search content using heuristic + * Encrypted content is base64 encoded and then encrypted/compressed + * Apply reduction factors: + * 1. Remove base64 overhead (multiply by 0.75) + * 2. 
Count roughly one token per decoded byte, i.e. ceil(encryptedChars * 0.75) + */ +export function countEncryptedWebSearchTokens(data: unknown[]): number { + let encryptedChars = 0; + for (const item of data) { + if ( + item !== null && + typeof item === "object" && + "encryptedContent" in item && + typeof (item as Record<string, unknown>).encryptedContent === "string" + ) { + encryptedChars += (item as { encryptedContent: string }).encryptedContent.length; + } + } + // Use heuristic: encrypted chars * 0.75 for token estimation + return Math.ceil(encryptedChars * 0.75); +} + +/** + * Counts tokens for tool output, handling special cases like encrypted web_search + */ +async function countToolOutputTokens( + part: { type: "dynamic-tool"; toolName: string; state: string; output?: unknown }, + tokenizer: Tokenizer +): Promise<number> { + if (part.state !== "output-available" || !part.output) { + return 0; + } + + const outputData = extractToolOutputData(part.output); + + // Special handling for web_search encrypted content + if (isEncryptedWebSearch(part.toolName, outputData)) { + return countEncryptedWebSearchTokens(outputData as unknown[]); + } + + // Normal tool results + return countTokensForData(outputData, tokenizer); +} + +/** + * Represents a single token counting operation + */ +export interface TokenCountJob { + consumer: string; + promise: Promise<number>; +} + +/** + * Creates all token counting jobs from messages + * Jobs are executed immediately (promises start running) + */ +function createTokenCountingJobs(messages: CmuxMessage[], tokenizer: Tokenizer): TokenCountJob[] { + const jobs: TokenCountJob[] = []; - // Calculate tokens by content producer (User, Assistant, individual tools) - // This shows what activities are consuming tokens, useful for debugging costs for (const message of messages) { if (message.role === "user") { - // User message text - let userTokens = 0; + // User message text - batch all text parts together + const textParts = message.parts.filter((p) => p.type === "text"); + if (textParts.length > 0) { + const allText = textParts.map((p) => p.text).join(""); + jobs.push({ + consumer: "User", + promise: tokenizer.countTokens(allText), + }); + } + } else if (message.role === "assistant") { + // Assistant text parts - batch together + const textParts = message.parts.filter((p) => p.type === "text"); + if (textParts.length > 0) { + const allText = textParts.map((p) => p.text).join(""); + jobs.push({ + consumer: "Assistant", + promise: tokenizer.countTokens(allText), + }); + } + + // Reasoning parts - batch together + const reasoningParts = message.parts.filter((p) => p.type === "reasoning"); + if (reasoningParts.length > 0) { + const allReasoning = reasoningParts.map((p) => p.text).join(""); + jobs.push({ + consumer: "Reasoning", + promise: tokenizer.countTokens(allReasoning), + }); + } + + // Tool parts - count arguments and results separately for (const part of message.parts) { - if (part.type === "text") { - userTokens += tokenizer.countTokens(part.text); + if (part.type === "dynamic-tool") { + // Tool arguments + jobs.push({ + consumer: part.toolName, + promise: countTokensForData(part.input, tokenizer), + }); + + // Tool results (if available) + jobs.push({ + consumer: part.toolName, + promise: countToolOutputTokens(part, tokenizer), + }); } } + } + } - - const existing = consumerMap.get("User") ??
{ fixed: 0, variable: 0 }; - consumerMap.set("User", { fixed: 0, variable: existing.variable + userTokens }); - } else if (message.role === "assistant") { - // Accumulate system message tokens from this request + return jobs; +} + +/** + * Collects all unique tool names from messages + */ +export function collectUniqueToolNames(messages: CmuxMessage[]): Set<string> { + const toolNames = new Set<string>(); + + for (const message of messages) { + if (message.role === "assistant") { + for (const part of message.parts) { + if (part.type === "dynamic-tool") { + toolNames.add(part.toolName); + } + } + } + } + + return toolNames; +} + +/** + * Fetches all tool definitions in parallel + * Returns a map of tool name to token count + */ +export async function fetchAllToolDefinitions( + toolNames: Set<string>, + model: string +): Promise<Map<string, number>> { + const entries = await Promise.all( + Array.from(toolNames).map(async (toolName) => { + const tokens = await getToolDefinitionTokens(toolName, model); + return [toolName, tokens] as const; + }) + ); + + return new Map(entries); +} + +/** + * Metadata that doesn't require async token counting + */ +interface SyncMetadata { + systemMessageTokens: number; + usageHistory: ChatUsageDisplay[]; +} + +/** + * Extracts synchronous metadata from messages (no token counting needed) + */ +export function extractSyncMetadata(messages: CmuxMessage[], model: string): SyncMetadata { + let systemMessageTokens = 0; + const usageHistory: ChatUsageDisplay[] = []; + + for (const message of messages) { + if (message.role === "assistant") { + // Accumulate system message tokens if (message.metadata?.systemMessageTokens) { systemMessageTokens += message.metadata.systemMessageTokens; } - // Store usage in history for comparison with estimates + // Store usage history for comparison with estimates if (message.metadata?.usage) { const usage = createDisplayUsage( message.metadata.usage, @@ -77,105 +237,47 @@ export function calculateTokenStats(messages: CmuxMessage[], model: string): Cha usageHistory.push(usage); } } + } + } - // Count assistant text separately from tools - // IMPORTANT: Batch tokenization by type to avoid calling tokenizer for each tiny part - // (reasoning messages can have 600+ parts like "I", "'m", " thinking") + return { systemMessageTokens, usageHistory }; +} - // Group and concatenate parts by type - const textParts = message.parts.filter((p) => p.type === "text"); - const reasoningParts = message.parts.filter((p) => p.type === "reasoning"); +/** + * Merges token counting results into consumer map + * Adds tool definition tokens only once per tool + */ +export function mergeResults( + jobs: TokenCountJob[], + results: number[], + toolDefinitions: Map<string, number>, + systemMessageTokens: number +): Map<string, { fixed: number; variable: number }> { + const consumerMap = new Map<string, { fixed: number; variable: number }>(); + const toolsWithDefinitions = new Set<string>(); - // Tokenize text parts once (not per part!) - if (textParts.length > 0) { - const allText = textParts.map((p) => p.text).join(""); - const textTokens = tokenizer.countTokens(allText); - const existing = consumerMap.get("Assistant") ?? { fixed: 0, variable: 0 }; - consumerMap.set("Assistant", { fixed: 0, variable: existing.variable + textTokens }); - } + // Process all job results + for (let i = 0; i < jobs.length; i++) { + const job = jobs[i]; + const tokenCount = results[i]; - // Tokenize reasoning parts once (not per part!)
- if (reasoningParts.length > 0) { - const allReasoning = reasoningParts.map((p) => p.text).join(""); - const reasoningTokens = tokenizer.countTokens(allReasoning); - const existing = consumerMap.get("Reasoning") ?? { fixed: 0, variable: 0 }; - consumerMap.set("Reasoning", { fixed: 0, variable: existing.variable + reasoningTokens }); - } + if (tokenCount === 0) { + continue; // Skip empty results + } - // Handle tool parts - for (const part of message.parts) { - if (part.type === "dynamic-tool") { - // Count tool arguments - const argsTokens = countTokensForData(part.input, tokenizer); - - // Count tool results if available - // Tool results have nested structure: { type: "json", value: {...} } - let resultTokens = 0; - if (part.state === "output-available" && part.output) { - // Extract the actual data from the nested output structure - const outputData = - typeof part.output === "object" && part.output !== null && "value" in part.output - ? part.output.value - : part.output; - - // Special handling for web_search encrypted content - if (part.toolName === "web_search" && Array.isArray(outputData)) { - // Check if this is encrypted web search results - const hasEncryptedContent = outputData.some( - (item: unknown): item is { encryptedContent: string } => - item !== null && - typeof item === "object" && - "encryptedContent" in item && - typeof (item as Record).encryptedContent === "string" - ); - - if (hasEncryptedContent) { - // Calculate tokens for encrypted content with heuristic - // Encrypted content is base64 encoded and then encrypted/compressed - // Apply reduction factors: - // 1. Remove base64 overhead (multiply by 0.75) - // 2. Apply an estimated token reduction factor of 4 - let encryptedChars = 0; - for (const item of outputData) { - if ( - item !== null && - typeof item === "object" && - "encryptedContent" in item && - typeof (item as Record).encryptedContent === "string" - ) { - encryptedChars += (item as { encryptedContent: string }).encryptedContent - .length; - } - } - // Use heuristic: encrypted chars / 40 for token estimation - resultTokens = Math.ceil(encryptedChars * 0.75); - } else { - // Normal web search results without encryption - resultTokens = countTokensForData(outputData, tokenizer); - } - } else { - // Normal tool results - resultTokens = countTokensForData(outputData, tokenizer); - } - } - - // Get existing or create new consumer for this tool - const existing = consumerMap.get(part.toolName) ?? { fixed: 0, variable: 0 }; - - // Add tool definition tokens if this is the first time we see this tool - let fixedTokens = existing.fixed; - if (!toolsWithDefinitions.has(part.toolName)) { - fixedTokens += getToolDefinitionTokens(part.toolName, model); - toolsWithDefinitions.add(part.toolName); - } - - // Add variable tokens (args + results) - const variableTokens = existing.variable + argsTokens + resultTokens; - - consumerMap.set(part.toolName, { fixed: fixedTokens, variable: variableTokens }); - } - } + const existing = consumerMap.get(job.consumer) ?? 
{ fixed: 0, variable: 0 }; + + // Add tool definition tokens if this is the first time we see this tool + let fixedTokens = existing.fixed; + if (toolDefinitions.has(job.consumer) && !toolsWithDefinitions.has(job.consumer)) { + fixedTokens += toolDefinitions.get(job.consumer)!; + toolsWithDefinitions.add(job.consumer); } + + // Add variable tokens + const variableTokens = existing.variable + tokenCount; + + consumerMap.set(job.consumer, { fixed: fixedTokens, variable: variableTokens }); } // Add system message tokens as a consumer if present @@ -183,6 +285,51 @@ export function calculateTokenStats(messages: CmuxMessage[], model: string): Cha consumerMap.set("System", { fixed: 0, variable: systemMessageTokens }); } + return consumerMap; +} + +/** + * Calculate token statistics from raw CmuxMessages + * This is the single source of truth for token counting + * + * @param messages - Array of CmuxMessages from chat history + * @param model - Model string (e.g., "anthropic:claude-opus-4-1") + * @returns ChatStats with token breakdown by consumer and usage history + */ +export async function calculateTokenStats( + messages: CmuxMessage[], + model: string +): Promise { + if (messages.length === 0) { + return { + consumers: [], + totalTokens: 0, + model, + tokenizerName: "No messages", + usageHistory: [], + }; + } + + performance.mark("calculateTokenStatsStart"); + + const tokenizer = await getTokenizerForModel(model); + + // Phase 1: Fetch all tool definitions in parallel (first await point) + const toolNames = collectUniqueToolNames(messages); + const toolDefinitions = await fetchAllToolDefinitions(toolNames, model); + + // Phase 2: Extract sync metadata (no awaits) + const { systemMessageTokens, usageHistory } = extractSyncMetadata(messages, model); + + // Phase 3: Create all token counting jobs (promises start immediately) + const jobs = createTokenCountingJobs(messages, tokenizer); + + // Phase 4: Execute all jobs in parallel (second await point) + const results = await Promise.all(jobs.map((j) => j.promise)); + + // Phase 5: Merge results (no awaits) + const consumerMap = mergeResults(jobs, results, toolDefinitions, systemMessageTokens); + // Calculate total tokens const totalTokens = Array.from(consumerMap.values()).reduce( (sum, val) => sum + val.fixed + val.variable, diff --git a/tests/__mocks__/chalk.js b/tests/__mocks__/chalk.js index acf0a727d..4db122ac5 100644 --- a/tests/__mocks__/chalk.js +++ b/tests/__mocks__/chalk.js @@ -1,7 +1,7 @@ // Mock chalk for Jest (chalk is ESM-only and not needed in test output) -const chalk = new Proxy(() => '', { +const chalk = new Proxy(() => "", { get: () => chalk, - apply: (_target, _thisArg, args) => args[0] + apply: (_target, _thisArg, args) => args[0], }); module.exports = { default: chalk }; diff --git a/tests/ipcMain/forkWorkspace.test.ts b/tests/ipcMain/forkWorkspace.test.ts index f804d3335..efe2280bf 100644 --- a/tests/ipcMain/forkWorkspace.test.ts +++ b/tests/ipcMain/forkWorkspace.test.ts @@ -17,7 +17,6 @@ import { import { detectDefaultTrunkBranch } from "../../src/git"; import { HistoryService } from "../../src/services/historyService"; import { createCmuxMessage } from "../../src/types/message"; -import * as path from "path"; // Skip all tests if TEST_INTEGRATION is not set const describeIntegration = shouldRunIntegrationTests() ? 
describe : describe.skip; diff --git a/tests/ipcMain/helpers.ts b/tests/ipcMain/helpers.ts index e8467bae5..c1d3e69b0 100644 --- a/tests/ipcMain/helpers.ts +++ b/tests/ipcMain/helpers.ts @@ -8,7 +8,7 @@ import type { import { isInitStart, isInitOutput, isInitEnd } from "../../src/types/ipc"; import type { Result } from "../../src/types/result"; import type { SendMessageError } from "../../src/types/errors"; -import type { WorkspaceMetadataWithPaths } from "../../src/types/workspace"; +import type { FrontendWorkspaceMetadata } from "../../src/types/workspace"; import * as path from "path"; import * as os from "os"; import { detectDefaultTrunkBranch } from "../../src/git"; @@ -86,7 +86,7 @@ export async function createWorkspace( trunkBranch?: string, runtimeConfig?: import("../../src/types/runtime").RuntimeConfig ): Promise< - { success: true; metadata: WorkspaceMetadataWithPaths } | { success: false; error: string } + { success: true; metadata: FrontendWorkspaceMetadata } | { success: false; error: string } > { const resolvedTrunk = typeof trunkBranch === "string" && trunkBranch.trim().length > 0 @@ -99,7 +99,7 @@ export async function createWorkspace( branchName, resolvedTrunk, runtimeConfig - )) as { success: true; metadata: WorkspaceMetadataWithPaths } | { success: false; error: string }; + )) as { success: true; metadata: FrontendWorkspaceMetadata } | { success: false; error: string }; } /** @@ -204,7 +204,7 @@ export async function sendMessageAndWait( ); if (!result.success) { - throw new Error(`Failed to send message: ${result.error}`); + throw new Error(`Failed to send message: ${JSON.stringify(result, null, 2)}`); } // Wait for stream completion diff --git a/tests/ipcMain/openai-web-search.test.ts b/tests/ipcMain/openai-web-search.test.ts index 2670d1687..441692766 100644 --- a/tests/ipcMain/openai-web-search.test.ts +++ b/tests/ipcMain/openai-web-search.test.ts @@ -1,9 +1,4 @@ -import { - setupWorkspace, - shouldRunIntegrationTests, - validateApiKeys, - type TestEnvironment, -} from "./setup"; +import { setupWorkspace, shouldRunIntegrationTests, validateApiKeys } from "./setup"; import { sendMessageWithModel, createEventCollector, assertStreamSuccess } from "./helpers"; // Skip all tests if TEST_INTEGRATION is not set diff --git a/tests/ipcMain/resumeStream.test.ts b/tests/ipcMain/resumeStream.test.ts index fe693a893..9e03af9a4 100644 --- a/tests/ipcMain/resumeStream.test.ts +++ b/tests/ipcMain/resumeStream.test.ts @@ -1,9 +1,4 @@ -import { - setupWorkspace, - shouldRunIntegrationTests, - validateApiKeys, - type TestEnvironment, -} from "./setup"; +import { setupWorkspace, shouldRunIntegrationTests, validateApiKeys } from "./setup"; import { sendMessageWithModel, createEventCollector, waitFor } from "./helpers"; import { IPC_CHANNELS } from "../../src/constants/ipc-constants"; import type { Result } from "../../src/types/result"; diff --git a/tests/ipcMain/sendMessage.test.ts b/tests/ipcMain/sendMessage.test.ts index 5f0f2a9b3..2363c4bc9 100644 --- a/tests/ipcMain/sendMessage.test.ts +++ b/tests/ipcMain/sendMessage.test.ts @@ -5,7 +5,6 @@ import { setupWorkspaceWithoutProvider, shouldRunIntegrationTests, validateApiKeys, - type TestEnvironment, } from "./setup"; import { sendMessageWithModel, diff --git a/tests/runtime/runtime.test.ts b/tests/runtime/runtime.test.ts index 27e4ce020..910e80bfd 100644 --- a/tests/runtime/runtime.test.ts +++ b/tests/runtime/runtime.test.ts @@ -6,6 +6,7 @@ */ // Jest globals are available automatically - no need to import +import * as os from "os"; 
import * as path from "path"; import { shouldRunIntegrationTests } from "../testUtils"; import { @@ -53,7 +54,8 @@ describeIntegration("Runtime integration tests", () => { ({ type }) => { // Helper to create runtime for this test type // Use a base working directory - TestWorkspace will create subdirectories as needed - const getBaseWorkdir = () => (type === "ssh" ? sshConfig!.workdir : "/tmp"); + // For local runtime, use os.tmpdir() which matches where TestWorkspace creates directories + const getBaseWorkdir = () => (type === "ssh" ? sshConfig!.workdir : os.tmpdir()); const createRuntime = (): Runtime => createTestRuntime(type, getBaseWorkdir(), sshConfig); describe("exec() - Command execution", () => { diff --git a/tests/runtime/test-helpers.ts b/tests/runtime/test-helpers.ts index 3c54f096e..9d85239de 100644 --- a/tests/runtime/test-helpers.ts +++ b/tests/runtime/test-helpers.ts @@ -3,6 +3,7 @@ */ import * as fs from "fs/promises"; +import { realpathSync } from "fs"; import * as os from "os"; import * as path from "path"; import type { Runtime } from "@/runtime/Runtime"; @@ -25,7 +26,9 @@ export function createTestRuntime( ): Runtime { switch (type) { case "local": - return new LocalRuntime(workdir); + // Resolve symlinks (e.g., /tmp -> /private/tmp on macOS) to match git worktree paths + const resolvedWorkdir = realpathSync(workdir); + return new LocalRuntime(resolvedWorkdir); case "ssh": if (!sshConfig) { throw new Error("SSH config required for SSH runtime"); @@ -81,7 +84,9 @@ export class TestWorkspace { return new TestWorkspace(runtime, workspacePath, true); } else { // For local, use temp directory - const workspacePath = await fs.mkdtemp(path.join(os.tmpdir(), "runtime-test-")); + // Resolve symlinks (e.g., /tmp -> /private/tmp on macOS) to avoid git worktree path mismatches + const tempPath = await fs.mkdtemp(path.join(os.tmpdir(), "runtime-test-")); + const workspacePath = await fs.realpath(tempPath); return new TestWorkspace(runtime, workspacePath, false); } } diff --git a/tsconfig.main.json b/tsconfig.main.json index 064488c22..b63625bb8 100644 --- a/tsconfig.main.json +++ b/tsconfig.main.json @@ -12,6 +12,7 @@ "src/main-desktop.ts", "src/constants/**/*", "src/web/**/*", + "src/utils/main/**/*", "src/types/**/*.d.ts" ], "exclude": ["src/App.tsx", "src/main.tsx"] diff --git a/vite.config.ts b/vite.config.ts index c881b9388..26ef9f70e 100644 --- a/vite.config.ts +++ b/vite.config.ts @@ -30,9 +30,7 @@ const reactCompilerConfig = { }; // Babel plugins configuration (shared between dev and production) -const babelPlugins = [ - ["babel-plugin-react-compiler", reactCompilerConfig], -]; +const babelPlugins = [["babel-plugin-react-compiler", reactCompilerConfig]]; // Base plugins for both dev and production const basePlugins = [ @@ -81,7 +79,7 @@ export default defineConfig(({ mode }) => ({ }, worker: { format: "es", - plugins: [topLevelAwait()], + plugins: () => [topLevelAwait()], }, server: { host: devServerHost, // Configurable via CMUX_VITE_HOST (defaults to 127.0.0.1 for security)
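/*
 * Illustrative sketch, not part of the patch above: how a caller might use the now-async
 * calculateTokenStats. The import path, the logChatStats helper, and the model string are
 * assumptions for demonstration; only the function signature and the ChatStats fields
 * (totalTokens, model, tokenizerName, usageHistory) come from the diff.
 */
import { calculateTokenStats } from "@/utils/tokens/tokenStatsCalculator";
import type { CmuxMessage } from "@/types/message";

async function logChatStats(messages: CmuxMessage[]): Promise<void> {
  // Tokenizer lookup, tool-definition fetches, and per-part token counts all run behind
  // this single await, so a caller can await the result directly instead of round-tripping
  // requests through the deleted tokenStats worker.
  const stats = await calculateTokenStats(messages, "anthropic:claude-opus-4-1");
  console.log(`${stats.model} (${stats.tokenizerName}): ${stats.totalTokens} tokens`);
  console.log(`usage history entries: ${stats.usageHistory.length}`);
}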
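/*
 * Illustrative sketch, not part of the patch above: the job/merge shape the refactor
 * introduces. Jobs and their results are matched by position, which is why mergeResults
 * receives both arrays; the literal token counts below are made up for the demo.
 */
import { mergeResults, type TokenCountJob } from "@/utils/tokens/tokenStatsCalculator";

async function demoMerge(): Promise<void> {
  const jobs: TokenCountJob[] = [
    { consumer: "User", promise: Promise.resolve(12) },
    { consumer: "Read", promise: Promise.resolve(40) },
  ];

  // Phase 4 of calculateTokenStats: run every counting job in parallel, preserving order.
  const results = await Promise.all(jobs.map((j) => j.promise));

  // Phase 5: fold counts per consumer; a tool's definition tokens are charged only once.
  const consumerMap = mergeResults(jobs, results, new Map([["Read", 25]]), 0);
  console.log(consumerMap.get("User")); // { fixed: 0, variable: 12 }
  console.log(consumerMap.get("Read")); // { fixed: 25, variable: 40 }
}

void demoMerge();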
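/*
 * Illustrative sketch, not part of the patch above: why tests/runtime/test-helpers.ts now
 * resolves symlinks. On macOS, temp paths sit behind a symlink (e.g. /tmp -> /private/tmp),
 * so the path a test creates and the canonical path git reports for a worktree can differ
 * unless both are passed through realpath first.
 */
import { realpathSync } from "fs";
import * as os from "os";

const tmp = os.tmpdir();            // may be a symlinked location on macOS
const resolved = realpathSync(tmp); // canonical path, e.g. under /private on macOS
console.log(tmp === resolved);      // false when tmpdir sits behind a symlink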