Commit 2a8acfc
fix: remove unused import
1 parent 989657d · commit 2a8acfc

6 files changed: +37 -37 lines changed

scripts/check_bundle_size.sh

Lines changed: 5 additions & 5 deletions
@@ -5,8 +5,8 @@ set -euo pipefail
 cd "$(dirname "$0")/.."
 
 # Budgets (in bytes)
-MAX_INDEX_GZIP=409600 # 400KB gzipped
-MAX_WORKER_SIZE=10240 # 10KB uncompressed
+MAX_INDEX_GZIP=409600 # 400KB gzipped
+MAX_WORKER_SIZE=10240 # 10KB uncompressed
 
 echo "Checking bundle size budgets..."
 
@@ -30,18 +30,18 @@ INDEX_SIZE_KB=$((INDEX_SIZE / 1024))
 MAX_INDEX_KB=$((MAX_INDEX_GZIP / 1024))
 
 echo "Main bundle (gzipped): ${INDEX_SIZE_KB}KB (budget: ${MAX_INDEX_KB}KB)"
-if (( INDEX_SIZE > MAX_INDEX_GZIP )); then
+if ((INDEX_SIZE > MAX_INDEX_GZIP)); then
   echo "❌ Main bundle exceeds budget by $((INDEX_SIZE - MAX_INDEX_GZIP)) bytes" >&2
   exit 1
 fi
 
 # Check worker uncompressed size
-WORKER_SIZE=$(wc -c < "$WORKER_FILE" | tr -d ' ')
+WORKER_SIZE=$(wc -c <"$WORKER_FILE" | tr -d ' ')
 WORKER_SIZE_KB=$((WORKER_SIZE / 1024))
 MAX_WORKER_KB=$((MAX_WORKER_SIZE / 1024))
 
 echo "Worker bundle: ${WORKER_SIZE_KB}KB (budget: ${MAX_WORKER_KB}KB)"
-if (( WORKER_SIZE > MAX_WORKER_SIZE )); then
+if ((WORKER_SIZE > MAX_WORKER_SIZE)); then
   echo "❌ Worker bundle exceeds budget by $((WORKER_SIZE - MAX_WORKER_SIZE)) bytes" >&2
   echo "   This likely means models.json or ai-tokenizer leaked into the worker" >&2
   exit 1
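
The script boils down to two comparisons: the gzipped size of the main bundle against a 400KB budget, and the raw size of the worker against 10KB. For reference, here is a minimal Node/TypeScript sketch of the same check; it is not part of the commit, and the dist/ file names are placeholders for the hashed bundles the script resolves at runtime.

```typescript
// Illustrative sketch only; paths and error handling are simplified.
import { readFileSync } from "node:fs";
import { gzipSync } from "node:zlib";

const MAX_INDEX_GZIP = 409600; // 400KB gzipped
const MAX_WORKER_SIZE = 10240; // 10KB uncompressed

function checkBudget(label: string, size: number, budget: number): boolean {
  console.log(`${label}: ${Math.floor(size / 1024)}KB (budget: ${Math.floor(budget / 1024)}KB)`);
  if (size > budget) {
    console.error(`❌ ${label} exceeds budget by ${size - budget} bytes`);
    return false;
  }
  return true;
}

// Placeholder paths: the real script locates the hashed bundle names in dist/.
const indexGzipped = gzipSync(readFileSync("dist/index.js")).length;
const workerSize = readFileSync("dist/tokenStats.worker.js").length;

if (
  !checkBudget("Main bundle (gzipped)", indexGzipped, MAX_INDEX_GZIP) ||
  !checkBudget("Worker bundle", workerSize, MAX_WORKER_SIZE)
) {
  process.exit(1);
}
```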

scripts/check_eager_imports.sh

Lines changed: 8 additions & 8 deletions
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
 # Detects eager imports of heavy packages in startup-critical and renderer/worker files
-#
+#
 # Main process: AI SDK packages must be lazy-loaded to maintain fast startup (<4s)
 # Renderer/Worker: Large data files (models.json) and ai-tokenizer must never be imported
 
@@ -105,18 +105,18 @@ done
 # Check bundled worker if it exists
 if [ -f dist/tokenStats.worker-*.js ]; then
   WORKER_FILE=$(find dist -name 'tokenStats.worker-*.js' | head -1)
-  WORKER_SIZE=$(wc -c < "$WORKER_FILE" | tr -d ' ')
-
+  WORKER_SIZE=$(wc -c <"$WORKER_FILE" | tr -d ' ')
+
   echo "==> Checking worker bundle for heavy imports..."
-
+
   # If worker is suspiciously large (>50KB), likely has models.json or ai-tokenizer
-  if (( WORKER_SIZE > 51200 )); then
+  if ((WORKER_SIZE > 51200)); then
     echo "❌ WORKER TOO LARGE: $WORKER_FILE is ${WORKER_SIZE} bytes (>50KB)"
     echo "   This suggests models.json (701KB) or ai-tokenizer leaked in"
-
+
     # Try to identify what's in there
-    if grep -q "models.json" "$WORKER_FILE" 2>/dev/null || \
-      strings "$WORKER_FILE" 2>/dev/null | grep -q "anthropic\|openai" | head -10; then
+    if grep -q "models.json" "$WORKER_FILE" 2>/dev/null \
+      || strings "$WORKER_FILE" 2>/dev/null | grep -q "anthropic\|openai" | head -10; then
       echo "   Found model names in bundle - likely models.json"
     fi
     failed=1
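
The worker check relies on a simple heuristic: the worker should stay tiny, so a bundle over 50KB almost certainly means models.json (701KB) or ai-tokenizer was pulled in. A hedged TypeScript sketch of the same heuristic, with the threshold and marker strings taken from the script and everything else illustrative:

```typescript
// Illustrative sketch only, not the project's actual tooling.
import { readFileSync, statSync } from "node:fs";

const WORKER_SIZE_LIMIT = 51200; // 50KB: larger suggests models.json or ai-tokenizer leaked in

function checkWorkerBundle(workerFile: string): boolean {
  const size = statSync(workerFile).size;
  if (size <= WORKER_SIZE_LIMIT) return true;

  console.error(`❌ WORKER TOO LARGE: ${workerFile} is ${size} bytes (>50KB)`);

  // Look for tell-tale strings from the heavy modules.
  const contents = readFileSync(workerFile, "utf8");
  if (/models\.json|anthropic|openai/.test(contents)) {
    console.error("   Found model names in bundle - likely models.json");
  }
  return false;
}
```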

src/utils/main/StreamingTokenTracker.test.ts

Lines changed: 8 additions & 8 deletions
@@ -8,30 +8,30 @@ import { StreamingTokenTracker } from "./StreamingTokenTracker";
 describe("StreamingTokenTracker", () => {
   it("should reinitialize tokenizer when model changes", () => {
     const tracker = new StreamingTokenTracker();
-
+
     // Set first model
     tracker.setModel("openai:gpt-4");
     const count1 = tracker.countTokens("test");
-
+
     // Switch to different model
     tracker.setModel("anthropic:claude-opus-4");
     const count2 = tracker.countTokens("test");
-
+
     // Both should return valid counts
     expect(count1).toBeGreaterThan(0);
     expect(count2).toBeGreaterThan(0);
   });
-
+
   it("should not reinitialize when model stays the same", () => {
     const tracker = new StreamingTokenTracker();
-
+
     // Set model twice
     tracker.setModel("openai:gpt-4");
     const count1 = tracker.countTokens("test");
-
-    tracker.setModel("openai:gpt-4"); // Same model
+
+    tracker.setModel("openai:gpt-4"); // Same model
     const count2 = tracker.countTokens("test");
-
+
     // Should get same count (cached)
     expect(count1).toBe(count2);
   });

src/utils/main/StreamingTokenTracker.ts

Lines changed: 1 addition & 1 deletion
@@ -17,7 +17,7 @@ export class StreamingTokenTracker {
   /**
    * Initialize tokenizer for the current model
    * Should be called when model changes or on first stream
-   *
+   *
    * IMPORTANT: Reinitializes tokenizer when model changes to ensure correct encoding.
    * getTokenizerForModel() closes over the model string, so we must create a new
    * tokenizer instance when switching models.
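
This comment is the rationale for the setModel guard exercised by the tests above: because getTokenizerForModel() closes over the model string, switching models must build a fresh tokenizer, while repeating the same model can keep the existing one. A minimal sketch of that guard, assuming a tokenizer object exposing countTokens (the field names are illustrative, not the actual class internals):

```typescript
// Sketch only: the real StreamingTokenTracker's fields may differ.
import { getTokenizerForModel } from "./tokenizer";

class StreamingTokenTrackerSketch {
  private model: string | null = null;
  private tokenizer: { countTokens(text: string): number } | null = null;

  setModel(model: string): void {
    // Reinitialize only on an actual model change; the returned tokenizer
    // closes over the model string, so it cannot be reused across models.
    if (model !== this.model) {
      this.model = model;
      this.tokenizer = getTokenizerForModel(model);
    }
  }

  countTokens(text: string): number {
    // Assumed fallback when no model has been set yet.
    return this.tokenizer ? this.tokenizer.countTokens(text) : 0;
  }
}
```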

src/utils/main/tokenizer.test.ts

Lines changed: 12 additions & 12 deletions
@@ -2,51 +2,51 @@
  * Tests for tokenizer cache behavior
  */
 
-import { describe, it, expect, beforeEach } from "@jest/globals";
+import { describe, it, expect } from "@jest/globals";
 import { getTokenizerForModel } from "./tokenizer";
 
 describe("tokenizer cache", () => {
   const testText = "Hello, world!";
-
+
   it("should use different cache keys for different models", () => {
     // Get tokenizers for different models
     const gpt4Tokenizer = getTokenizerForModel("openai:gpt-4");
     const claudeTokenizer = getTokenizerForModel("anthropic:claude-opus-4");
-
+
     // Count tokens with first model
     const gpt4Count = gpt4Tokenizer.countTokens(testText);
-
+
     // Count tokens with second model
     const claudeCount = claudeTokenizer.countTokens(testText);
-
+
     // Counts may differ because different encodings
     // This test mainly ensures no crash and cache isolation
     expect(typeof gpt4Count).toBe("number");
     expect(typeof claudeCount).toBe("number");
     expect(gpt4Count).toBeGreaterThan(0);
     expect(claudeCount).toBeGreaterThan(0);
   });
-
+
   it("should return same count for same (model, text) pair from cache", () => {
     const tokenizer = getTokenizerForModel("openai:gpt-4");
-
+
     // First call
     const count1 = tokenizer.countTokens(testText);
-
+
     // Second call should hit cache
     const count2 = tokenizer.countTokens(testText);
-
+
     expect(count1).toBe(count2);
   });
-
+
   it("should normalize model keys for cache consistency", () => {
     // These should map to the same cache key
     const tokenizer1 = getTokenizerForModel("anthropic:claude-opus-4");
     const tokenizer2 = getTokenizerForModel("anthropic/claude-opus-4");
-
+
     const count1 = tokenizer1.countTokens(testText);
     const count2 = tokenizer2.countTokens(testText);
-
+
     // Should get same count since they normalize to same model
     expect(count1).toBe(count2);
   });

src/utils/main/tokenizer.ts

Lines changed: 3 additions & 3 deletions
@@ -68,10 +68,10 @@ export async function loadTokenizerModules(): Promise<void> {
 /**
  * LRU cache for token counts by (model, text) pairs
  * Avoids re-tokenizing identical strings with the same encoding
- *
+ *
  * Key: CRC32 checksum of "model:text" to ensure counts are model-specific
  * Value: token count
- *
+ *
  * IMPORTANT: Cache key includes model because different encodings produce different counts.
  * For async tokenization (approx → exact), the key stays stable so exact overwrites approx.
  */
@@ -89,7 +89,7 @@ const tokenCountCache = new LRUCache<number, number>({
  * Avoids re-tokenizing identical strings (system messages, tool definitions, etc.)
  *
  * Cache key includes model to prevent cross-model count reuse.
- *
+ *
  * NOTE: For async tokenization, this returns an approximation immediately and caches
  * the accurate count in the background. Subsequent calls with the same (model, text) pair
  * will use the cached accurate count once ready.
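
These doc comments describe the cache design the tests above exercise: a bounded LRU keyed by a CRC32 checksum of "model:text", so identical text counted under different encodings never shares a slot, and an approximate count can later be overwritten by the exact one under the same key. A rough sketch of that keying, assuming a crc-32 style helper and cache options that are not shown in this diff:

```typescript
// Illustrative sketch of the model-scoped cache key, not the real implementation.
import { LRUCache } from "lru-cache";
import { str as crc32 } from "crc-32"; // assumed checksum dependency

const tokenCountCache = new LRUCache<number, number>({ max: 5000 }); // cache size is a guess

function cacheKey(model: string, text: string): number {
  // Keying on "model:text" keeps counts model-specific: the same string
  // tokenized under a different encoding lands in a different slot.
  return crc32(`${model}:${text}`);
}

function countWithCache(model: string, text: string, tokenize: (t: string) => number): number {
  const key = cacheKey(model, text);
  const cached = tokenCountCache.get(key);
  if (cached !== undefined) return cached;

  const count = tokenize(text);
  tokenCountCache.set(key, count); // an exact count later overwrites an approx one under the same key
  return count;
}
```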
