Skip to content

Commit 189f090

Browse files
jddunn and claude committed
feat: implement HybridUtilityAI (was empty placeholder)
Delegates to LLM-based or statistical implementations based on task type. LLM preferred for summarization/classification/keywords, statistical preferred for tokenization/stemming/similarity/readability. Falls back gracefully when one backend is unavailable. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
1 parent 8764f53 commit 189f090

File tree

2 files changed

+170
-1
lines changed

2 files changed

+170
-1
lines changed
Lines changed: 169 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,169 @@
1+
/**
2+
* @fileoverview Hybrid IUtilityAI that delegates to LLM-based or statistical
3+
* implementations depending on the task. LLM methods are preferred for
4+
* summarization, classification, and keyword extraction; statistical methods
5+
* for tokenization, stemming, n-grams, readability, and similarity.
6+
*
7+
* Falls back gracefully: if one backend is unavailable, the other is tried.
8+
*/
9+
10+
import type {
11+
IUtilityAI,
12+
UtilityAIConfigBase,
13+
ParseJsonOptions,
14+
SummarizationOptions,
15+
ClassificationOptions,
16+
ClassificationResult,
17+
KeywordExtractionOptions,
18+
TokenizationOptions,
19+
StemmingOptions,
20+
SimilarityOptions,
21+
SentimentAnalysisOptions,
22+
SentimentResult,
23+
LanguageDetectionOptions,
24+
LanguageDetectionResult,
25+
TextNormalizationOptions,
26+
NGramOptions,
27+
ReadabilityOptions,
28+
ReadabilityResult,
29+
} from './IUtilityAI';
30+
31+
export interface HybridUtilityAIConfig extends UtilityAIConfigBase {
32+
/** LLM-based implementation (used for generative tasks). */
33+
llm?: IUtilityAI;
34+
/** Statistical/NLP implementation (used for deterministic tasks). */
35+
statistical?: IUtilityAI;
36+
}
37+
38+
/**
39+
* Routes each utility method to the most appropriate backend:
40+
* - **LLM**: summarization, classification, keyword extraction, JSON repair
41+
* - **Statistical**: tokenization, stemming, n-grams, readability, similarity
42+
* - **Either with preference**: sentiment, language detection
43+
*
44+
* If the preferred backend is unavailable, falls back to the other.
45+
*/
46+
export class HybridUtilityAI implements IUtilityAI {
47+
public readonly utilityId: string;
48+
private readonly llm: IUtilityAI | undefined;
49+
private readonly stat: IUtilityAI | undefined;
50+
51+
constructor(config: HybridUtilityAIConfig) {
52+
this.llm = config.llm;
53+
this.stat = config.statistical;
54+
if (!this.llm && !this.stat) {
55+
throw new Error('HybridUtilityAI requires at least one backend (llm or statistical)');
56+
}
57+
this.utilityId = config.utilityId ?? `hybrid-${this.llm?.utilityId ?? 'none'}-${this.stat?.utilityId ?? 'none'}`;
58+
}
59+
60+
async initialize(config: UtilityAIConfigBase & Record<string, any>): Promise<void> {
61+
await Promise.all([
62+
this.llm?.initialize?.(config),
63+
this.stat?.initialize?.(config),
64+
]);
65+
}
66+
67+
private preferLLM(): IUtilityAI {
68+
return this.llm ?? this.stat!;
69+
}
70+
71+
private preferStat(): IUtilityAI {
72+
return this.stat ?? this.llm!;
73+
}
74+
75+
// --- LLM-preferred methods ---
76+
77+
async summarize(textToSummarize: string, options?: SummarizationOptions): Promise<string> {
78+
return this.preferLLM().summarize(textToSummarize, options);
79+
}
80+
81+
async classifyText(textToClassify: string, options: ClassificationOptions): Promise<ClassificationResult> {
82+
return this.preferLLM().classifyText(textToClassify, options);
83+
}
84+
85+
async extractKeywords(textToAnalyze: string, options?: KeywordExtractionOptions): Promise<string[]> {
86+
return this.preferLLM().extractKeywords(textToAnalyze, options);
87+
}
88+
89+
async parseJsonSafe<T = any>(jsonString: string, options?: ParseJsonOptions<T>): Promise<T | null> {
90+
// Try statistical (fast parsing) first, fall back to LLM (repair)
91+
try {
92+
const result = await this.preferStat().parseJsonSafe<T>(jsonString, options);
93+
if (result !== null) return result;
94+
} catch { /* fall through */ }
95+
if (this.llm && this.stat) {
96+
return this.llm.parseJsonSafe<T>(jsonString, options);
97+
}
98+
return null;
99+
}
100+
101+
// --- Statistical-preferred methods ---
102+
103+
async tokenize(text: string, options?: TokenizationOptions): Promise<string[]> {
104+
return this.preferStat().tokenize(text, options);
105+
}
106+
107+
async stemTokens(tokens: string[], options?: StemmingOptions): Promise<string[]> {
108+
return this.preferStat().stemTokens(tokens, options);
109+
}
110+
111+
async normalizeText(text: string, options?: TextNormalizationOptions): Promise<string> {
112+
return this.preferStat().normalizeText(text, options);
113+
}
114+
115+
async generateNGrams(tokens: string[], options: NGramOptions): Promise<Record<number, string[][]>> {
116+
return this.preferStat().generateNGrams(tokens, options);
117+
}
118+
119+
async calculateReadability(text: string, options: ReadabilityOptions): Promise<ReadabilityResult> {
120+
return this.preferStat().calculateReadability(text, options);
121+
}
122+
123+
async calculateSimilarity(text1: string, text2: string, options?: SimilarityOptions): Promise<number> {
124+
return this.preferStat().calculateSimilarity(text1, text2, options);
125+
}
126+
127+
// --- Either with preference ---
128+
129+
async analyzeSentiment(text: string, options?: SentimentAnalysisOptions): Promise<SentimentResult> {
130+
return this.preferStat().analyzeSentiment(text, options);
131+
}
132+
133+
async detectLanguage(text: string, options?: LanguageDetectionOptions): Promise<LanguageDetectionResult[]> {
134+
return this.preferStat().detectLanguage(text, options);
135+
}
136+
137+
// --- Health & lifecycle ---
138+
139+
async checkHealth(): Promise<{ isHealthy: boolean; details?: any; dependencies?: Array<{ name: string; isHealthy: boolean; details?: any }> }> {
140+
const deps: Array<{ name: string; isHealthy: boolean; details?: any }> = [];
141+
if (this.llm) {
142+
try {
143+
const h = await this.llm.checkHealth();
144+
deps.push({ name: `llm:${this.llm.utilityId}`, ...h });
145+
} catch (e) {
146+
deps.push({ name: `llm:${this.llm.utilityId}`, isHealthy: false, details: (e as Error).message });
147+
}
148+
}
149+
if (this.stat) {
150+
try {
151+
const h = await this.stat.checkHealth();
152+
deps.push({ name: `stat:${this.stat.utilityId}`, ...h });
153+
} catch (e) {
154+
deps.push({ name: `stat:${this.stat.utilityId}`, isHealthy: false, details: (e as Error).message });
155+
}
156+
}
157+
return {
158+
isHealthy: deps.every((d) => d.isHealthy),
159+
dependencies: deps,
160+
};
161+
}
162+
163+
async shutdown(): Promise<void> {
164+
await Promise.all([
165+
this.llm?.shutdown?.(),
166+
this.stat?.shutdown?.(),
167+
]);
168+
}
169+
}

src/core/ai_utilities/index.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
export * from './IUtilityAI.js';
22
export * from './LLMUtilityAI.js';
33
export * from './StatisticalUtilityAI.js';
4-
// HybridUtilityAI.ts is currently empty — export when implemented
4+
export * from './HybridUtilityAI.js';

0 commit comments

Comments
 (0)