From 7d55d207df26df67688cfcd4c2d9efd8e6e2bfa6 Mon Sep 17 00:00:00 2001 From: Tom Aylott Date: Tue, 11 Nov 2025 17:34:43 -0500 Subject: [PATCH 1/4] Add basic Effect example extracted from wrapper repo --- typescript/effect-ai/src/basic/example.ts | 137 ++++++++++++++++++++++ 1 file changed, 137 insertions(+) create mode 100644 typescript/effect-ai/src/basic/example.ts diff --git a/typescript/effect-ai/src/basic/example.ts b/typescript/effect-ai/src/basic/example.ts new file mode 100644 index 0000000..45bb052 --- /dev/null +++ b/typescript/effect-ai/src/basic/example.ts @@ -0,0 +1,137 @@ +/** + * Example: Using OpenRouter with @effect/ai and @effect/ai-openrouter + * + * This example demonstrates idiomatic Effect patterns for AI interactions: + * - Effect.gen for generator-style effect composition + * - Layer-based dependency injection + * - Type-safe error handling with Effect + * - Streaming responses with Effect streams + */ + +import * as Chat from "@effect/ai/Chat" +import * as LanguageModel from "@effect/ai/LanguageModel" +import * as OpenRouterClient from "@effect/ai-openrouter/OpenRouterClient" +import * as OpenRouterLanguageModel from "@effect/ai-openrouter/OpenRouterLanguageModel" +import { FetchHttpClient } from "@effect/platform" +import * as BunContext from "@effect/platform-bun/BunContext" +import { Console, Effect, Layer, Redacted, Stream } from "effect" + +/** + * Main program using Effect.gen for composable effects + * + * Effect.gen is the idiomatic way to write Effect code - it provides + * a generator-based syntax similar to async/await but with full + * error handling and dependency injection capabilities + */ +const program = Effect.gen(function* () { + // Log separator for readability + yield* Console.log("\n=== Example 1: Simple Chat Completion ===\n") + + // Generate text using the language model + // The LanguageModel service is injected via the Effect context + const response = yield* LanguageModel.generateText({ + prompt: "Explain what 
Effect is in functional programming in 2 sentences.", + }) + + // Access the generated text from the response + yield* Console.log("Response:", response.text) + yield* Console.log("Finish reason:", response.finishReason) + yield* Console.log("Usage:", response.usage) + + // Example 2: Stateful conversation with Chat + yield* Console.log("\n=== Example 2: Stateful Chat Conversation ===\n") + + // Chat.empty creates a new chat session with empty history + // Chat maintains conversation context across multiple turns + const chat = yield* Chat.empty + + // First turn - the model responds to our greeting + const greeting = yield* chat.generateText({ + prompt: "Hi! I'm learning about Effect.", + }) + yield* Console.log("Assistant:", greeting.text) + + // Second turn - the model has context from the previous message + // This demonstrates how Chat maintains conversation state + const followUp = yield* chat.generateText({ + prompt: "What are the main benefits?", + }) + yield* Console.log("Assistant:", followUp.text) + + // Example 3: Streaming responses + yield* Console.log("\n=== Example 3: Streaming Text Generation ===\n") + + yield* Console.log("Streaming response:") + + // streamText returns a Stream of response parts + // Streams in Effect are lazy and composable + // Stream.runForEach processes each part as it arrives + yield* LanguageModel.streamText({ + prompt: "Count from 1 to 5, explaining each number briefly.", + }).pipe( + Stream.runForEach((part) => { + // Only print text deltas to show streaming effect + if (part.type === "text-delta") { + // TODO: print without newlines + return Console.log(part.delta) + } + // Log other part types for demonstration + return Console.log(`[${part.type}]`) + }) + ) + + yield* Console.log("\n=== All examples completed ===") +}) + +/** + * Layer composition for dependency injection + * + * Effect uses Layers to construct the dependency graph. 
+ * Layers are composable and type-safe, ensuring all dependencies + * are satisfied at compile time. + */ + +// Create the OpenRouter HTTP client layer with API key +// Redacted.make ensures the API key is never accidentally logged +const OpenRouterClientLayer = OpenRouterClient.layer({ + apiKey: Redacted.make(process.env.OPENROUTER_API_KEY!), +}).pipe( + // Provide the Fetch HTTP client implementation + // Layer.provide composes layers, satisfying dependencies + Layer.provide(FetchHttpClient.layer) +) + +// Create the language model layer using OpenRouter +// This uses the "openai/gpt-4o-mini" model via OpenRouter +const OpenRouterModelLayer = OpenRouterLanguageModel.layer({ + model: "openai/gpt-4o-mini", + config: { + // Optional: configure model parameters + temperature: 0.7, + max_tokens: 500, + }, +}).pipe( + // The model layer depends on the OpenRouter client + Layer.provide(OpenRouterClientLayer) +) + +/** + * Run the program with dependency injection + * + * Effect.provide supplies all required dependencies (layers) to the program. + * The layers are constructed once and shared across the entire program. + * + * Effect.runPromise executes the Effect and returns a Promise. + * In production, you'd typically use Effect.runFork or other runners + * for better resource management. 
+ */ +await program.pipe( + // Provide the language model layer (includes all dependencies) + Effect.provide(OpenRouterModelLayer), + // Provide the Bun runtime context for platform services + Effect.provide(BunContext.layer), + // Run the effect - returns a Promise + Effect.runPromise +) + +console.log("\n✓ Program completed successfully") From ca0b0e749b53f98593aa4dd12dbefafce8d936be Mon Sep 17 00:00:00 2001 From: Tom Aylott Date: Tue, 11 Nov 2025 18:18:02 -0500 Subject: [PATCH 2/4] Run biome format and fix import sorting --- typescript/effect-ai/src/basic/example.ts | 76 +++++++++++------------ 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/typescript/effect-ai/src/basic/example.ts b/typescript/effect-ai/src/basic/example.ts index 45bb052..78a04c6 100644 --- a/typescript/effect-ai/src/basic/example.ts +++ b/typescript/effect-ai/src/basic/example.ts @@ -8,13 +8,13 @@ * - Streaming responses with Effect streams */ -import * as Chat from "@effect/ai/Chat" -import * as LanguageModel from "@effect/ai/LanguageModel" -import * as OpenRouterClient from "@effect/ai-openrouter/OpenRouterClient" -import * as OpenRouterLanguageModel from "@effect/ai-openrouter/OpenRouterLanguageModel" -import { FetchHttpClient } from "@effect/platform" -import * as BunContext from "@effect/platform-bun/BunContext" -import { Console, Effect, Layer, Redacted, Stream } from "effect" +import * as OpenRouterClient from '@effect/ai-openrouter/OpenRouterClient'; +import * as OpenRouterLanguageModel from '@effect/ai-openrouter/OpenRouterLanguageModel'; +import * as Chat from '@effect/ai/Chat'; +import * as LanguageModel from '@effect/ai/LanguageModel'; +import { FetchHttpClient } from '@effect/platform'; +import * as BunContext from '@effect/platform-bun/BunContext'; +import { Console, Effect, Layer, Redacted, Stream } from 'effect'; /** * Main program using Effect.gen for composable effects @@ -25,63 +25,63 @@ import { Console, Effect, Layer, Redacted, Stream } from "effect" */ 
const program = Effect.gen(function* () { // Log separator for readability - yield* Console.log("\n=== Example 1: Simple Chat Completion ===\n") + yield* Console.log('\n=== Example 1: Simple Chat Completion ===\n'); // Generate text using the language model // The LanguageModel service is injected via the Effect context const response = yield* LanguageModel.generateText({ - prompt: "Explain what Effect is in functional programming in 2 sentences.", - }) + prompt: 'Explain what Effect is in functional programming in 2 sentences.', + }); // Access the generated text from the response - yield* Console.log("Response:", response.text) - yield* Console.log("Finish reason:", response.finishReason) - yield* Console.log("Usage:", response.usage) + yield* Console.log('Response:', response.text); + yield* Console.log('Finish reason:', response.finishReason); + yield* Console.log('Usage:', response.usage); // Example 2: Stateful conversation with Chat - yield* Console.log("\n=== Example 2: Stateful Chat Conversation ===\n") + yield* Console.log('\n=== Example 2: Stateful Chat Conversation ===\n'); // Chat.empty creates a new chat session with empty history // Chat maintains conversation context across multiple turns - const chat = yield* Chat.empty + const chat = yield* Chat.empty; // First turn - the model responds to our greeting const greeting = yield* chat.generateText({ prompt: "Hi! 
I'm learning about Effect.", - }) - yield* Console.log("Assistant:", greeting.text) + }); + yield* Console.log('Assistant:', greeting.text); // Second turn - the model has context from the previous message // This demonstrates how Chat maintains conversation state const followUp = yield* chat.generateText({ - prompt: "What are the main benefits?", - }) - yield* Console.log("Assistant:", followUp.text) + prompt: 'What are the main benefits?', + }); + yield* Console.log('Assistant:', followUp.text); // Example 3: Streaming responses - yield* Console.log("\n=== Example 3: Streaming Text Generation ===\n") + yield* Console.log('\n=== Example 3: Streaming Text Generation ===\n'); - yield* Console.log("Streaming response:") + yield* Console.log('Streaming response:'); // streamText returns a Stream of response parts // Streams in Effect are lazy and composable // Stream.runForEach processes each part as it arrives yield* LanguageModel.streamText({ - prompt: "Count from 1 to 5, explaining each number briefly.", + prompt: 'Count from 1 to 5, explaining each number briefly.', }).pipe( Stream.runForEach((part) => { // Only print text deltas to show streaming effect - if (part.type === "text-delta") { + if (part.type === 'text-delta') { // TODO: print without newlines - return Console.log(part.delta) + return Console.log(part.delta); } // Log other part types for demonstration - return Console.log(`[${part.type}]`) - }) - ) + return Console.log(`[${part.type}]`); + }), + ); - yield* Console.log("\n=== All examples completed ===") -}) + yield* Console.log('\n=== All examples completed ==='); +}); /** * Layer composition for dependency injection @@ -98,13 +98,13 @@ const OpenRouterClientLayer = OpenRouterClient.layer({ }).pipe( // Provide the Fetch HTTP client implementation // Layer.provide composes layers, satisfying dependencies - Layer.provide(FetchHttpClient.layer) -) + Layer.provide(FetchHttpClient.layer), +); // Create the language model layer using OpenRouter // This 
uses the "openai/gpt-4o-mini" model via OpenRouter const OpenRouterModelLayer = OpenRouterLanguageModel.layer({ - model: "openai/gpt-4o-mini", + model: 'openai/gpt-4o-mini', config: { // Optional: configure model parameters temperature: 0.7, @@ -112,8 +112,8 @@ const OpenRouterModelLayer = OpenRouterLanguageModel.layer({ }, }).pipe( // The model layer depends on the OpenRouter client - Layer.provide(OpenRouterClientLayer) -) + Layer.provide(OpenRouterClientLayer), +); /** * Run the program with dependency injection @@ -131,7 +131,7 @@ await program.pipe( // Provide the Bun runtime context for platform services Effect.provide(BunContext.layer), // Run the effect - returns a Promise - Effect.runPromise -) + Effect.runPromise, +); -console.log("\n✓ Program completed successfully") +console.log('\n✓ Program completed successfully'); From 42724046144c59081d66c6331f65f6fc879c2921 Mon Sep 17 00:00:00 2001 From: Tom Aylott Date: Wed, 12 Nov 2025 18:07:33 -0500 Subject: [PATCH 3/4] Remove stale TODO comment about printing without newlines --- typescript/effect-ai/src/basic/example.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/typescript/effect-ai/src/basic/example.ts b/typescript/effect-ai/src/basic/example.ts index 78a04c6..08c830a 100644 --- a/typescript/effect-ai/src/basic/example.ts +++ b/typescript/effect-ai/src/basic/example.ts @@ -72,7 +72,6 @@ const program = Effect.gen(function* () { Stream.runForEach((part) => { // Only print text deltas to show streaming effect if (part.type === 'text-delta') { - // TODO: print without newlines return Console.log(part.delta); } // Log other part types for demonstration From eec029a2a8a24dcd9c48d3a5213fce918b938696 Mon Sep 17 00:00:00 2001 From: Tom Aylott Date: Wed, 12 Nov 2025 18:08:56 -0500 Subject: [PATCH 4/4] Use process.stdout.write for streaming without newlines --- typescript/effect-ai/src/basic/example.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/typescript/effect-ai/src/basic/example.ts b/typescript/effect-ai/src/basic/example.ts index 08c830a..1ea44a3 100644 --- a/typescript/effect-ai/src/basic/example.ts +++ b/typescript/effect-ai/src/basic/example.ts @@ -72,7 +72,7 @@ const program = Effect.gen(function* () { Stream.runForEach((part) => { // Only print text deltas to show streaming effect if (part.type === 'text-delta') { - return Console.log(part.delta); + return Effect.sync(() => process.stdout.write(part.delta)); } // Log other part types for demonstration return Console.log(`[${part.type}]`);