Skip to content

Commit b9e03ff

Browse files
committed
feat: enhance AI extension with streaming support and new SDK features
- Added support for streaming chat events in the AI extension, including new `ChatStream` functionality in the LLM providers.
- Introduced new structures for handling streaming events, such as `ChatStreamEvent` and `ClientStreamEvent`.
- Enhanced the SDK with new components for managing agent workflows and improved error handling.
- Updated the dashboard extension to include real-time metrics and UI improvements.
- Cleaned up configuration options and improved documentation for better usability.
1 parent 62f9d4e commit b9e03ff

118 files changed

Lines changed: 37968 additions & 545 deletions

File tree

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

.gitignore

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,16 @@ test-*
1818
*.a
1919
*.out
2020

21+
# Go compiled binaries (executables without extensions)
22+
/forge
23+
/forge-*
24+
cmd/*/main
25+
cmd/*/*
26+
!cmd/*/*.go
27+
!cmd/*/*.mod
28+
!cmd/*/*.sum
29+
!cmd/*/go.*
30+
2131
# Architecture file
2232
arch.md
2333

extensions/ai/extension.go

Lines changed: 30 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -14,29 +14,34 @@ import (
1414

1515
// Extension implements forge.Extension for AI.
1616
type Extension struct {
17-
config Config
18-
ai internal.AI
19-
llmManager *llm.LLMManager
20-
logger forge.Logger
21-
metrics forge.Metrics
22-
app forge.App
17+
config Config
18+
configProvided bool // Track if config was explicitly provided via options
19+
ai internal.AI
20+
llmManager *llm.LLMManager
21+
logger forge.Logger
22+
metrics forge.Metrics
23+
app forge.App
2324
}
2425

2526
// NewExtension creates a new AI extension with variadic options.
2627
func NewExtension(opts ...ConfigOption) *Extension {
2728
config := DefaultConfig()
29+
hasOpts := len(opts) > 0
2830
for _, opt := range opts {
2931
opt(&config)
3032
}
3133

3234
return &Extension{
33-
config: config,
35+
config: config,
36+
configProvided: hasOpts,
3437
}
3538
}
3639

3740
// NewExtensionWithConfig creates a new AI extension with a complete config.
3841
func NewExtensionWithConfig(config Config) *Extension {
39-
return NewExtension(WithConfig(config))
42+
ext := NewExtension(WithConfig(config))
43+
ext.configProvided = true // Mark as explicitly configured
44+
return ext
4045
}
4146

4247
// Name returns the extension name.
@@ -72,21 +77,28 @@ func (e *Extension) Register(app forge.App) error {
7277
e.metrics = m
7378
}
7479

75-
// Get configuration
76-
if configMgr, err := forge.GetConfigManager(app.Container()); err == nil {
77-
if err := configMgr.Bind("extensions.ai", &e.config); err != nil {
78-
if e.logger != nil {
79-
e.logger.Info("using default AI config", forge.F("reason", err.Error()))
80-
}
81-
82-
if err := configMgr.Bind("ai", &e.config); err != nil {
80+
// Get configuration from config manager only if not explicitly provided via options
81+
if !e.configProvided {
82+
if configMgr, err := forge.GetConfigManager(app.Container()); err == nil {
83+
if err := configMgr.Bind("extensions.ai", &e.config); err != nil {
8384
if e.logger != nil {
84-
e.logger.Info("using default AI config", forge.F("reason", err.Error()))
85+
e.logger.Debug("no extensions.ai config found", forge.F("reason", err.Error()))
8586
}
8687

87-
e.config = DefaultConfig()
88+
if err := configMgr.Bind("ai", &e.config); err != nil {
89+
if e.logger != nil {
90+
e.logger.Debug("no ai config found, using defaults", forge.F("reason", err.Error()))
91+
}
92+
// DefaultConfig already applied in NewExtension
93+
}
8894
}
8995
}
96+
} else {
97+
if e.logger != nil {
98+
e.logger.Debug("using programmatically provided AI config",
99+
forge.F("provider", e.config.LLM.DefaultProvider),
100+
forge.F("providers", len(e.config.LLM.Providers)))
101+
}
90102
}
91103

92104
// Convert to internal config

extensions/ai/llm/chat.go

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -108,6 +108,14 @@ type ChatStreamEvent struct {
108108
Error string `json:"error,omitempty"`
109109
Metadata map[string]any `json:"metadata,omitempty"`
110110
RequestID string `json:"request_id"`
111+
112+
// Block-level streaming support (for providers like Anthropic)
113+
// BlockType indicates the type of content block (thinking, text, tool_use)
114+
BlockType string `json:"block_type,omitempty"`
115+
// BlockIndex is the index of the content block within the message
116+
BlockIndex int `json:"block_index,omitempty"`
117+
// BlockState indicates the state of the block (start, delta, stop)
118+
BlockState string `json:"block_state,omitempty"`
111119
}
112120

113121
// ChatStreamHandler handles streaming chat events.

extensions/ai/llm/manager.go

Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -465,6 +465,75 @@ func (m *LLMManager) GetProviders() map[string]LLMProvider {
465465
return providers
466466
}
467467

468+
// ChatStream performs a streaming chat completion request.
469+
func (m *LLMManager) ChatStream(ctx context.Context, request ChatRequest, handler func(ChatStreamEvent) error) error {
470+
startTime := time.Now()
471+
472+
// Get provider
473+
providerName := request.Provider
474+
if providerName == "" {
475+
providerName = m.config.DefaultProvider
476+
}
477+
478+
provider, err := m.GetProvider(providerName)
479+
if err != nil {
480+
return err
481+
}
482+
483+
// Apply timeout
484+
if m.config.Timeout > 0 {
485+
var cancel context.CancelFunc
486+
ctx, cancel = context.WithTimeout(ctx, m.config.Timeout)
487+
defer cancel()
488+
}
489+
490+
// Check if provider supports streaming
491+
streamer, ok := provider.(StreamingProvider)
492+
if !ok {
493+
// Fallback: use regular Chat and simulate streaming
494+
// IMPORTANT: Set Stream to false for non-streaming providers to prevent
495+
// the provider from returning SSE-formatted data that can't be parsed
496+
request.Stream = false
497+
response, err := provider.Chat(ctx, request)
498+
if err != nil {
499+
return err
500+
}
501+
502+
// Simulate streaming by sending a single event with full content
503+
if len(response.Choices) > 0 {
504+
event := ChatStreamEvent{
505+
Type: "message",
506+
ID: response.ID,
507+
Model: response.Model,
508+
Provider: response.Provider,
509+
Choices: response.Choices,
510+
Usage: response.Usage,
511+
RequestID: response.RequestID,
512+
}
513+
if err := handler(event); err != nil {
514+
return err
515+
}
516+
}
517+
518+
// Send done event
519+
doneEvent := ChatStreamEvent{
520+
Type: "done",
521+
RequestID: request.RequestID,
522+
}
523+
return handler(doneEvent)
524+
}
525+
526+
// Use native streaming - set stream flag for streaming providers
527+
request.Stream = true
528+
err = streamer.ChatStream(ctx, request, handler)
529+
530+
// Update statistics
531+
latency := time.Since(startTime)
532+
m.updateStats(providerName, "chat_stream", latency, err, nil)
533+
534+
return err
535+
}
536+
468537
// HealthCheck performs a health check on all providers.
469538
func (m *LLMManager) HealthCheck(ctx context.Context) error {
470539
m.mu.RLock()

extensions/ai/llm/provider.go

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,13 @@ type LLMProvider interface {
2121
HealthCheck(ctx context.Context) error
2222
}
2323

24+
// StreamingProvider defines the interface for LLM providers that support streaming.
25+
type StreamingProvider interface {
26+
LLMProvider
27+
// ChatStream performs a streaming chat completion request
28+
ChatStream(ctx context.Context, request ChatRequest, handler func(ChatStreamEvent) error) error
29+
}
30+
2431
// LLMUsage represents usage statistics for an LLM provider.
2532
type LLMUsage struct {
2633
InputTokens int64 `json:"input_tokens"`

0 commit comments

Comments
 (0)