From 4f5044cc1e73afab0a9b28e1fa09faaa66990c47 Mon Sep 17 00:00:00 2001 From: Michael McNees Date: Tue, 24 Mar 2026 20:21:31 -0400 Subject: [PATCH 01/21] docs: add spec for data transformation CEL functions (#14) Co-Authored-By: Claude Opus 4.6 (1M context) --- .../2026-03-24-data-transformation-design.md | 174 ++++++++++++++++++ 1 file changed, 174 insertions(+) create mode 100644 docs/superpowers/specs/2026-03-24-data-transformation-design.md diff --git a/docs/superpowers/specs/2026-03-24-data-transformation-design.md b/docs/superpowers/specs/2026-03-24-data-transformation-design.md new file mode 100644 index 0000000..f679f84 --- /dev/null +++ b/docs/superpowers/specs/2026-03-24-data-transformation-design.md @@ -0,0 +1,174 @@ +# Data Transformation — CEL Functions & Documentation + +**Date:** 2026-03-24 +**Issue:** [#14 — Data Transformation Step](https://github.com/dvflw/mantle/issues/14) +**Status:** Draft + +## Problem + +Mantle workflows can pass data between steps via CEL expressions, but lack the tools to reshape that data. The common pattern — fetch from API, normalize for a DB schema, store — requires either manual field-by-field construction (not possible in CEL today) or routing through the AI connector (slow, expensive, non-deterministic for structural transforms). + +## Discovery: Existing Hidden Capabilities + +CEL's default environment includes macros that already work in Mantle but were never documented or tested: + +- `.map(item, expr)` — transform each element in a list +- `.filter(item, expr)` — keep elements matching a predicate +- `.exists(item, expr)` — true if any element matches +- `.all(item, expr)` — true if all elements match +- `.exists_one(item, expr)` — true if exactly one matches + +These need documentation and tests, not implementation. + +## Design + +### Custom CEL Functions + +All functions registered in `internal/cel/functions.go` via `cel.Function()` options passed to `cel.NewEnv()`. Pure functions, no side effects. 
+ +#### String Functions (methods on string type) + +| Function | Example | Result | +|----------|---------|--------| +| `toLower()` | `"HELLO".toLower()` | `"hello"` | +| `toUpper()` | `"hello".toUpper()` | `"HELLO"` | +| `trim()` | `" hello ".trim()` | `"hello"` | +| `replace(old, new)` | `"foo-bar".replace("-", "_")` | `"foo_bar"` | +| `split(delim)` | `"a,b,c".split(",")` | `["a", "b", "c"]` | + +#### Type Coercion (global functions) + +| Function | Example | Result | +|----------|---------|--------| +| `parseInt(string)` | `parseInt("42")` | `42` | +| `parseFloat(string)` | `parseFloat("3.14")` | `3.14` | +| `toString(any)` | `toString(42)` | `"42"` | + +#### Object Construction (global function) + +| Function | Example | Result | +|----------|---------|--------| +| `obj(k1, v1, k2, v2, ...)` | `obj("name", "alice", "age", 30)` | `{"name": "alice", "age": 30}` | + +Errors on odd number of args or non-string keys. Enables building maps for DB inserts and API payloads. + +#### Null Coalescing (global function) + +| Function | Example | Result | +|----------|---------|--------| +| `default(value, fallback)` | `default(steps.x.output.json.name, "unknown")` | value if non-null, else `"unknown"` | + +#### JSON (global functions) + +| Function | Example | Result | +|----------|---------|--------| +| `jsonEncode(value)` | `jsonEncode(obj("a", 1))` | `'{"a":1}'` | +| `jsonDecode(string)` | `jsonDecode('{"a":1}')` | `{"a": 1}` | + +#### Date/Time (global functions) + +| Function | Example | Result | +|----------|---------|--------| +| `timestamp(string)` | `timestamp("2026-03-24T19:00:00Z")` | timestamp value | +| `formatTimestamp(ts, layout)` | `formatTimestamp(ts, "2006-01-02")` | `"2026-03-24"` | + +Uses Go time layout strings. 
+ +#### Collections (global function) + +| Function | Example | Result | +|----------|---------|--------| +| `flatten(list)` | `flatten([[1,2],[3,4]])` | `[1,2,3,4]` | + +### Integration Point + +In `internal/cel/cel.go`, the `NewEvaluator` function passes function options to `cel.NewEnv()`: + +```go +func NewEvaluator() (*Evaluator, error) { + env, err := cel.NewEnv( + cel.Variable("steps", cel.MapType(cel.StringType, cel.DynType)), + cel.Variable("inputs", cel.MapType(cel.StringType, cel.DynType)), + cel.Variable("env", cel.MapType(cel.StringType, cel.StringType)), + cel.Variable("trigger", cel.MapType(cel.StringType, cel.DynType)), + // Custom functions + customFunctions()..., + ) + // ... +} +``` + +`customFunctions()` is defined in `functions.go` and returns `[]cel.EnvOption`. + +### Error Handling + +All errors surface through the existing `Eval` error path: +- Type mismatches: `parseInt("abc")` → evaluation error +- `obj()` with odd args → evaluation error +- `obj()` with non-string keys → evaluation error +- `jsonDecode()` with invalid JSON → evaluation error +- `timestamp()` with unparseable string → evaluation error + +No new error types needed. + +## Documentation + +### CEL Expressions Reference Update + +Update `site/src/content/docs/concepts/expressions.md` to add: +- All custom functions organized by category +- The already-working macros (`.map()`, `.filter()`, `.exists()`, `.all()`, `.exists_one()`) +- Examples for each function + +### New: Data Transformations Guide + +New page at `site/src/content/docs/getting-started/data-transformations.md` covering three patterns: + +**Pattern 1 — Structural transforms (CEL only):** +API result → `.map()` + `obj()` → Postgres INSERT. No AI needed. For when the transform is a known schema mapping. + +**Pattern 2 — AI-powered transforms:** +Unstructured data → AI connector with `output_schema` → structured output. For when the transform requires interpretation, classification, or natural language understanding. 
+ +**Pattern 3 — Hybrid:** +Fetch → CEL for structural normalization → AI for enrichment/classification → Store. Combines both approaches. + +Each pattern includes a complete example workflow YAML. + +### New Example Workflows + +- `examples/data-transform-api-to-db.yaml` — Fetch API → CEL `.map()` + `obj()` → Postgres INSERT (the exact use case from the issue) +- `examples/ai-data-enrichment.yaml` — Fetch data → AI classify/enrich with structured output → store + +## Files Changed + +### Modified + +| File | Change | +|------|--------| +| `internal/cel/cel.go` | Pass `customFunctions()` options to `cel.NewEnv()` | +| `site/src/content/docs/concepts/expressions.md` | Add function reference, document macros | + +### New + +| File | Purpose | +|------|---------| +| `internal/cel/functions.go` | All custom function definitions | +| `internal/cel/functions_test.go` | Table-driven tests for every custom function | +| `internal/cel/macros_test.go` | Tests for built-in macros (lock in existing behavior) | +| `site/src/content/docs/getting-started/data-transformations.md` | Transformation patterns guide | +| `examples/data-transform-api-to-db.yaml` | Structural transform example workflow | +| `examples/ai-data-enrichment.yaml` | AI transform example workflow | + +## Non-Goals + +- **Custom user-defined functions** — no plugin/extension API for CEL functions +- **Loops or control flow** — CEL is intentionally non-Turing-complete +- **Regex** — deferring to a future issue; CEL's `matches()` function could be enabled later +- **New connector type** — transformations happen in CEL expressions, not as a separate step type + +## Testing Strategy + +- **`functions_test.go`** — table-driven: each function gets happy path + error cases (wrong types, empty inputs, edge cases) +- **`macros_test.go`** — tests for `.map()`, `.filter()`, `.exists()`, `.all()`, `.exists_one()` with list data to lock in behavior +- **Existing tests unaffected** — custom functions are additive; no behavior 
changes to existing expressions From a07483a240a57770667cef25df8695be9ae296f8 Mon Sep 17 00:00:00 2001 From: Michael McNees Date: Tue, 24 Mar 2026 20:29:50 -0400 Subject: [PATCH 02/21] docs: add implementation plan for data transformation CEL functions (#14) 12-task plan: macro tests, string/type/collection/JSON/time functions, obj() construction, docs updates, and example workflows. Co-Authored-By: Claude Opus 4.6 (1M context) --- .../plans/2026-03-24-data-transformation.md | 1383 +++++++++++++++++ 1 file changed, 1383 insertions(+) create mode 100644 docs/superpowers/plans/2026-03-24-data-transformation.md diff --git a/docs/superpowers/plans/2026-03-24-data-transformation.md b/docs/superpowers/plans/2026-03-24-data-transformation.md new file mode 100644 index 0000000..90e0bc4 --- /dev/null +++ b/docs/superpowers/plans/2026-03-24-data-transformation.md @@ -0,0 +1,1383 @@ +# Data Transformation CEL Functions Implementation Plan + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Goal:** Add custom CEL functions (string, type coercion, object construction, JSON, date/time, collections, null coalescing) and comprehensive documentation for data transformation patterns. + +**Architecture:** Custom functions are registered as `cel.Function` options in `cel.NewEnv()`. A new `functions.go` file defines all functions; `cel.go` is modified only to pass them through. Documentation covers both the new functions and the already-working-but-undocumented macros (`.map()`, `.filter()`, etc.). 
+ +**Tech Stack:** Go, cel-go v0.27.0 (`cel.Function`, `cel.Overload`, `cel.UnaryBinding`/`cel.BinaryBinding`/`cel.FunctionBinding`), `encoding/json`, `time` + +**Spec:** `docs/superpowers/specs/2026-03-24-data-transformation-design.md` + +--- + +### Task 1: Test and document built-in macros + +**Files:** +- Create: `internal/cel/macros_test.go` + +These macros already work but have no tests. This task locks in their behavior. + +- [ ] **Step 1: Write tests for built-in macros** + +In `internal/cel/macros_test.go`: + +```go +package cel + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func newListContext() *Context { + return &Context{ + Steps: map[string]map[string]any{ + "fetch": { + "output": map[string]any{ + "items": []any{ + map[string]any{"name": "alice", "age": int64(30)}, + map[string]any{"name": "bob", "age": int64(17)}, + map[string]any{"name": "charlie", "age": int64(25)}, + }, + }, + }, + }, + Inputs: map[string]any{}, + } +} + +func TestMacro_Map(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`steps.fetch.output.items.map(item, item.name)`, newListContext()) + require.NoError(t, err) + assert.Equal(t, []any{"alice", "bob", "charlie"}, result) +} + +func TestMacro_Filter(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`steps.fetch.output.items.filter(item, item.age >= 21)`, newListContext()) + require.NoError(t, err) + + items, ok := result.([]any) + require.True(t, ok) + assert.Len(t, items, 2) +} + +func TestMacro_Exists(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`steps.fetch.output.items.exists(item, item.name == "bob")`, newListContext()) + require.NoError(t, err) + assert.Equal(t, true, result) + + result, err = eval.Eval(`steps.fetch.output.items.exists(item, item.name == "dave")`, newListContext()) + require.NoError(t, 
err) + assert.Equal(t, false, result) +} + +func TestMacro_All(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`steps.fetch.output.items.all(item, item.age > 0)`, newListContext()) + require.NoError(t, err) + assert.Equal(t, true, result) + + result, err = eval.Eval(`steps.fetch.output.items.all(item, item.age >= 21)`, newListContext()) + require.NoError(t, err) + assert.Equal(t, false, result) +} + +func TestMacro_ExistsOne(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`steps.fetch.output.items.exists_one(item, item.name == "alice")`, newListContext()) + require.NoError(t, err) + assert.Equal(t, true, result) +} + +func TestMacro_MapAndFilter_Chained(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`steps.fetch.output.items.filter(item, item.age >= 21).map(item, item.name)`, newListContext()) + require.NoError(t, err) + assert.Equal(t, []any{"alice", "charlie"}, result) +} +``` + +- [ ] **Step 2: Run tests to verify they pass** + +Run: `go test ./internal/cel/ -run "TestMacro_" -v` +Expected: PASS — all macros already work, we're just adding coverage + +- [ ] **Step 3: Commit** + +```bash +git add internal/cel/macros_test.go +git commit -m "test(cel): add coverage for built-in map/filter/exists/all macros" +``` + +--- + +### Task 2: String functions — toLower, toUpper, trim + +**Files:** +- Create: `internal/cel/functions.go` +- Create: `internal/cel/functions_test.go` +- Modify: `internal/cel/cel.go:30-36` + +- [ ] **Step 1: Write failing tests** + +In `internal/cel/functions_test.go`: + +```go +package cel + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFunc_ToLower(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + tests := []struct { + name string + expr string + want any + }{ + {"basic", 
`"HELLO".toLower()`, "hello"}, + {"mixed", `"Hello World".toLower()`, "hello world"}, + {"already_lower", `"hello".toLower()`, "hello"}, + {"empty", `"".toLower()`, ""}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := eval.Eval(tt.expr, newTestContext()) + require.NoError(t, err) + assert.Equal(t, tt.want, result) + }) + } +} + +func TestFunc_ToUpper(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + tests := []struct { + name string + expr string + want any + }{ + {"basic", `"hello".toUpper()`, "HELLO"}, + {"mixed", `"Hello World".toUpper()`, "HELLO WORLD"}, + {"empty", `"".toUpper()`, ""}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := eval.Eval(tt.expr, newTestContext()) + require.NoError(t, err) + assert.Equal(t, tt.want, result) + }) + } +} + +func TestFunc_Trim(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + tests := []struct { + name string + expr string + want any + }{ + {"spaces", `" hello ".trim()`, "hello"}, + {"tabs", "\"\\thello\\t\".trim()", "hello"}, + {"no_whitespace", `"hello".trim()`, "hello"}, + {"empty", `"".trim()`, ""}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := eval.Eval(tt.expr, newTestContext()) + require.NoError(t, err) + assert.Equal(t, tt.want, result) + }) + } +} +``` + +- [ ] **Step 2: Run tests to verify they fail** + +Run: `go test ./internal/cel/ -run "TestFunc_ToLower|TestFunc_ToUpper|TestFunc_Trim" -v` +Expected: FAIL — functions not registered + +- [ ] **Step 3: Create functions.go with string functions and wire into cel.go** + +In `internal/cel/functions.go`: + +```go +package cel + +import ( + "strings" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +// customFunctions returns all custom CEL function options for the Mantle environment. 
+func customFunctions() []cel.EnvOption { + return []cel.EnvOption{ + stringFunctions(), + } +} + +func stringFunctions() cel.EnvOption { + return cel.Lib(&stringLib{}) +} + +type stringLib struct{} + +func (l *stringLib) CompileOptions() []cel.EnvOption { + return []cel.EnvOption{ + cel.Function("toLower", + cel.MemberOverload("string_toLower", + []*cel.Type{cel.StringType}, + cel.StringType, + cel.UnaryBinding(func(val ref.Val) ref.Val { + return types.String(strings.ToLower(string(val.(types.String)))) + }), + ), + ), + cel.Function("toUpper", + cel.MemberOverload("string_toUpper", + []*cel.Type{cel.StringType}, + cel.StringType, + cel.UnaryBinding(func(val ref.Val) ref.Val { + return types.String(strings.ToUpper(string(val.(types.String)))) + }), + ), + ), + cel.Function("trim", + cel.MemberOverload("string_trim", + []*cel.Type{cel.StringType}, + cel.StringType, + cel.UnaryBinding(func(val ref.Val) ref.Val { + return types.String(strings.TrimSpace(string(val.(types.String)))) + }), + ), + ), + } +} + +func (l *stringLib) ProgramOptions() []cel.ProgramOption { + return nil +} +``` + +In `internal/cel/cel.go`, update `NewEvaluator` (lines 30-36) to include custom functions: + +```go +func NewEvaluator() (*Evaluator, error) { + opts := []cel.EnvOption{ + cel.Variable("steps", cel.MapType(cel.StringType, cel.DynType)), + cel.Variable("inputs", cel.MapType(cel.StringType, cel.DynType)), + cel.Variable("env", cel.MapType(cel.StringType, cel.StringType)), + cel.Variable("trigger", cel.MapType(cel.StringType, cel.DynType)), + } + opts = append(opts, customFunctions()...) + + env, err := cel.NewEnv(opts...) 
+ if err != nil { + return nil, fmt.Errorf("creating CEL environment: %w", err) + } + return &Evaluator{env: env, envCache: envVars()}, nil +} +``` + +- [ ] **Step 4: Run tests to verify they pass** + +Run: `go test ./internal/cel/ -run "TestFunc_ToLower|TestFunc_ToUpper|TestFunc_Trim" -v` +Expected: PASS + +- [ ] **Step 5: Run all existing CEL tests to verify no regression** + +Run: `go test ./internal/cel/ -v` +Expected: PASS — all existing tests still pass + +- [ ] **Step 6: Commit** + +```bash +git add internal/cel/functions.go internal/cel/functions_test.go internal/cel/cel.go +git commit -m "feat(cel): add toLower, toUpper, trim string functions" +``` + +--- + +### Task 3: String functions — replace and split + +**Files:** +- Modify: `internal/cel/functions.go` +- Modify: `internal/cel/functions_test.go` + +- [ ] **Step 1: Write failing tests** + +Append to `internal/cel/functions_test.go`: + +```go +func TestFunc_Replace(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + tests := []struct { + name string + expr string + want any + }{ + {"basic", `"foo-bar".replace("-", "_")`, "foo_bar"}, + {"multiple", `"a.b.c".replace(".", "/")`, "a/b/c"}, + {"no_match", `"hello".replace("x", "y")`, "hello"}, + {"empty_replacement", `"hello".replace("l", "")`, "heo"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := eval.Eval(tt.expr, newTestContext()) + require.NoError(t, err) + assert.Equal(t, tt.want, result) + }) + } +} + +func TestFunc_Split(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + tests := []struct { + name string + expr string + want any + }{ + {"comma", `"a,b,c".split(",")`, []any{"a", "b", "c"}}, + {"space", `"hello world".split(" ")`, []any{"hello", "world"}}, + {"no_match", `"hello".split(",")`, []any{"hello"}}, + {"empty_string", `"".split(",")`, []any{""}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := eval.Eval(tt.expr, 
newTestContext())
+			require.NoError(t, err)
+			assert.Equal(t, tt.want, result)
+		})
+	}
+}
+```
+
+- [ ] **Step 2: Run tests to verify they fail**
+
+Run: `go test ./internal/cel/ -run "TestFunc_Replace|TestFunc_Split" -v`
+Expected: FAIL
+
+- [ ] **Step 3: Add replace and split to stringLib.CompileOptions**
+
+In `functions.go`, add to `stringLib.CompileOptions()`:
+
+```go
+		cel.Function("replace",
+			cel.MemberOverload("string_replace",
+				[]*cel.Type{cel.StringType, cel.StringType, cel.StringType},
+				cel.StringType,
+				cel.FunctionBinding(func(args ...ref.Val) ref.Val {
+					s := string(args[0].(types.String))
+					old := string(args[1].(types.String))
+					// Named repl (not "new") to avoid shadowing the Go builtin.
+					repl := string(args[2].(types.String))
+					return types.String(strings.ReplaceAll(s, old, repl))
+				}),
+			),
+		),
+		cel.Function("split",
+			cel.MemberOverload("string_split",
+				[]*cel.Type{cel.StringType, cel.StringType},
+				cel.ListType(cel.StringType),
+				cel.BinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+					s := string(lhs.(types.String))
+					sep := string(rhs.(types.String))
+					// NativeToValue converts []string directly; no manual
+					// per-element ref.Val conversion loop is needed.
+					return types.DefaultTypeAdapter.NativeToValue(strings.Split(s, sep))
+				}),
+			),
+		),
+```
+
+- [ ] **Step 4: Run tests to verify they pass**
+
+Run: `go test ./internal/cel/ -run "TestFunc_Replace|TestFunc_Split" -v`
+Expected: PASS
+
+- [ ] **Step 5: Commit**
+
+```bash
+git add internal/cel/functions.go internal/cel/functions_test.go
+git commit -m "feat(cel): add replace and split string functions"
+```
+
+---
+
+### Task 4: Type coercion — parseInt, parseFloat, toString
+
+**Files:**
+- Modify: `internal/cel/functions.go`
+- Modify: `internal/cel/functions_test.go`
+
+- [ ] **Step 1: Write failing tests**
+
+Append to `internal/cel/functions_test.go`:
+
+```go
+func TestFunc_ParseInt(t *testing.T) {
+	eval, err := NewEvaluator()
+	require.NoError(t, err)
+
+	tests := []struct {
+		name    string
+		expr    string
+		want    any
+		wantErr bool
+	
}{ + {"valid", `parseInt("42")`, int64(42), false}, + {"negative", `parseInt("-7")`, int64(-7), false}, + {"zero", `parseInt("0")`, int64(0), false}, + {"invalid", `parseInt("abc")`, nil, true}, + {"float_string", `parseInt("3.14")`, nil, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := eval.Eval(tt.expr, newTestContext()) + if tt.wantErr { + assert.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tt.want, result) + } + }) + } +} + +func TestFunc_ParseFloat(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + tests := []struct { + name string + expr string + want any + wantErr bool + }{ + {"valid", `parseFloat("3.14")`, 3.14, false}, + {"integer", `parseFloat("42")`, 42.0, false}, + {"negative", `parseFloat("-1.5")`, -1.5, false}, + {"invalid", `parseFloat("abc")`, nil, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := eval.Eval(tt.expr, newTestContext()) + if tt.wantErr { + assert.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tt.want, result) + } + }) + } +} + +func TestFunc_ToString(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + tests := []struct { + name string + expr string + want any + }{ + {"int", `toString(42)`, "42"}, + {"bool", `toString(true)`, "true"}, + {"string", `toString("hello")`, "hello"}, + {"float", `toString(3.14)`, "3.14"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := eval.Eval(tt.expr, newTestContext()) + require.NoError(t, err) + assert.Equal(t, tt.want, result) + }) + } +} +``` + +- [ ] **Step 2: Run tests to verify they fail** + +Run: `go test ./internal/cel/ -run "TestFunc_ParseInt|TestFunc_ParseFloat|TestFunc_ToString" -v` +Expected: FAIL + +- [ ] **Step 3: Add type coercion functions** + +In `functions.go`, add a new library and register it in `customFunctions()`: + +```go +func typeFunctions() cel.EnvOption { + 
return cel.Lib(&typeLib{}) +} + +type typeLib struct{} + +func (l *typeLib) CompileOptions() []cel.EnvOption { + return []cel.EnvOption{ + cel.Function("parseInt", + cel.Overload("parseInt_string", + []*cel.Type{cel.StringType}, + cel.IntType, + cel.UnaryBinding(func(val ref.Val) ref.Val { + s := string(val.(types.String)) + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return types.NewErr("parseInt: %v", err) + } + return types.Int(n) + }), + ), + ), + cel.Function("parseFloat", + cel.Overload("parseFloat_string", + []*cel.Type{cel.StringType}, + cel.DoubleType, + cel.UnaryBinding(func(val ref.Val) ref.Val { + s := string(val.(types.String)) + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return types.NewErr("parseFloat: %v", err) + } + return types.Double(f) + }), + ), + ), + cel.Function("toString", + cel.Overload("toString_any", + []*cel.Type{cel.DynType}, + cel.StringType, + cel.UnaryBinding(func(val ref.Val) ref.Val { + return types.String(fmt.Sprintf("%v", val.Value())) + }), + ), + ), + } +} + +func (l *typeLib) ProgramOptions() []cel.ProgramOption { + return nil +} +``` + +Add `"fmt"` and `"strconv"` to imports. 
Update `customFunctions()`: + +```go +func customFunctions() []cel.EnvOption { + return []cel.EnvOption{ + stringFunctions(), + typeFunctions(), + } +} +``` + +- [ ] **Step 4: Run tests to verify they pass** + +Run: `go test ./internal/cel/ -run "TestFunc_ParseInt|TestFunc_ParseFloat|TestFunc_ToString" -v` +Expected: PASS + +- [ ] **Step 5: Commit** + +```bash +git add internal/cel/functions.go internal/cel/functions_test.go +git commit -m "feat(cel): add parseInt, parseFloat, toString type coercion functions" +``` + +--- + +### Task 5: Object construction — obj() + +**Files:** +- Modify: `internal/cel/functions.go` +- Modify: `internal/cel/functions_test.go` + +- [ ] **Step 1: Write failing tests** + +Append to `internal/cel/functions_test.go`: + +```go +func TestFunc_Obj(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + tests := []struct { + name string + expr string + want any + wantErr bool + }{ + { + "basic", + `obj("name", "alice", "age", 30)`, + map[string]any{"name": "alice", "age": int64(30)}, + false, + }, + { + "single_pair", + `obj("key", "value")`, + map[string]any{"key": "value"}, + false, + }, + { + "nested_with_step", + `obj("status", steps.fetch.output.status)`, + map[string]any{"status": int64(200)}, + false, + }, + { + "odd_args", + `obj("key")`, + nil, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := eval.Eval(tt.expr, newTestContext()) + if tt.wantErr { + assert.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tt.want, result) + } + }) + } +} +``` + +- [ ] **Step 2: Run tests to verify they fail** + +Run: `go test ./internal/cel/ -run TestFunc_Obj -v` +Expected: FAIL + +- [ ] **Step 3: Add obj function** + +In `functions.go`, add: + +```go +func collectionFunctions() cel.EnvOption { + return cel.Lib(&collectionLib{}) +} + +type collectionLib struct{} + +func (l *collectionLib) CompileOptions() []cel.EnvOption { + return []cel.EnvOption{ + 
cel.Function("obj", objOverloads()...),
+	}
+}
+
+// objOverloads declares obj() once per arity (1..16 key-value pairs).
+// cel-go has no variadic overload declarations, so a fixed set of arities
+// is registered; keys are typed as string so non-string keys and odd
+// argument counts are rejected at expression compile time.
+func objOverloads() []cel.FunctionOpt {
+	const maxPairs = 16
+	opts := make([]cel.FunctionOpt, 0, maxPairs)
+	for pairs := 1; pairs <= maxPairs; pairs++ {
+		argTypes := make([]*cel.Type, 0, pairs*2)
+		for i := 0; i < pairs; i++ {
+			argTypes = append(argTypes, cel.StringType, cel.DynType)
+		}
+		opts = append(opts, cel.Overload(
+			fmt.Sprintf("obj_%d_pairs", pairs),
+			argTypes,
+			cel.DynType,
+			cel.FunctionBinding(objBinding),
+		))
+	}
+	return opts
+}
+
+// objBinding builds a map from alternating key/value arguments. The runtime
+// checks are defensive; the typed overloads already reject bad call shapes.
+func objBinding(args ...ref.Val) ref.Val {
+	if len(args)%2 != 0 {
+		return types.NewErr("obj: requires even number of arguments (key-value pairs), got %d", len(args))
+	}
+	m := make(map[string]any, len(args)/2)
+	for i := 0; i < len(args); i += 2 {
+		key, ok := args[i].(types.String)
+		if !ok {
+			return types.NewErr("obj: key at position %d must be a string, got %s", i, args[i].Type())
+		}
+		m[string(key)] = refToNative(args[i+1])
+	}
+	return types.DefaultTypeAdapter.NativeToValue(m)
+}
+
+func (l *collectionLib) ProgramOptions() []cel.ProgramOption {
+	return nil
+}
+```
+
+Update `customFunctions()`:
+
+```go
+func customFunctions() []cel.EnvOption {
+	return []cel.EnvOption{
+		stringFunctions(),
+		typeFunctions(),
+		collectionFunctions(),
+	}
+}
+```
+
+Note: `refToNative` is defined in `cel.go` and accessible since both files are in the same package. Because cel-go does not support variadic function declarations, `obj()` is registered once per arity (1–16 pairs); calls with more pairs fail at compile time, which can be raised later if needed.
+
+- [ ] **Step 4: Run tests to verify they pass**
+
+Run: `go test ./internal/cel/ -run TestFunc_Obj -v`
+Expected: PASS
+
+- [ ] **Step 5: Commit**
+
+```bash
+git add internal/cel/functions.go internal/cel/functions_test.go
+git commit -m "feat(cel): add obj() map construction function"
+```
+
+---
+
+### Task 6: Utility functions — default, flatten
+
+**Files:**
+- Modify: `internal/cel/functions.go`
+- Modify: `internal/cel/functions_test.go`
+
+- [ ] **Step 1: Write failing tests**
+
+Append to `internal/cel/functions_test.go`:
+
+```go
+func TestFunc_Default(t *testing.T) {
+	eval, err := NewEvaluator()
+	require.NoError(t, err)
+
+	// Test with a value that exists.
+	result, err := eval.Eval(`default(inputs.url, "fallback")`, newTestContext())
+	require.NoError(t, err)
+	assert.Equal(t, "https://example.com", result)
+
+	// Null coalesces to the fallback — the primary use case from the spec.
+	result, err = eval.Eval(`default(null, "fallback")`, newTestContext())
+	require.NoError(t, err)
+	assert.Equal(t, "fallback", result)
+
+	// Empty string is not null, so it is returned as-is.
+	result, err = eval.Eval(`default("", "fallback")`, newTestContext())
+	require.NoError(t, err)
+	assert.Equal(t, "", result)
+}
+
+func TestFunc_Flatten(t *testing.T) {
+	eval, err := NewEvaluator()
+	require.NoError(t, err)
+
+	ctx := &Context{
+		Steps: map[string]map[string]any{
+			"data": {
+				"output": map[string]any{
+					"nested": []any{
+						[]any{int64(1), int64(2)},
+						[]any{int64(3), int64(4)},
+					},
+				},
+			},
+		},
+		Inputs: map[string]any{},
+	}
+
+	result, err := eval.Eval(`flatten(steps.data.output.nested)`, ctx)
+	require.NoError(t, err)
+	assert.Equal(t, []any{int64(1), int64(2), int64(3), int64(4)}, result)
+}
+```
+
+- [ ] **Step 2: Run tests to verify they fail**
+
+Run: `go test ./internal/cel/ -run "TestFunc_Default|TestFunc_Flatten" -v`
+Expected: FAIL
+
+- [ ] **Step 3: Add default and flatten to collectionLib**
+
+In `functions.go`, add to `collectionLib.CompileOptions()`:
+
+```go
+	cel.Function("default",
+		cel.Overload("default_any_any",
+			[]*cel.Type{cel.DynType, cel.DynType},
+			cel.DynType,
+			cel.BinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+				// Spec: return the value if non-null, else the fallback.
+				if lhs == nil {
+					return rhs
+				}
+				if _, isNull := lhs.(types.Null); isNull {
+					return rhs
+				}
+				// Defensive: strict cel-go functions normally short-circuit
+				// errors/unknowns before the binding runs, but catch them
+				// anyway so default() degrades gracefully.
+				if types.IsError(lhs) || types.IsUnknown(lhs) {
+					return rhs
+				}
+				return lhs
+			}),
+		),
+	),
+	cel.Function("flatten",
+		cel.Overload("flatten_list",
+			[]*cel.Type{cel.ListType(cel.DynType)},
+			cel.ListType(cel.DynType),
+			cel.UnaryBinding(func(val ref.Val) ref.Val {
+				list := val.(traits.Lister)
+				var result []ref.Val
+				it := list.Iterator()
+				for it.HasNext() == types.True {
+					item := it.Next()
+					if sub, ok := item.(traits.Lister); ok {
+						subIt := sub.Iterator()
+						for subIt.HasNext() == types.True {
+							result = append(result, subIt.Next())
+						}
+					} else {
+						result = append(result, item)
+					}
+				}
+				return types.DefaultTypeAdapter.NativeToValue(nativeSlice(result))
+			}),
+		),
+	),
+```
+
+Add a helper function in `functions.go`:
+
+```go
+func nativeSlice(vals []ref.Val) []any {
+	result := make([]any, len(vals))
+	for i, v := range vals {
+		result[i] = 
refToNative(v) + } + return result +} +``` + +Add `"github.com/google/cel-go/common/types/traits"` to imports. + +- [ ] **Step 4: Run tests to verify they pass** + +Run: `go test ./internal/cel/ -run "TestFunc_Default|TestFunc_Flatten" -v` +Expected: PASS + +- [ ] **Step 5: Commit** + +```bash +git add internal/cel/functions.go internal/cel/functions_test.go +git commit -m "feat(cel): add default() null coalescing and flatten() functions" +``` + +--- + +### Task 7: JSON functions — jsonEncode, jsonDecode + +**Files:** +- Modify: `internal/cel/functions.go` +- Modify: `internal/cel/functions_test.go` + +- [ ] **Step 1: Write failing tests** + +Append to `internal/cel/functions_test.go`: + +```go +func TestFunc_JsonEncode(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`jsonEncode(obj("name", "alice", "age", 30))`, newTestContext()) + require.NoError(t, err) + + // JSON key order may vary, so parse and compare. + var parsed map[string]any + require.NoError(t, json.Unmarshal([]byte(result.(string)), &parsed)) + assert.Equal(t, "alice", parsed["name"]) + assert.Equal(t, float64(30), parsed["age"]) // JSON numbers decode as float64 +} + +func TestFunc_JsonDecode(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + tests := []struct { + name string + expr string + wantErr bool + }{ + {"object", `jsonDecode("{\"name\":\"alice\"}")`, false}, + {"array", `jsonDecode("[1,2,3]")`, false}, + {"invalid", `jsonDecode("not json")`, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := eval.Eval(tt.expr, newTestContext()) + if tt.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} +``` + +Add `"encoding/json"` to test file imports. 
+ +- [ ] **Step 2: Run tests to verify they fail** + +Run: `go test ./internal/cel/ -run "TestFunc_Json" -v` +Expected: FAIL + +- [ ] **Step 3: Add JSON functions** + +In `functions.go`, add a new library: + +```go +func jsonFunctions() cel.EnvOption { + return cel.Lib(&jsonLib{}) +} + +type jsonLib struct{} + +func (l *jsonLib) CompileOptions() []cel.EnvOption { + return []cel.EnvOption{ + cel.Function("jsonEncode", + cel.Overload("jsonEncode_any", + []*cel.Type{cel.DynType}, + cel.StringType, + cel.UnaryBinding(func(val ref.Val) ref.Val { + native := refToNative(val) + b, err := json.Marshal(native) + if err != nil { + return types.NewErr("jsonEncode: %v", err) + } + return types.String(string(b)) + }), + ), + ), + cel.Function("jsonDecode", + cel.Overload("jsonDecode_string", + []*cel.Type{cel.StringType}, + cel.DynType, + cel.UnaryBinding(func(val ref.Val) ref.Val { + s := string(val.(types.String)) + var result any + if err := json.Unmarshal([]byte(s), &result); err != nil { + return types.NewErr("jsonDecode: %v", err) + } + return types.DefaultTypeAdapter.NativeToValue(result) + }), + ), + ), + } +} + +func (l *jsonLib) ProgramOptions() []cel.ProgramOption { + return nil +} +``` + +Add `"encoding/json"` to `functions.go` imports. 
Update `customFunctions()`: + +```go +func customFunctions() []cel.EnvOption { + return []cel.EnvOption{ + stringFunctions(), + typeFunctions(), + collectionFunctions(), + jsonFunctions(), + } +} +``` + +- [ ] **Step 4: Run tests to verify they pass** + +Run: `go test ./internal/cel/ -run "TestFunc_Json" -v` +Expected: PASS + +- [ ] **Step 5: Commit** + +```bash +git add internal/cel/functions.go internal/cel/functions_test.go +git commit -m "feat(cel): add jsonEncode and jsonDecode functions" +``` + +--- + +### Task 8: Date/time functions — timestamp, formatTimestamp + +**Files:** +- Modify: `internal/cel/functions.go` +- Modify: `internal/cel/functions_test.go` + +- [ ] **Step 1: Write failing tests** + +Append to `internal/cel/functions_test.go`: + +```go +func TestFunc_Timestamp(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + tests := []struct { + name string + expr string + wantErr bool + }{ + {"iso8601", `timestamp("2026-03-24T19:00:00Z")`, false}, + {"with_offset", `timestamp("2026-03-24T14:00:00-05:00")`, false}, + {"invalid", `timestamp("not a date")`, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := eval.Eval(tt.expr, newTestContext()) + if tt.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestFunc_FormatTimestamp(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`formatTimestamp(timestamp("2026-03-24T19:00:00Z"), "2006-01-02")`, newTestContext()) + require.NoError(t, err) + assert.Equal(t, "2026-03-24", result) + + result, err = eval.Eval(`formatTimestamp(timestamp("2026-03-24T19:30:45Z"), "15:04:05")`, newTestContext()) + require.NoError(t, err) + assert.Equal(t, "19:30:45", result) +} +``` + +- [ ] **Step 2: Run tests to verify they fail** + +Run: `go test ./internal/cel/ -run "TestFunc_Timestamp|TestFunc_FormatTimestamp" -v` +Expected: FAIL + +- [ ] **Step 3: Add date/time functions** + 
+In `functions.go`, add: + +```go +func timeFunctions() cel.EnvOption { + return cel.Lib(&timeLib{}) +} + +type timeLib struct{} + +func (l *timeLib) CompileOptions() []cel.EnvOption { + return []cel.EnvOption{ + cel.Function("timestamp", + cel.Overload("timestamp_string", + []*cel.Type{cel.StringType}, + cel.TimestampType, + cel.UnaryBinding(func(val ref.Val) ref.Val { + s := string(val.(types.String)) + t, err := time.Parse(time.RFC3339, s) + if err != nil { + return types.NewErr("timestamp: %v", err) + } + return types.Timestamp{Time: t} + }), + ), + ), + cel.Function("formatTimestamp", + cel.Overload("formatTimestamp_timestamp_string", + []*cel.Type{cel.TimestampType, cel.StringType}, + cel.StringType, + cel.BinaryBinding(func(lhs, rhs ref.Val) ref.Val { + ts := lhs.(types.Timestamp) + layout := string(rhs.(types.String)) + return types.String(ts.Time.Format(layout)) + }), + ), + ), + } +} + +func (l *timeLib) ProgramOptions() []cel.ProgramOption { + return nil +} +``` + +Add `"time"` to imports. Update `customFunctions()`: + +```go +func customFunctions() []cel.EnvOption { + return []cel.EnvOption{ + stringFunctions(), + typeFunctions(), + collectionFunctions(), + jsonFunctions(), + timeFunctions(), + } +} +``` + +- [ ] **Step 4: Run tests to verify they pass** + +Run: `go test ./internal/cel/ -run "TestFunc_Timestamp|TestFunc_FormatTimestamp" -v` +Expected: PASS + +- [ ] **Step 5: Run full test suite** + +Run: `go test ./internal/cel/ -v` +Expected: PASS — all tests including existing ones + +- [ ] **Step 6: Commit** + +```bash +git add internal/cel/functions.go internal/cel/functions_test.go +git commit -m "feat(cel): add timestamp and formatTimestamp date/time functions" +``` + +--- + +### Task 9: Update CEL expressions documentation + +**Files:** +- Modify: `site/src/content/docs/concepts/expressions.md` + +This task is delegated to the technical writer agent. 
+ +- [ ] **Step 1: Read the current expressions.md** + +Read: `site/src/content/docs/concepts/expressions.md` + +- [ ] **Step 2: Add new sections for custom functions and macros** + +After the existing content, add sections covering: + +**Built-in List Macros:** +- `.map(item, expr)` — transform each element +- `.filter(item, expr)` — keep matching elements +- `.exists(item, expr)` — true if any match +- `.all(item, expr)` — true if all match +- `.exists_one(item, expr)` — true if exactly one matches +- Chaining example: `.filter(...).map(...)` + +**String Functions:** +- `toLower()`, `toUpper()`, `trim()`, `replace(old, new)`, `split(delim)` + +**Type Coercion:** +- `parseInt(string)`, `parseFloat(string)`, `toString(any)` + +**Object Construction:** +- `obj(key, value, ...)` with usage examples for building params maps + +**Utility Functions:** +- `default(value, fallback)` +- `flatten(list)` + +**JSON Functions:** +- `jsonEncode(value)`, `jsonDecode(string)` + +**Date/Time Functions:** +- `timestamp(string)`, `formatTimestamp(ts, layout)` with Go layout reference + +Each function should have a brief description and a YAML example showing usage in a workflow step. + +- [ ] **Step 3: Verify site builds** + +Run: `cd site && npm run build` +Expected: success + +- [ ] **Step 4: Commit** + +```bash +git add site/src/content/docs/concepts/expressions.md +git commit -m "docs: add custom CEL functions and macros to expressions reference (#14)" +``` + +--- + +### Task 10: Create data transformations guide + +**Files:** +- Create: `site/src/content/docs/getting-started/data-transformations.md` + +This task is delegated to the technical writer agent. + +- [ ] **Step 1: Create the guide** + +Write `site/src/content/docs/getting-started/data-transformations.md` covering three patterns: + +**Pattern 1 — Structural transforms (CEL only):** +Complete workflow example: fetch user list from API → `.map()` + `obj()` to reshape each record → Postgres INSERT. 
Show the full YAML with CEL expressions in params. + +**Pattern 2 — AI-powered transforms:** +Complete workflow example: fetch raw text/HTML → AI connector with `output_schema` to extract structured data → store results. Explain when to use AI vs CEL (interpretation vs reshaping). + +**Pattern 3 — Hybrid:** +Complete workflow example: fetch data → CEL for field extraction and normalization → AI for classification/enrichment → Postgres store. Show how to combine both approaches. + +Include a decision guide: "Use CEL when the mapping is known and structural. Use AI when the transform requires interpretation, classification, or natural language understanding." + +- [ ] **Step 2: Verify site builds** + +Run: `cd site && npm run build` +Expected: success + +- [ ] **Step 3: Commit** + +```bash +git add site/src/content/docs/getting-started/data-transformations.md +git commit -m "docs: add data transformation patterns guide (#14)" +``` + +--- + +### Task 11: Create example workflows + +**Files:** +- Create: `examples/data-transform-api-to-db.yaml` +- Create: `examples/ai-data-enrichment.yaml` + +- [ ] **Step 1: Create structural transform example** + +In `examples/data-transform-api-to-db.yaml`: + +```yaml +name: data-transform-api-to-db +description: > + Fetches user data from an API, transforms each record using CEL + expressions to match a database schema, and inserts the normalized + records into Postgres. Demonstrates map(), obj(), toLower(), and + type coercion without requiring an AI model. 
+ +steps: + - name: fetch-users + action: http/request + timeout: "15s" + params: + method: GET + url: "https://jsonplaceholder.typicode.com/users" + headers: + Accept: "application/json" + + - name: store-users + action: postgres/query + credential: app-db + params: + query: "INSERT INTO users (username, email, city) VALUES ($1, $2, $3)" + params: "{{ steps['fetch-users'].output.json.map(u, [u.username.toLower(), u.email.toLower(), u.address.city]) }}" +``` + +- [ ] **Step 2: Create AI enrichment example** + +In `examples/ai-data-enrichment.yaml`: + +```yaml +name: ai-data-enrichment +description: > + Fetches support tickets, uses an AI model to classify priority and + extract key entities, then stores the enriched data. Demonstrates + using AI for transforms that require interpretation rather than + simple structural mapping. + +inputs: + ticket_api_url: + type: string + description: URL to fetch support tickets from + +steps: + - name: fetch-tickets + action: http/request + timeout: "15s" + params: + method: GET + url: "{{ inputs.ticket_api_url }}" + headers: + Accept: "application/json" + + - name: classify + action: ai/completion + credential: openai + timeout: "60s" + params: + model: gpt-4o + system_prompt: > + You are a support ticket classifier. Given a ticket, determine + the priority (critical, high, medium, low), category, and extract + any mentioned product names or error codes. 
+ prompt: "Classify this ticket: {{ steps['fetch-tickets'].output.body }}" + output_schema: + type: object + properties: + priority: + type: string + enum: [critical, high, medium, low] + category: + type: string + products: + type: array + items: + type: string + error_codes: + type: array + items: + type: string + required: [priority, category, products, error_codes] + additionalProperties: false + + - name: store-enriched + action: postgres/query + credential: app-db + if: "steps.classify.output.json.priority == 'critical' || steps.classify.output.json.priority == 'high'" + params: + query: > + INSERT INTO urgent_tickets (priority, category, products, raw_body) + VALUES ($1, $2, $3, $4) + params: + - "{{ steps.classify.output.json.priority }}" + - "{{ steps.classify.output.json.category }}" + - "{{ jsonEncode(steps.classify.output.json.products) }}" + - "{{ steps['fetch-tickets'].output.body }}" +``` + +- [ ] **Step 3: Commit** + +```bash +git add examples/data-transform-api-to-db.yaml examples/ai-data-enrichment.yaml +git commit -m "feat: add data transformation and AI enrichment example workflows (#14)" +``` + +--- + +### Task 12: Final validation + +- [ ] **Step 1: Run full test suite** + +Run: `go test ./internal/cel/ -v` +Expected: PASS — all function tests, macro tests, and existing tests + +- [ ] **Step 2: Run go vet** + +Run: `go vet ./internal/cel/` +Expected: clean + +- [ ] **Step 3: Verify site builds** + +Run: `cd site && npm run build` +Expected: success + +- [ ] **Step 4: Run full project test suite** + +Run: `go test ./... 
-short` +Expected: PASS From d1b4959174fc197390e42eb1b2683e5e49b14f35 Mon Sep 17 00:00:00 2001 From: Michael McNees Date: Tue, 24 Mar 2026 20:35:20 -0400 Subject: [PATCH 03/21] test(cel): add coverage for built-in map/filter/exists/all macros --- internal/cel/macros_test.go | 90 +++++++++++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) create mode 100644 internal/cel/macros_test.go diff --git a/internal/cel/macros_test.go b/internal/cel/macros_test.go new file mode 100644 index 0000000..beb855e --- /dev/null +++ b/internal/cel/macros_test.go @@ -0,0 +1,90 @@ +package cel + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func newListContext() *Context { + return &Context{ + Steps: map[string]map[string]any{ + "fetch": { + "output": map[string]any{ + "items": []any{ + map[string]any{"name": "alice", "age": int64(30)}, + map[string]any{"name": "bob", "age": int64(17)}, + map[string]any{"name": "charlie", "age": int64(25)}, + }, + }, + }, + }, + Inputs: map[string]any{}, + } +} + +func TestMacro_Map(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`steps.fetch.output.items.map(item, item.name)`, newListContext()) + require.NoError(t, err) + assert.Equal(t, []any{"alice", "bob", "charlie"}, result) +} + +func TestMacro_Filter(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`steps.fetch.output.items.filter(item, item.age >= 21)`, newListContext()) + require.NoError(t, err) + + items, ok := result.([]any) + require.True(t, ok) + assert.Len(t, items, 2) +} + +func TestMacro_Exists(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`steps.fetch.output.items.exists(item, item.name == "bob")`, newListContext()) + require.NoError(t, err) + assert.Equal(t, true, result) + + result, err = eval.Eval(`steps.fetch.output.items.exists(item, item.name == 
"dave")`, newListContext()) + require.NoError(t, err) + assert.Equal(t, false, result) +} + +func TestMacro_All(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`steps.fetch.output.items.all(item, item.age > 0)`, newListContext()) + require.NoError(t, err) + assert.Equal(t, true, result) + + result, err = eval.Eval(`steps.fetch.output.items.all(item, item.age >= 21)`, newListContext()) + require.NoError(t, err) + assert.Equal(t, false, result) +} + +func TestMacro_ExistsOne(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`steps.fetch.output.items.exists_one(item, item.name == "alice")`, newListContext()) + require.NoError(t, err) + assert.Equal(t, true, result) +} + +func TestMacro_MapAndFilter_Chained(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`steps.fetch.output.items.filter(item, item.age >= 21).map(item, item.name)`, newListContext()) + require.NoError(t, err) + assert.Equal(t, []any{"alice", "charlie"}, result) +} From c1b7879014357f9ead9e71176d38b84a23a4b69b Mon Sep 17 00:00:00 2001 From: Michael McNees Date: Tue, 24 Mar 2026 20:36:48 -0400 Subject: [PATCH 04/21] feat(cel): add toLower, toUpper, trim string functions Registers custom string member overloads via cel.Lib and wires them into NewEvaluator through a central customFunctions() registration point. 
Co-Authored-By: Claude Sonnet 4.6 --- internal/cel/cel.go | 7 +++- internal/cel/functions.go | 58 +++++++++++++++++++++++++++ internal/cel/functions_test.go | 73 ++++++++++++++++++++++++++++++++++ 3 files changed, 136 insertions(+), 2 deletions(-) create mode 100644 internal/cel/functions.go create mode 100644 internal/cel/functions_test.go diff --git a/internal/cel/cel.go b/internal/cel/cel.go index 525fab0..f54f3b4 100644 --- a/internal/cel/cel.go +++ b/internal/cel/cel.go @@ -28,12 +28,15 @@ type Evaluator struct { // NewEvaluator creates a CEL evaluator with the standard Mantle expression environment. func NewEvaluator() (*Evaluator, error) { - env, err := cel.NewEnv( + opts := []cel.EnvOption{ cel.Variable("steps", cel.MapType(cel.StringType, cel.DynType)), cel.Variable("inputs", cel.MapType(cel.StringType, cel.DynType)), cel.Variable("env", cel.MapType(cel.StringType, cel.StringType)), cel.Variable("trigger", cel.MapType(cel.StringType, cel.DynType)), - ) + } + opts = append(opts, customFunctions()...) + + env, err := cel.NewEnv(opts...) if err != nil { return nil, fmt.Errorf("creating CEL environment: %w", err) } diff --git a/internal/cel/functions.go b/internal/cel/functions.go new file mode 100644 index 0000000..eba9314 --- /dev/null +++ b/internal/cel/functions.go @@ -0,0 +1,58 @@ +package cel + +import ( + "strings" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +// customFunctions returns all custom CEL function options for the Mantle environment. 
+func customFunctions() []cel.EnvOption { + return []cel.EnvOption{ + stringFunctions(), + } +} + +func stringFunctions() cel.EnvOption { + return cel.Lib(&stringLib{}) +} + +type stringLib struct{} + +func (l *stringLib) CompileOptions() []cel.EnvOption { + return []cel.EnvOption{ + cel.Function("toLower", + cel.MemberOverload("string_toLower", + []*cel.Type{cel.StringType}, + cel.StringType, + cel.UnaryBinding(func(val ref.Val) ref.Val { + return types.String(strings.ToLower(string(val.(types.String)))) + }), + ), + ), + cel.Function("toUpper", + cel.MemberOverload("string_toUpper", + []*cel.Type{cel.StringType}, + cel.StringType, + cel.UnaryBinding(func(val ref.Val) ref.Val { + return types.String(strings.ToUpper(string(val.(types.String)))) + }), + ), + ), + cel.Function("trim", + cel.MemberOverload("string_trim", + []*cel.Type{cel.StringType}, + cel.StringType, + cel.UnaryBinding(func(val ref.Val) ref.Val { + return types.String(strings.TrimSpace(string(val.(types.String)))) + }), + ), + ), + } +} + +func (l *stringLib) ProgramOptions() []cel.ProgramOption { + return nil +} diff --git a/internal/cel/functions_test.go b/internal/cel/functions_test.go new file mode 100644 index 0000000..dc6bfff --- /dev/null +++ b/internal/cel/functions_test.go @@ -0,0 +1,73 @@ +package cel + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFunc_ToLower(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + tests := []struct { + name string + expr string + want any + }{ + {"basic", `"HELLO".toLower()`, "hello"}, + {"mixed", `"Hello World".toLower()`, "hello world"}, + {"already_lower", `"hello".toLower()`, "hello"}, + {"empty", `"".toLower()`, ""}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := eval.Eval(tt.expr, newTestContext()) + require.NoError(t, err) + assert.Equal(t, tt.want, result) + }) + } +} + +func TestFunc_ToUpper(t *testing.T) { + eval, 
err := NewEvaluator() + require.NoError(t, err) + tests := []struct { + name string + expr string + want any + }{ + {"basic", `"hello".toUpper()`, "HELLO"}, + {"mixed", `"Hello World".toUpper()`, "HELLO WORLD"}, + {"empty", `"".toUpper()`, ""}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := eval.Eval(tt.expr, newTestContext()) + require.NoError(t, err) + assert.Equal(t, tt.want, result) + }) + } +} + +func TestFunc_Trim(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + tests := []struct { + name string + expr string + want any + }{ + {"spaces", `" hello ".trim()`, "hello"}, + {"tabs", "\"\\thello\\t\".trim()", "hello"}, + {"no_whitespace", `"hello".trim()`, "hello"}, + {"empty", `"".trim()`, ""}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := eval.Eval(tt.expr, newTestContext()) + require.NoError(t, err) + assert.Equal(t, tt.want, result) + }) + } +} From a4f674452adba8de002e61e9942323d84d676d3d Mon Sep 17 00:00:00 2001 From: Michael McNees Date: Tue, 24 Mar 2026 20:40:59 -0400 Subject: [PATCH 05/21] feat(cel): add replace and split string functions --- internal/cel/functions.go | 24 +++++++++++++++++++ internal/cel/functions_test.go | 44 ++++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+) diff --git a/internal/cel/functions.go b/internal/cel/functions.go index eba9314..81dd84a 100644 --- a/internal/cel/functions.go +++ b/internal/cel/functions.go @@ -50,6 +50,30 @@ func (l *stringLib) CompileOptions() []cel.EnvOption { }), ), ), + cel.Function("replace", + cel.MemberOverload("string_replace", + []*cel.Type{cel.StringType, cel.StringType, cel.StringType}, + cel.StringType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + s := string(args[0].(types.String)) + old := string(args[1].(types.String)) + newStr := string(args[2].(types.String)) + return types.String(strings.ReplaceAll(s, old, newStr)) + }), + ), + ), + cel.Function("split", + 
cel.MemberOverload("string_split", + []*cel.Type{cel.StringType, cel.StringType}, + cel.ListType(cel.StringType), + cel.BinaryBinding(func(lhs, rhs ref.Val) ref.Val { + s := string(lhs.(types.String)) + sep := string(rhs.(types.String)) + parts := strings.Split(s, sep) + return types.DefaultTypeAdapter.NativeToValue(parts) + }), + ), + ), } } diff --git a/internal/cel/functions_test.go b/internal/cel/functions_test.go index dc6bfff..43e57a0 100644 --- a/internal/cel/functions_test.go +++ b/internal/cel/functions_test.go @@ -71,3 +71,47 @@ func TestFunc_Trim(t *testing.T) { }) } } + +func TestFunc_Replace(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + tests := []struct { + name string + expr string + want any + }{ + {"basic", `"hello world".replace("world", "CEL")`, "hello CEL"}, + {"multiple", `"aabbaa".replace("aa", "x")`, "xbbx"}, + {"no_match", `"hello".replace("xyz", "abc")`, "hello"}, + {"empty_replacement", `"hello world".replace("world", "")`, "hello "}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := eval.Eval(tt.expr, newTestContext()) + require.NoError(t, err) + assert.Equal(t, tt.want, result) + }) + } +} + +func TestFunc_Split(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + tests := []struct { + name string + expr string + want any + }{ + {"comma", `"a,b,c".split(",")`, []any{"a", "b", "c"}}, + {"space", `"hello world foo".split(" ")`, []any{"hello", "world", "foo"}}, + {"no_match", `"hello".split(",")`, []any{"hello"}}, + {"empty", `"".split(",")`, []any{""}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := eval.Eval(tt.expr, newTestContext()) + require.NoError(t, err) + assert.Equal(t, tt.want, result) + }) + } +} From 3913d47e82d2a027969d092237b11bdc801abb46 Mon Sep 17 00:00:00 2001 From: Michael McNees Date: Tue, 24 Mar 2026 20:41:54 -0400 Subject: [PATCH 06/21] feat(cel): add parseInt, parseFloat, toString type 
coercion functions --- internal/cel/functions.go | 57 ++++++++++++++++++++++++ internal/cel/functions_test.go | 79 ++++++++++++++++++++++++++++++++++ 2 files changed, 136 insertions(+) diff --git a/internal/cel/functions.go b/internal/cel/functions.go index 81dd84a..f294f17 100644 --- a/internal/cel/functions.go +++ b/internal/cel/functions.go @@ -1,6 +1,8 @@ package cel import ( + "fmt" + "strconv" "strings" "github.com/google/cel-go/cel" @@ -12,6 +14,7 @@ import ( func customFunctions() []cel.EnvOption { return []cel.EnvOption{ stringFunctions(), + typeFunctions(), } } @@ -80,3 +83,57 @@ func (l *stringLib) CompileOptions() []cel.EnvOption { func (l *stringLib) ProgramOptions() []cel.ProgramOption { return nil } + +// ── Type coercion functions ─────────────────────────────────────────────────── + +func typeFunctions() cel.EnvOption { + return cel.Lib(&typeLib{}) +} + +type typeLib struct{} + +func (l *typeLib) CompileOptions() []cel.EnvOption { + return []cel.EnvOption{ + cel.Function("parseInt", + cel.Overload("parseInt_string", + []*cel.Type{cel.StringType}, + cel.IntType, + cel.UnaryBinding(func(val ref.Val) ref.Val { + s := string(val.(types.String)) + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return types.NewErr("parseInt: %v", err) + } + return types.Int(n) + }), + ), + ), + cel.Function("parseFloat", + cel.Overload("parseFloat_string", + []*cel.Type{cel.StringType}, + cel.DoubleType, + cel.UnaryBinding(func(val ref.Val) ref.Val { + s := string(val.(types.String)) + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return types.NewErr("parseFloat: %v", err) + } + return types.Double(f) + }), + ), + ), + cel.Function("toString", + cel.Overload("toString_any", + []*cel.Type{cel.DynType}, + cel.StringType, + cel.UnaryBinding(func(val ref.Val) ref.Val { + return types.String(fmt.Sprintf("%v", val.Value())) + }), + ), + ), + } +} + +func (l *typeLib) ProgramOptions() []cel.ProgramOption { + return nil +} diff --git 
a/internal/cel/functions_test.go b/internal/cel/functions_test.go index 43e57a0..598a60c 100644 --- a/internal/cel/functions_test.go +++ b/internal/cel/functions_test.go @@ -115,3 +115,82 @@ func TestFunc_Split(t *testing.T) { }) } } + +// Task 4: parseInt, parseFloat, toString + +func TestFunc_ParseInt(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + tests := []struct { + name string + expr string + want any + wantErr bool + }{ + {"basic", `parseInt("42")`, int64(42), false}, + {"negative", `parseInt("-7")`, int64(-7), false}, + {"zero", `parseInt("0")`, int64(0), false}, + {"invalid", `parseInt("abc")`, nil, true}, + {"float_string", `parseInt("3.14")`, nil, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := eval.Eval(tt.expr, newTestContext()) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tt.want, result) + } + }) + } +} + +func TestFunc_ParseFloat(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + tests := []struct { + name string + expr string + want any + wantErr bool + }{ + {"basic", `parseFloat("3.14")`, float64(3.14), false}, + {"integer_string", `parseFloat("42")`, float64(42), false}, + {"negative", `parseFloat("-1.5")`, float64(-1.5), false}, + {"invalid", `parseFloat("abc")`, nil, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := eval.Eval(tt.expr, newTestContext()) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tt.want, result) + } + }) + } +} + +func TestFunc_ToString(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + tests := []struct { + name string + expr string + want any + }{ + {"int", `toString(42)`, "42"}, + {"bool", `toString(true)`, "true"}, + {"string", `toString("hello")`, "hello"}, + {"float", `toString(1.5)`, "1.5"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t 
*testing.T) { + result, err := eval.Eval(tt.expr, newTestContext()) + require.NoError(t, err) + assert.Equal(t, tt.want, result) + }) + } +} From 0e5d8e9935d3b860774f22aa117dbfcf3231acd6 Mon Sep 17 00:00:00 2001 From: Michael McNees Date: Tue, 24 Mar 2026 20:42:28 -0400 Subject: [PATCH 07/21] feat(cel): add obj() map construction function --- internal/cel/functions.go | 65 ++++++++++++++++++++++++++++++++++ internal/cel/functions_test.go | 45 +++++++++++++++++++++++ 2 files changed, 110 insertions(+) diff --git a/internal/cel/functions.go b/internal/cel/functions.go index f294f17..bde1d42 100644 --- a/internal/cel/functions.go +++ b/internal/cel/functions.go @@ -15,6 +15,7 @@ func customFunctions() []cel.EnvOption { return []cel.EnvOption{ stringFunctions(), typeFunctions(), + collectionFunctions(), } } @@ -137,3 +138,67 @@ func (l *typeLib) CompileOptions() []cel.EnvOption { func (l *typeLib) ProgramOptions() []cel.ProgramOption { return nil } + +// ── Collection functions ────────────────────────────────────────────────────── + +func collectionFunctions() cel.EnvOption { + return cel.Lib(&collectionLib{}) +} + +type collectionLib struct{} + +func (l *collectionLib) CompileOptions() []cel.EnvOption { + return []cel.EnvOption{ + // obj() — register fixed-arity overloads for 1–5 key-value pairs. + // CEL does not support true variadic functions without macros, so we register + // overloads for each supported arity. All overloads share the same binding + // via objBinding. 
+ cel.Function("obj", + cel.Overload("obj_2", + []*cel.Type{cel.DynType, cel.DynType}, + cel.DynType, + cel.FunctionBinding(objBinding), + ), + cel.Overload("obj_4", + []*cel.Type{cel.DynType, cel.DynType, cel.DynType, cel.DynType}, + cel.DynType, + cel.FunctionBinding(objBinding), + ), + cel.Overload("obj_6", + []*cel.Type{cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType}, + cel.DynType, + cel.FunctionBinding(objBinding), + ), + cel.Overload("obj_8", + []*cel.Type{cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType}, + cel.DynType, + cel.FunctionBinding(objBinding), + ), + cel.Overload("obj_10", + []*cel.Type{cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType}, + cel.DynType, + cel.FunctionBinding(objBinding), + ), + ), + } +} + +func (l *collectionLib) ProgramOptions() []cel.ProgramOption { + return nil +} + +// objBinding is the shared implementation for all obj() fixed-arity overloads. 
+func objBinding(args ...ref.Val) ref.Val { + if len(args)%2 != 0 { + return types.NewErr("obj: requires even number of arguments (key-value pairs), got %d", len(args)) + } + m := make(map[string]any, len(args)/2) + for i := 0; i < len(args); i += 2 { + key, ok := args[i].(types.String) + if !ok { + return types.NewErr("obj: key at position %d must be a string, got %s", i, args[i].Type()) + } + m[string(key)] = refToNative(args[i+1]) + } + return types.DefaultTypeAdapter.NativeToValue(m) +} diff --git a/internal/cel/functions_test.go b/internal/cel/functions_test.go index 598a60c..462b387 100644 --- a/internal/cel/functions_test.go +++ b/internal/cel/functions_test.go @@ -194,3 +194,48 @@ func TestFunc_ToString(t *testing.T) { }) } } + +// Task 5: obj() + +func TestFunc_Obj(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + tests := []struct { + name string + expr string + want any + wantErr bool + }{ + { + name: "basic map", + expr: `obj("name", "alice", "age", 30)`, + want: map[string]any{"name": "alice", "age": int64(30)}, + }, + { + name: "single pair", + expr: `obj("key", "value")`, + want: map[string]any{"key": "value"}, + }, + { + name: "nested with step reference", + expr: `obj("status", steps.fetch.output.status)`, + want: map[string]any{"status": int64(200)}, + }, + { + name: "odd_args", + expr: `obj("key")`, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := eval.Eval(tt.expr, newTestContext()) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tt.want, result) + } + }) + } +} From ccb63e5bdc806f352ffeb043e965c2338947d54e Mon Sep 17 00:00:00 2001 From: Michael McNees Date: Tue, 24 Mar 2026 20:43:03 -0400 Subject: [PATCH 08/21] feat(cel): add default() null coalescing and flatten() functions --- internal/cel/functions.go | 36 +++++++++++++++++++++++++++ internal/cel/functions_test.go | 45 ++++++++++++++++++++++++++++++++++ 2 
files changed, 81 insertions(+) diff --git a/internal/cel/functions.go b/internal/cel/functions.go index bde1d42..ba1ff8d 100644 --- a/internal/cel/functions.go +++ b/internal/cel/functions.go @@ -8,6 +8,7 @@ import ( "github.com/google/cel-go/cel" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" ) // customFunctions returns all custom CEL function options for the Mantle environment. @@ -149,6 +150,41 @@ type collectionLib struct{} func (l *collectionLib) CompileOptions() []cel.EnvOption { return []cel.EnvOption{ + cel.Function("default", + cel.Overload("default_any_any", + []*cel.Type{cel.DynType, cel.DynType}, + cel.DynType, + cel.BinaryBinding(func(lhs, rhs ref.Val) ref.Val { + if types.IsError(lhs) || types.IsUnknown(lhs) { + return rhs + } + return lhs + }), + ), + ), + cel.Function("flatten", + cel.Overload("flatten_list", + []*cel.Type{cel.ListType(cel.DynType)}, + cel.ListType(cel.DynType), + cel.UnaryBinding(func(val ref.Val) ref.Val { + list := val.(traits.Lister) + var result []any + it := list.Iterator() + for it.HasNext() == types.True { + item := it.Next() + if sub, ok := item.(traits.Lister); ok { + subIt := sub.Iterator() + for subIt.HasNext() == types.True { + result = append(result, refToNative(subIt.Next())) + } + } else { + result = append(result, refToNative(item)) + } + } + return types.DefaultTypeAdapter.NativeToValue(result) + }), + ), + ), // obj() — register fixed-arity overloads for 1–5 key-value pairs. // CEL does not support true variadic functions without macros, so we register // overloads for each supported arity. 
All overloads share the same binding diff --git a/internal/cel/functions_test.go b/internal/cel/functions_test.go index 462b387..6fe076b 100644 --- a/internal/cel/functions_test.go +++ b/internal/cel/functions_test.go @@ -239,3 +239,48 @@ func TestFunc_Obj(t *testing.T) { }) } } + +// Task 6: default() and flatten() + +func TestFunc_Default(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + tests := []struct { + name string + expr string + want any + }{ + {"value exists returns value", `default("hello", "fallback")`, "hello"}, + {"empty string returns empty string", `default("", "fallback")`, ""}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := eval.Eval(tt.expr, newTestContext()) + require.NoError(t, err) + assert.Equal(t, tt.want, result) + }) + } +} + +func TestFunc_Flatten(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + tests := []struct { + name string + expr string + want any + }{ + { + name: "nested lists → flat list", + expr: `flatten([[1, 2], [3, 4], [5]])`, + want: []any{int64(1), int64(2), int64(3), int64(4), int64(5)}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := eval.Eval(tt.expr, newTestContext()) + require.NoError(t, err) + assert.Equal(t, tt.want, result) + }) + } +} From b0e4d64cb51c2d9d343e6e8c40f9b6b15d96d21f Mon Sep 17 00:00:00 2001 From: Michael McNees Date: Tue, 24 Mar 2026 20:43:46 -0400 Subject: [PATCH 09/21] feat(cel): add jsonEncode and jsonDecode functions --- internal/cel/functions.go | 47 ++++++++++++++++++++++++ internal/cel/functions_test.go | 66 ++++++++++++++++++++++++++++++++++ 2 files changed, 113 insertions(+) diff --git a/internal/cel/functions.go b/internal/cel/functions.go index ba1ff8d..2712704 100644 --- a/internal/cel/functions.go +++ b/internal/cel/functions.go @@ -1,6 +1,7 @@ package cel import ( + "encoding/json" "fmt" "strconv" "strings" @@ -17,6 +18,7 @@ func customFunctions() 
[]cel.EnvOption { stringFunctions(), typeFunctions(), collectionFunctions(), + jsonFunctions(), } } @@ -238,3 +240,48 @@ func objBinding(args ...ref.Val) ref.Val { } return types.DefaultTypeAdapter.NativeToValue(m) } + +// ── JSON functions ──────────────────────────────────────────────────────────── + +func jsonFunctions() cel.EnvOption { + return cel.Lib(&jsonLib{}) +} + +type jsonLib struct{} + +func (l *jsonLib) CompileOptions() []cel.EnvOption { + return []cel.EnvOption{ + cel.Function("jsonEncode", + cel.Overload("jsonEncode_any", + []*cel.Type{cel.DynType}, + cel.StringType, + cel.UnaryBinding(func(val ref.Val) ref.Val { + native := refToNative(val) + b, err := json.Marshal(native) + if err != nil { + return types.NewErr("jsonEncode: %v", err) + } + return types.String(string(b)) + }), + ), + ), + cel.Function("jsonDecode", + cel.Overload("jsonDecode_string", + []*cel.Type{cel.StringType}, + cel.DynType, + cel.UnaryBinding(func(val ref.Val) ref.Val { + s := string(val.(types.String)) + var result any + if err := json.Unmarshal([]byte(s), &result); err != nil { + return types.NewErr("jsonDecode: %v", err) + } + return types.DefaultTypeAdapter.NativeToValue(result) + }), + ), + ), + } +} + +func (l *jsonLib) ProgramOptions() []cel.ProgramOption { + return nil +} diff --git a/internal/cel/functions_test.go b/internal/cel/functions_test.go index 6fe076b..9feb476 100644 --- a/internal/cel/functions_test.go +++ b/internal/cel/functions_test.go @@ -1,6 +1,7 @@ package cel import ( + "encoding/json" "testing" "github.com/stretchr/testify/assert" @@ -284,3 +285,68 @@ func TestFunc_Flatten(t *testing.T) { }) } } + +// Task 7: jsonEncode and jsonDecode + +func TestFunc_JsonEncode(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`jsonEncode(obj("name", "alice", "score", 99))`, newTestContext()) + require.NoError(t, err) + + s, ok := result.(string) + require.True(t, ok, "expected string result, got %T", result) + + var 
parsed map[string]any + require.NoError(t, json.Unmarshal([]byte(s), &parsed)) + assert.Equal(t, "alice", parsed["name"]) + assert.Equal(t, float64(99), parsed["score"]) +} + +func TestFunc_JsonDecode(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + tests := []struct { + name string + expr string + wantErr bool + check func(t *testing.T, result any) + }{ + { + name: "object", + expr: `jsonDecode("{\"name\":\"bob\",\"age\":25}")`, + check: func(t *testing.T, result any) { + m, ok := result.(map[string]any) + require.True(t, ok) + assert.Equal(t, "bob", m["name"]) + assert.Equal(t, float64(25), m["age"]) + }, + }, + { + name: "array", + expr: `jsonDecode("[1,2,3]")`, + check: func(t *testing.T, result any) { + assert.NotNil(t, result) + }, + }, + { + name: "invalid", + expr: `jsonDecode("not json")`, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := eval.Eval(tt.expr, newTestContext()) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + if tt.check != nil { + tt.check(t, result) + } + } + }) + } +} From a636201d792fc69ddc0200e65387d77b48dc37db Mon Sep 17 00:00:00 2001 From: Michael McNees Date: Tue, 24 Mar 2026 20:44:19 -0400 Subject: [PATCH 10/21] feat(cel): add timestamp and formatTimestamp date/time functions --- internal/cel/functions.go | 44 +++++++++++++++++++++++++++ internal/cel/functions_test.go | 55 ++++++++++++++++++++++++++++++++++ 2 files changed, 99 insertions(+) diff --git a/internal/cel/functions.go b/internal/cel/functions.go index 2712704..a0273b1 100644 --- a/internal/cel/functions.go +++ b/internal/cel/functions.go @@ -5,6 +5,7 @@ import ( "fmt" "strconv" "strings" + "time" "github.com/google/cel-go/cel" "github.com/google/cel-go/common/types" @@ -19,6 +20,7 @@ func customFunctions() []cel.EnvOption { typeFunctions(), collectionFunctions(), jsonFunctions(), + timeFunctions(), } } @@ -285,3 +287,45 @@ func (l *jsonLib) CompileOptions() 
[]cel.EnvOption { func (l *jsonLib) ProgramOptions() []cel.ProgramOption { return nil } + +// ── Time functions ──────────────────────────────────────────────────────────── + +func timeFunctions() cel.EnvOption { + return cel.Lib(&timeLib{}) +} + +type timeLib struct{} + +func (l *timeLib) CompileOptions() []cel.EnvOption { + return []cel.EnvOption{ + cel.Function("parseTimestamp", + cel.Overload("parseTimestamp_string", + []*cel.Type{cel.StringType}, + cel.TimestampType, + cel.UnaryBinding(func(val ref.Val) ref.Val { + s := string(val.(types.String)) + t, err := time.Parse(time.RFC3339, s) + if err != nil { + return types.NewErr("parseTimestamp: %v", err) + } + return types.Timestamp{Time: t} + }), + ), + ), + cel.Function("formatTimestamp", + cel.Overload("formatTimestamp_timestamp_string", + []*cel.Type{cel.TimestampType, cel.StringType}, + cel.StringType, + cel.BinaryBinding(func(lhs, rhs ref.Val) ref.Val { + ts := lhs.(types.Timestamp) + layout := string(rhs.(types.String)) + return types.String(ts.Time.Format(layout)) + }), + ), + ), + } +} + +func (l *timeLib) ProgramOptions() []cel.ProgramOption { + return nil +} diff --git a/internal/cel/functions_test.go b/internal/cel/functions_test.go index 9feb476..aaac44c 100644 --- a/internal/cel/functions_test.go +++ b/internal/cel/functions_test.go @@ -350,3 +350,58 @@ func TestFunc_JsonDecode(t *testing.T) { }) } } + +// Task 8: timestamp and formatTimestamp + +func TestFunc_Timestamp(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + tests := []struct { + name string + expr string + wantErr bool + }{ + {"iso8601", `parseTimestamp("2024-01-15T00:00:00Z")`, false}, + {"with_offset", `parseTimestamp("2024-06-01T12:30:00+05:30")`, false}, + {"invalid", `parseTimestamp("not-a-date")`, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := eval.Eval(tt.expr, newTestContext()) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + 
assert.NotNil(t, result) + } + }) + } +} + +func TestFunc_FormatTimestamp(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + tests := []struct { + name string + expr string + want any + }{ + { + name: "date format", + expr: `formatTimestamp(parseTimestamp("2024-01-15T00:00:00Z"), "2006-01-02")`, + want: "2024-01-15", + }, + { + name: "time format", + expr: `formatTimestamp(parseTimestamp("2024-01-15T14:30:00Z"), "15:04")`, + want: "14:30", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := eval.Eval(tt.expr, newTestContext()) + require.NoError(t, err) + assert.Equal(t, tt.want, result) + }) + } +} From 3f42c170ec4407f49ea67ed247aed4f64b91d64c Mon Sep 17 00:00:00 2001 From: Michael McNees Date: Tue, 24 Mar 2026 20:46:31 -0400 Subject: [PATCH 11/21] docs: add custom CEL functions and macros to expressions reference (#14) Co-Authored-By: Claude Sonnet 4.6 --- site/src/content/docs/concepts/expressions.md | 366 ++++++++++++++++++ 1 file changed, 366 insertions(+) diff --git a/site/src/content/docs/concepts/expressions.md b/site/src/content/docs/concepts/expressions.md index fea86da..fc58d0d 100644 --- a/site/src/content/docs/concepts/expressions.md +++ b/site/src/content/docs/concepts/expressions.md @@ -256,6 +256,372 @@ The data flows like this: Each step can only reference outputs from steps that have completed before it runs. The engine detects these references automatically and treats them as implicit dependencies. When combined with explicit `depends_on` declarations, this enables parallel execution — see [Execution Model](/docs/concepts/execution). +## List Macros + +CEL provides built-in macros for working with lists. These operate on any list value — step output arrays, input arrays, or lists constructed inline. + +### `.map(item, expr)` + +Transforms each element in a list by evaluating `expr` for every `item`. 
+ +```yaml +steps: + - name: extract-titles + action: http/request + params: + method: POST + url: "https://api.example.com/batch" + body: + # Produce a list of title strings from a list of article objects + titles: "{{ steps.fetch.output.json.articles.map(a, a.title) }}" +``` + +### `.filter(item, expr)` + +Returns a new list containing only the elements for which `expr` is true. + +```yaml +steps: + - name: notify-failures + action: http/request + if: "size(steps.results.output.json.jobs.filter(j, j.status == 'failed')) > 0" + params: + method: POST + url: "https://hooks.example.com/alert" + body: + failed_jobs: "{{ steps.results.output.json.jobs.filter(j, j.status == 'failed') }}" +``` + +### `.exists(item, expr)` + +Returns `true` if at least one element satisfies `expr`. + +```yaml +steps: + - name: escalate + action: http/request + # Run this step only if any result has a critical severity + if: "steps.scan.output.json.findings.exists(f, f.severity == 'critical')" + params: + method: POST + url: "https://api.example.com/escalate" +``` + +### `.all(item, expr)` + +Returns `true` if every element satisfies `expr`. + +```yaml +steps: + - name: mark-complete + action: http/request + # Only mark complete when every task is done + if: "steps.fetch.output.json.tasks.all(t, t.done == true)" + params: + method: PATCH + url: "https://api.example.com/projects/{{ inputs.project_id }}" + body: + status: "complete" +``` + +### `.exists_one(item, expr)` + +Returns `true` if exactly one element satisfies `expr`. + +```yaml +steps: + - name: assign-owner + action: http/request + # Assign only when there is exactly one eligible owner + if: "steps.fetch.output.json.members.exists_one(m, m.role == 'lead')" + params: + method: POST + url: "https://api.example.com/assignments" +``` + +### Chaining `.filter()` and `.map()` + +Filter and map can be chained to first narrow a list and then reshape it. 
+ +```yaml +steps: + - name: summarize-errors + action: ai/completion + params: + provider: openai + model: gpt-4o + prompt: > + Summarize these error messages: + {{ steps.logs.output.json.entries + .filter(e, e.level == 'error') + .map(e, e.message) }} +``` + +## String Functions + +Mantle registers the following string functions on top of CEL's built-in string methods. + +### `toLower()` + +Converts a string to lowercase. + +```yaml +steps: + - name: normalize-tag + action: http/request + params: + method: POST + url: "https://api.example.com/tags" + body: + tag: "{{ steps.input.output.json.label.toLower() }}" +``` + +### `toUpper()` + +Converts a string to uppercase. + +```yaml +steps: + - name: set-env-key + action: http/request + params: + method: POST + url: "https://api.example.com/config" + body: + key: "{{ inputs.variable_name.toUpper() }}" +``` + +### `trim()` + +Removes leading and trailing whitespace. + +```yaml +steps: + - name: clean-input + action: http/request + params: + method: POST + url: "https://api.example.com/search" + body: + query: "{{ inputs.search_term.trim() }}" +``` + +### `replace(old, new)` + +Replaces all occurrences of `old` with `new`. + +```yaml +steps: + - name: slugify + action: http/request + params: + method: POST + url: "https://api.example.com/pages" + body: + slug: "{{ inputs.title.toLower().replace(' ', '-') }}" +``` + +### `split(delimiter)` + +Splits a string into a list of strings at each occurrence of `delimiter`. + +```yaml +steps: + - name: process-tags + action: http/request + params: + method: POST + url: "https://api.example.com/items" + body: + # Convert "a,b,c" to ["a", "b", "c"] + tags: "{{ inputs.tag_string.split(',') }}" +``` + +## Type Coercion + +These functions parse and convert values between types. They produce an evaluation error on invalid input — use `default()` to handle failure gracefully. + +### `parseInt(string)` + +Parses a decimal string to an integer. 
Errors if the string is not a valid integer. + +```yaml +steps: + - name: paginate + action: http/request + params: + method: GET + url: "https://api.example.com/results" + body: + page: "{{ parseInt(inputs.page_string) }}" +``` + +### `parseFloat(string)` + +Parses a string to a floating-point number. Errors if the string is not a valid float. + +```yaml +steps: + - name: apply-threshold + action: http/request + if: "parseFloat(steps.score.output.body) > 0.75" + params: + method: POST + url: "https://api.example.com/approve" +``` + +### `toString(value)` + +Converts any value to its string representation. + +```yaml +steps: + - name: build-message + action: http/request + params: + method: POST + url: "https://hooks.example.com/notify" + body: + text: "Processed {{ toString(steps.count.output.json.total) }} records." +``` + +## Object Construction + +### `obj(key, value, ...)` + +Builds a map from alternating key-value arguments. Accepts any number of key-value pairs. + +```yaml +steps: + - name: create-record + action: http/request + params: + method: POST + url: "https://api.example.com/records" + body: + record: "{{ obj('name', inputs.name, 'status', 'pending', 'source', 'mantle') }}" +``` + +`obj()` is particularly useful combined with `.map()` to reshape a list of objects into a different structure: + +```yaml +steps: + - name: reformat-users + action: http/request + params: + method: POST + url: "https://api.example.com/import" + body: + # Reshape each user to only include id and display_name + users: > + {{ steps.fetch.output.json.users.map(u, + obj('id', u.id, 'display_name', u.first_name + ' ' + u.last_name)) }} +``` + +## Utility Functions + +### `default(value, fallback)` + +Returns `value` if it is non-null and does not produce an error; returns `fallback` otherwise. Use this to handle optional fields without a `has()` guard. 
+ +```yaml +steps: + - name: notify + action: http/request + params: + method: POST + url: "https://hooks.example.com/notify" + body: + # Use a default region when the field is absent from the response + region: "{{ default(steps.fetch.output.json.region, 'us-east-1') }}" +``` + +### `flatten(list)` + +Flattens one level of nesting from a list of lists. + +```yaml +steps: + - name: collect-all-items + action: http/request + params: + method: POST + url: "https://api.example.com/process" + body: + # Each page returns a list; flatten to get a single list of items + items: "{{ flatten(steps.paginate.output.json.pages.map(p, p.items)) }}" +``` + +## JSON Functions + +### `jsonEncode(value)` + +Serializes any value to a JSON string. Useful when a downstream API expects a JSON-encoded string field rather than a structured object. + +```yaml +steps: + - name: store-metadata + action: http/request + params: + method: PUT + url: "https://api.example.com/records/{{ inputs.id }}" + body: + # The target API expects metadata as a JSON string, not an object + metadata_json: "{{ jsonEncode(steps.fetch.output.json.metadata) }}" +``` + +### `jsonDecode(string)` + +Parses a JSON string to a structured value. Use this when a step returns a JSON-encoded string inside a field rather than a parsed object. + +```yaml +steps: + - name: parse-config + action: http/request + params: + method: POST + url: "https://api.example.com/apply" + body: + # steps.load.output.json.config_str is a JSON string — decode it first + settings: "{{ jsonDecode(steps.load.output.json.config_str).settings }}" +``` + +## Date/Time Functions + +### `parseTimestamp(string)` + +Parses an ISO 8601 / RFC 3339 string to a CEL timestamp value. Named `parseTimestamp` rather than `timestamp` to avoid collision with CEL's built-in `timestamp()` constructor. 
+ +```yaml +steps: + - name: check-expiry + action: http/request + if: "parseTimestamp(steps.fetch.output.json.expires_at) < now" + params: + method: POST + url: "https://api.example.com/renew" + body: + resource_id: "{{ inputs.resource_id }}" +``` + +### `formatTimestamp(timestamp, layout)` + +Formats a timestamp value to a string using a [Go time layout](https://pkg.go.dev/time#Layout). The reference time for Go layouts is `Mon Jan 2 15:04:05 MST 2006`. + +```yaml +steps: + - name: create-report + action: http/request + params: + method: POST + url: "https://api.example.com/reports" + body: + # Format as "2006-01-02" (Go layout for YYYY-MM-DD) + report_date: "{{ formatTimestamp(parseTimestamp(steps.fetch.output.json.created_at), '2006-01-02') }}" + # Format with time for a human-readable label + label: "Report for {{ formatTimestamp(now, 'Jan 2, 2006') }}" +``` + ## Limitations - **`env.*` is restricted** — only environment variables with the `MANTLE_ENV_` prefix are available. This prevents accidental exposure of system secrets through CEL. From e735241dfa0b9398fba986530e8aa3807cf2f550 Mon Sep 17 00:00:00 2001 From: Michael McNees Date: Tue, 24 Mar 2026 20:46:32 -0400 Subject: [PATCH 12/21] feat: add data transformation and AI enrichment example workflows (#14) --- examples/ai-data-enrichment.yaml | 65 ++++++++++++++++++++++++++ examples/data-transform-api-to-db.yaml | 23 +++++++++ 2 files changed, 88 insertions(+) create mode 100644 examples/ai-data-enrichment.yaml create mode 100644 examples/data-transform-api-to-db.yaml diff --git a/examples/ai-data-enrichment.yaml b/examples/ai-data-enrichment.yaml new file mode 100644 index 0000000..4c30771 --- /dev/null +++ b/examples/ai-data-enrichment.yaml @@ -0,0 +1,65 @@ +name: ai-data-enrichment +description: > + Fetches support tickets, uses an AI model to classify priority and + extract key entities, then stores the enriched data. 
Demonstrates + using AI for transforms that require interpretation rather than + simple structural mapping. + +inputs: + ticket_api_url: + type: string + description: URL to fetch support tickets from + +steps: + - name: fetch-tickets + action: http/request + timeout: "15s" + params: + method: GET + url: "{{ inputs.ticket_api_url }}" + headers: + Accept: "application/json" + + - name: classify + action: ai/completion + credential: openai + timeout: "60s" + params: + model: gpt-4o + system_prompt: > + You are a support ticket classifier. Given a ticket, determine + the priority (critical, high, medium, low), category, and extract + any mentioned product names or error codes. + prompt: "Classify this ticket: {{ steps['fetch-tickets'].output.body }}" + output_schema: + type: object + properties: + priority: + type: string + enum: [critical, high, medium, low] + category: + type: string + products: + type: array + items: + type: string + error_codes: + type: array + items: + type: string + required: [priority, category, products, error_codes] + additionalProperties: false + + - name: store-enriched + action: postgres/query + credential: app-db + if: "steps.classify.output.json.priority == 'critical' || steps.classify.output.json.priority == 'high'" + params: + query: > + INSERT INTO urgent_tickets (priority, category, products, raw_body) + VALUES ($1, $2, $3, $4) + params: + - "{{ steps.classify.output.json.priority }}" + - "{{ steps.classify.output.json.category }}" + - "{{ jsonEncode(steps.classify.output.json.products) }}" + - "{{ steps['fetch-tickets'].output.body }}" diff --git a/examples/data-transform-api-to-db.yaml b/examples/data-transform-api-to-db.yaml new file mode 100644 index 0000000..6cbec2f --- /dev/null +++ b/examples/data-transform-api-to-db.yaml @@ -0,0 +1,23 @@ +name: data-transform-api-to-db +description: > + Fetches user data from an API, transforms each record using CEL + expressions to match a database schema, and inserts the normalized + 
records into Postgres. Demonstrates map(), obj(), toLower(), and + type coercion without requiring an AI model. + +steps: + - name: fetch-users + action: http/request + timeout: "15s" + params: + method: GET + url: "https://jsonplaceholder.typicode.com/users" + headers: + Accept: "application/json" + + - name: store-users + action: postgres/query + credential: app-db + params: + query: "INSERT INTO users (username, email, city) VALUES ($1, $2, $3)" + params: "{{ steps['fetch-users'].output.json.map(u, [u.username.toLower(), u.email.toLower(), u.address.city]) }}" From 7085f68bca5775665eb53b3a5a68553147f69642 Mon Sep 17 00:00:00 2001 From: Michael McNees Date: Tue, 24 Mar 2026 20:46:58 -0400 Subject: [PATCH 13/21] docs: add data transformation patterns guide (#14) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Covers three patterns — CEL-only structural transforms, AI-powered semantic transforms, and hybrid workflows that combine both. Includes a decision guide and a quick-reference table of all available CEL extension functions. Co-Authored-By: Claude Sonnet 4.6 --- .../getting-started/data-transformations.md | 308 ++++++++++++++++++ 1 file changed, 308 insertions(+) create mode 100644 site/src/content/docs/getting-started/data-transformations.md diff --git a/site/src/content/docs/getting-started/data-transformations.md b/site/src/content/docs/getting-started/data-transformations.md new file mode 100644 index 0000000..92212cc --- /dev/null +++ b/site/src/content/docs/getting-started/data-transformations.md @@ -0,0 +1,308 @@ +# Data Transformations + +Workflows rarely move data from one place to another without changing its shape. A user record from one API uses camelCase and ISO dates; your database expects snake_case and Unix timestamps. A support ticket arrives as unstructured text; your alerting system needs a priority label and extracted entity names. 
+ +Mantle handles both categories: mechanical transforms using CEL expressions, and semantic transforms using the AI connector. This guide covers each pattern and when to reach for each one. + +## Pattern 1: Structural Transforms with CEL + +Use CEL when the mapping between source and target is known and deterministic -- field renaming, case normalization, type coercion, and filtering. CEL runs in the engine with no external calls, so structural transforms add no latency and no cost. + +**Example:** An upstream API returns user records with camelCase fields and a date of birth string. Your database expects snake_case and rejects the original field names. + +Source: + +```json +{"firstName": "Alice", "lastName": "Smith", "dob": "1995-03-24"} +``` + +Target: + +```json +{"name": "alice smith", "birth_date": "1995-03-24"} +``` + +Here is a complete workflow that fetches a list of user records, reshapes each one, and writes the results to Postgres: + +```yaml +name: normalize-users +description: > + Fetch user records from the upstream API, normalize field names and + casing, then insert into the local database. + +steps: + - name: fetch-users + action: http/request + params: + method: GET + url: "https://api.example.com/users" + + - name: insert-users + action: http/request + params: + method: POST + url: "https://internal.example.com/db/users/batch" + headers: + Content-Type: "application/json" + body: + records: > + {{ steps['fetch-users'].output.json.users.map(u, + obj( + 'name', toLower(u.firstName + ' ' + u.lastName), + 'birth_date', u.dob + ) + ) }} +``` + +What the CEL expression does: + +- `.map(u, ...)` -- iterates the `users` list, binding each element to `u` +- `obj('name', ..., 'birth_date', ...)` -- constructs a new object with the renamed keys +- `toLower(...)` -- normalizes the full name to lowercase +- String concatenation (`+`) joins first and last name with a space + +The output of the `map()` call is a new list of objects ready for the batch insert. 
Nothing left the workflow engine. + +## Pattern 2: AI-Powered Transforms + +Use the AI connector when the transform requires interpretation, classification, or understanding that a deterministic expression cannot provide. Common cases: classifying priority from free-form text, extracting named entities, generating summaries, or translating between domain vocabularies. + +**Example:** Raw support tickets arrive as unstructured text. Your team needs each ticket classified by priority, categorized by product area, and tagged with any affected usernames or order IDs mentioned in the body. + +Here is a workflow that fetches open tickets, uses the AI connector to extract structured data, and conditionally routes high-priority items to a separate store: + +```yaml +name: classify-tickets +description: > + Fetch open support tickets, classify each one using structured AI output, + and store high-priority tickets in the escalation queue. + +steps: + - name: fetch-tickets + action: http/request + params: + method: GET + url: "https://support.example.com/api/tickets?status=open" + + - name: classify + action: ai/completion + params: + provider: openai + credential: openai + model: gpt-4o-mini + prompt: > + Classify the following support ticket. Extract the priority, product + area, and any entity identifiers (usernames, order IDs) mentioned. 
+ + Ticket: + {{ steps['fetch-tickets'].output.json.tickets[0].body }} + output_schema: + type: object + properties: + priority: + type: string + enum: [low, medium, high, critical] + product_area: + type: string + enum: [billing, authentication, api, ui, other] + entities: + type: array + items: + type: object + properties: + type: + type: string + value: + type: string + required: [priority, product_area, entities] + + - name: store-escalation + action: http/request + if: > + steps.classify.output.json.priority == 'high' || + steps.classify.output.json.priority == 'critical' + params: + method: POST + url: "https://internal.example.com/escalation-queue" + headers: + Content-Type: "application/json" + body: + ticket_id: "{{ steps['fetch-tickets'].output.json.tickets[0].id }}" + priority: "{{ steps.classify.output.json.priority }}" + product_area: "{{ steps.classify.output.json.product_area }}" + entities: "{{ steps.classify.output.json.entities }}" +``` + +Key points: + +- `output_schema` tells the AI connector to return structured JSON matching the schema, not free-form text. The engine validates the response against the schema before making it available as `output.json`. +- The `if` field on `store-escalation` is a bare CEL expression that reads from the AI step's structured output. CEL works on the schema-validated object directly. +- For bulk processing, wrap this pattern in a `forEach` or a parent workflow that fans out over the ticket list. + +See the [AI Workflows guide](/docs/getting-started/ai-workflows) for credential setup and the full `output_schema` reference. + +## Pattern 3: Hybrid Transforms + +Combine CEL for structural normalization with the AI connector for semantic enrichment. Use CEL first to extract and reshape the fields you need, then pass the cleaned data to the AI step. This keeps prompts concise and keeps AI costs proportional to the semantic work required. 
+ +**Example:** A product reviews feed includes raw ratings, dates, and freeform review text mixed with metadata you do not need. You want to store normalized records enriched with sentiment labels and key themes. + +```yaml +name: enrich-reviews +description: > + Fetch product reviews, normalize structure with CEL, enrich each review + with AI-classified sentiment and themes, then store the enriched records. + +steps: + - name: fetch-reviews + action: http/request + params: + method: GET + url: "https://api.example.com/products/{{ inputs.product_id }}/reviews" + + - name: normalize + action: http/request + params: + method: POST + url: "https://internal.example.com/transform/passthrough" + headers: + Content-Type: "application/json" + body: + reviews: > + {{ steps['fetch-reviews'].output.json.data.map(r, + obj( + 'id', r.reviewId, + 'rating', r.starRating, + 'reviewed_at', r.submittedAt, + 'text', trim(r.body) + ) + ) }} + + - name: enrich + action: ai/completion + params: + provider: openai + credential: openai + model: gpt-4o-mini + prompt: > + Analyze the following product reviews and classify the sentiment + and key themes for each one. + + Reviews: + {{ steps.normalize.output.json.reviews }} + output_schema: + type: array + items: + type: object + properties: + id: + type: string + sentiment: + type: string + enum: [positive, neutral, negative] + themes: + type: array + items: + type: string + required: [id, sentiment, themes] + + - name: store + action: http/request + params: + method: POST + url: "https://internal.example.com/db/reviews/batch" + headers: + Content-Type: "application/json" + body: + records: "{{ steps.enrich.output.json }}" + +inputs: + product_id: + type: string + description: The product ID to fetch and enrich reviews for +``` + +The three-stage pattern: + +1. **Fetch** -- pull raw data from the source +2. **Normalize (CEL)** -- extract only the fields you need, rename them, trim whitespace, coerce types +3. 
**Enrich (AI)** -- pass the clean, minimal payload to the AI step; the smaller the input, the lower the token cost and the more reliable the output + +The AI step receives already-cleaned data, so the prompt stays focused on the semantic task rather than field mapping instructions. + +## When to Use CEL vs AI + +| Situation | Use | +|---|---| +| Rename or reorder fields | CEL | +| Change string case | CEL | +| Parse or format dates and timestamps | CEL | +| Filter a list by a field value | CEL | +| Convert types (string to int, etc.) | CEL | +| Compose values from multiple fields | CEL | +| Classify text into known categories | AI | +| Extract named entities from prose | AI | +| Determine sentiment or tone | AI | +| Summarize freeform content | AI | +| Map between domain vocabularies without a fixed rule | AI | +| Structural reshape + semantic enrichment | Hybrid | + +The decision is usually straightforward: if you could write the rule as an `if` statement in Go, use CEL. If you would struggle to enumerate all the cases, use AI. + +## Available Functions Reference + +These are the Mantle CEL extensions available in workflow expressions. For full signatures, examples, and edge cases, see the [Expressions guide](/docs/concepts/expressions). 
+ +**List macros** + +| Function | Description | +|---|---| +| `.map(var, expr)` | Transform each element, returning a new list | +| `.filter(var, expr)` | Return elements where `expr` evaluates to true | +| `.exists(var, expr)` | True if any element satisfies `expr` | +| `.all(var, expr)` | True if every element satisfies `expr` | + +**String** + +| Function | Description | +|---|---| +| `toLower(s)` | Convert string to lowercase | +| `toUpper(s)` | Convert string to uppercase | +| `trim(s)` | Remove leading and trailing whitespace | +| `replace(s, old, new)` | Replace all occurrences of `old` with `new` | +| `split(s, sep)` | Split string into a list on separator | + +**Object construction** + +| Function | Description | +|---|---| +| `obj(key, val, ...)` | Construct a map from alternating key-value arguments | + +**Type coercion** + +| Function | Description | +|---|---| +| `parseInt(s)` | Parse string to integer | +| `parseFloat(s)` | Parse string to float | +| `toString(v)` | Convert any value to its string representation | + +**JSON** + +| Function | Description | +|---|---| +| `jsonEncode(v)` | Serialize a value to a JSON string | +| `jsonDecode(s)` | Parse a JSON string into a CEL value | + +**Time** + +| Function | Description | +|---|---| +| `parseTimestamp(s)` | Parse an RFC 3339 string into a timestamp | +| `formatTimestamp(t, layout)` | Format a timestamp using a Go time layout string | + +**Utility** + +| Function | Description | +|---|---| +| `default(v, fallback)` | Return `v` if it is set and non-null, otherwise `fallback` | +| `flatten(list)` | Flatten a list of lists into a single list | From ee7f873bb0c836b5ef0f6b4d81144bc75f365836 Mon Sep 17 00:00:00 2001 From: Michael McNees Date: Tue, 24 Mar 2026 20:55:14 -0400 Subject: [PATCH 14/21] =?UTF-8?q?fix:=20address=20CodeRabbit=20review=20?= =?UTF-8?q?=E2=80=94=20null=20handling,=20date=20formats,=20docs,=20tests?= =?UTF-8?q?=20(#14)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit - default(): fall through to rhs when lhs is null (types.NullValue) - parseTimestamp(): try RFC3339Nano, bare ISO date, US date, and named-month formats before erroring - expressions.md: remove undeclared `now` variable from two examples - data-transformations.md: add missing exists_one row to list-macros table - data-transform-api-to-db.yaml: simplify to single-user fetch+insert (fixes broken batch param shape) - functions_test.go: add null/fallback tests for default(), edge-case tests for flatten(), and new format tests for parseTimestamp() Co-Authored-By: Claude Sonnet 4.6 --- examples/data-transform-api-to-db.yaml | 19 ++++++++------- internal/cel/functions.go | 19 +++++++++++---- internal/cel/functions_test.go | 24 +++++++++++++++++++ site/src/content/docs/concepts/expressions.md | 4 ++-- .../getting-started/data-transformations.md | 1 + 5 files changed, 52 insertions(+), 15 deletions(-) diff --git a/examples/data-transform-api-to-db.yaml b/examples/data-transform-api-to-db.yaml index 6cbec2f..7570dad 100644 --- a/examples/data-transform-api-to-db.yaml +++ b/examples/data-transform-api-to-db.yaml @@ -1,23 +1,26 @@ name: data-transform-api-to-db description: > - Fetches user data from an API, transforms each record using CEL - expressions to match a database schema, and inserts the normalized - records into Postgres. Demonstrates map(), obj(), toLower(), and - type coercion without requiring an AI model. + Fetches a user from an API, transforms the record using CEL expressions + to match a database schema, and inserts the normalized data into Postgres. + Demonstrates obj(), toLower(), and string functions without requiring + an AI model. 
steps: - - name: fetch-users + - name: fetch-user action: http/request timeout: "15s" params: method: GET - url: "https://jsonplaceholder.typicode.com/users" + url: "https://jsonplaceholder.typicode.com/users/1" headers: Accept: "application/json" - - name: store-users + - name: store-user action: postgres/query credential: app-db params: query: "INSERT INTO users (username, email, city) VALUES ($1, $2, $3)" - params: "{{ steps['fetch-users'].output.json.map(u, [u.username.toLower(), u.email.toLower(), u.address.city]) }}" + args: + - "{{ steps['fetch-user'].output.json.username.toLower() }}" + - "{{ steps['fetch-user'].output.json.email.toLower() }}" + - "{{ steps['fetch-user'].output.json.address.city }}" diff --git a/internal/cel/functions.go b/internal/cel/functions.go index a0273b1..2cfe3c7 100644 --- a/internal/cel/functions.go +++ b/internal/cel/functions.go @@ -159,7 +159,7 @@ func (l *collectionLib) CompileOptions() []cel.EnvOption { []*cel.Type{cel.DynType, cel.DynType}, cel.DynType, cel.BinaryBinding(func(lhs, rhs ref.Val) ref.Val { - if types.IsError(lhs) || types.IsUnknown(lhs) { + if types.IsError(lhs) || types.IsUnknown(lhs) || lhs == types.NullValue { return rhs } return lhs @@ -304,11 +304,20 @@ func (l *timeLib) CompileOptions() []cel.EnvOption { cel.TimestampType, cel.UnaryBinding(func(val ref.Val) ref.Val { s := string(val.(types.String)) - t, err := time.Parse(time.RFC3339, s) - if err != nil { - return types.NewErr("parseTimestamp: %v", err) + layouts := []string{ + time.RFC3339, + time.RFC3339Nano, + "2006-01-02T15:04:05", + "2006-01-02", + "01/02/2006", + "Jan 2, 2006", + } + for _, layout := range layouts { + if t, err := time.Parse(layout, s); err == nil { + return types.Timestamp{Time: t} + } } - return types.Timestamp{Time: t} + return types.NewErr("parseTimestamp: unable to parse %q (tried RFC3339, ISO 8601 date, and common formats)", s) }), ), ), diff --git a/internal/cel/functions_test.go b/internal/cel/functions_test.go index 
aaac44c..7479c78 100644 --- a/internal/cel/functions_test.go +++ b/internal/cel/functions_test.go @@ -253,6 +253,8 @@ func TestFunc_Default(t *testing.T) { }{ {"value exists returns value", `default("hello", "fallback")`, "hello"}, {"empty string returns empty string", `default("", "fallback")`, ""}, + {"null returns fallback", `default(null, "fallback")`, "fallback"}, + {"non-null int unchanged", `default(42, 0)`, int64(42)}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -276,6 +278,11 @@ func TestFunc_Flatten(t *testing.T) { expr: `flatten([[1, 2], [3, 4], [5]])`, want: []any{int64(1), int64(2), int64(3), int64(4), int64(5)}, }, + { + name: "mixed nested and non-nested", + expr: `flatten([[1], [3, 4]])`, + want: []any{int64(1), int64(3), int64(4)}, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -284,6 +291,20 @@ func TestFunc_Flatten(t *testing.T) { assert.Equal(t, tt.want, result) }) } + + // Empty list: flatten([]) — result may be nil or empty slice. + t.Run("empty list", func(t *testing.T) { + ctx := newTestContext() + ctx.Inputs["empty"] = []any{} + result, err := eval.Eval(`flatten(inputs.empty)`, ctx) + require.NoError(t, err) + // CEL may return nil or an empty slice for an empty list; both are acceptable. 
+ if result != nil { + list, ok := result.([]any) + require.True(t, ok, "expected []any, got %T", result) + assert.Empty(t, list) + } + }) } // Task 7: jsonEncode and jsonDecode @@ -364,6 +385,9 @@ func TestFunc_Timestamp(t *testing.T) { {"iso8601", `parseTimestamp("2024-01-15T00:00:00Z")`, false}, {"with_offset", `parseTimestamp("2024-06-01T12:30:00+05:30")`, false}, {"invalid", `parseTimestamp("not-a-date")`, true}, + {"date_only", `parseTimestamp("2026-03-24")`, false}, + {"us_date", `parseTimestamp("03/24/2026")`, false}, + {"rfc3339nano", `parseTimestamp("2026-03-24T19:00:00.123456789Z")`, false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/site/src/content/docs/concepts/expressions.md b/site/src/content/docs/concepts/expressions.md index fc58d0d..e52497c 100644 --- a/site/src/content/docs/concepts/expressions.md +++ b/site/src/content/docs/concepts/expressions.md @@ -596,7 +596,7 @@ Parses an ISO 8601 / RFC 3339 string to a CEL timestamp value. Named `parseTimes steps: - name: check-expiry action: http/request - if: "parseTimestamp(steps.fetch.output.json.expires_at) < now" + if: "parseTimestamp(steps.fetch.output.json.expires_at) < parseTimestamp(\"2026-12-31T00:00:00Z\")" params: method: POST url: "https://api.example.com/renew" @@ -619,7 +619,7 @@ steps: # Format as "2006-01-02" (Go layout for YYYY-MM-DD) report_date: "{{ formatTimestamp(parseTimestamp(steps.fetch.output.json.created_at), '2006-01-02') }}" # Format with time for a human-readable label - label: "Report for {{ formatTimestamp(now, 'Jan 2, 2006') }}" + label: "Report for {{ formatTimestamp(parseTimestamp(steps.fetch.output.json.created_at), 'Jan 2, 2006') }}" ``` ## Limitations diff --git a/site/src/content/docs/getting-started/data-transformations.md b/site/src/content/docs/getting-started/data-transformations.md index 92212cc..03e42df 100644 --- a/site/src/content/docs/getting-started/data-transformations.md +++ 
b/site/src/content/docs/getting-started/data-transformations.md @@ -260,6 +260,7 @@ These are the Mantle CEL extensions available in workflow expressions. For full | `.map(var, expr)` | Transform each element, returning a new list | | `.filter(var, expr)` | Return elements where `expr` evaluates to true | | `.exists(var, expr)` | True if any element satisfies `expr` | +| `.exists_one(var, expr)` | True if exactly one element satisfies `expr` | | `.all(var, expr)` | True if every element satisfies `expr` | **String** From 3473d2a420aadf2f6ff3a80826aa75ce78f4c4d8 Mon Sep 17 00:00:00 2001 From: Michael McNees Date: Tue, 24 Mar 2026 21:15:15 -0400 Subject: [PATCH 15/21] =?UTF-8?q?fix:=20address=20PR=20#21=20review=20?= =?UTF-8?q?=E2=80=94=20non-strict=20default,=20jsonDecode=20precision,=20d?= =?UTF-8?q?ocs=20accuracy?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-Authored-By: Claude Opus 4.6 (1M context) --- .../plans/2026-03-24-data-transformation.md | 46 ++++++++++--------- .../2026-03-24-data-transformation-design.md | 13 +++--- examples/ai-data-enrichment.yaml | 2 +- examples/data-transform-api-to-db.yaml | 3 +- internal/cel/functions.go | 34 +++++++++++++- internal/cel/functions_test.go | 8 +++- site/src/content/docs/concepts/expressions.md | 2 +- .../getting-started/data-transformations.md | 20 ++++---- 8 files changed, 83 insertions(+), 45 deletions(-) diff --git a/docs/superpowers/plans/2026-03-24-data-transformation.md b/docs/superpowers/plans/2026-03-24-data-transformation.md index 90e0bc4..a0702be 100644 --- a/docs/superpowers/plans/2026-03-24-data-transformation.md +++ b/docs/superpowers/plans/2026-03-24-data-transformation.md @@ -12,7 +12,7 @@ --- -### Task 1: Test and document built-in macros +## Task 1: Test and document built-in macros **Files:** - Create: `internal/cel/macros_test.go` @@ -130,7 +130,7 @@ git commit -m "test(cel): add coverage for built-in map/filter/exists/all macros --- -### Task 2: 
String functions — toLower, toUpper, trim +## Task 2: String functions — toLower, toUpper, trim **Files:** - Create: `internal/cel/functions.go` @@ -329,7 +329,7 @@ git commit -m "feat(cel): add toLower, toUpper, trim string functions" --- -### Task 3: String functions — replace and split +## Task 3: String functions — replace and split **Files:** - Modify: `internal/cel/functions.go` @@ -441,7 +441,7 @@ git commit -m "feat(cel): add replace and split string functions" --- -### Task 4: Type coercion — parseInt, parseFloat, toString +## Task 4: Type coercion — parseInt, parseFloat, toString **Files:** - Modify: `internal/cel/functions.go` @@ -621,7 +621,9 @@ git commit -m "feat(cel): add parseInt, parseFloat, toString type coercion funct --- -### Task 5: Object construction — obj() +## Task 5: Object construction — obj() + +> **Implementation note:** The plan originally specified a variadic `obj()` overload, but cel-go does not support true variadic functions without macros. The implementation uses fixed-arity overloads for 2, 4, 6, 8, and 10 arguments (1–5 key-value pairs), all sharing a single `objBinding` function. 
**Files:** - Modify: `internal/cel/functions.go` @@ -755,7 +757,7 @@ git commit -m "feat(cel): add obj() map construction function" --- -### Task 6: Utility functions — default, flatten +## Task 6: Utility functions — default, flatten **Files:** - Modify: `internal/cel/functions.go` @@ -881,7 +883,7 @@ git commit -m "feat(cel): add default() null coalescing and flatten() functions" --- -### Task 7: JSON functions — jsonEncode, jsonDecode +## Task 7: JSON functions — jsonEncode, jsonDecode **Files:** - Modify: `internal/cel/functions.go` @@ -1015,7 +1017,7 @@ git commit -m "feat(cel): add jsonEncode and jsonDecode functions" --- -### Task 8: Date/time functions — timestamp, formatTimestamp +## Task 8: Date/time functions — parseTimestamp, formatTimestamp **Files:** - Modify: `internal/cel/functions.go` @@ -1035,9 +1037,9 @@ func TestFunc_Timestamp(t *testing.T) { expr string wantErr bool }{ - {"iso8601", `timestamp("2026-03-24T19:00:00Z")`, false}, - {"with_offset", `timestamp("2026-03-24T14:00:00-05:00")`, false}, - {"invalid", `timestamp("not a date")`, true}, + {"iso8601", `parseTimestamp("2026-03-24T19:00:00Z")`, false}, + {"with_offset", `parseTimestamp("2026-03-24T14:00:00-05:00")`, false}, + {"invalid", `parseTimestamp("not a date")`, true}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -1055,11 +1057,11 @@ func TestFunc_FormatTimestamp(t *testing.T) { eval, err := NewEvaluator() require.NoError(t, err) - result, err := eval.Eval(`formatTimestamp(timestamp("2026-03-24T19:00:00Z"), "2006-01-02")`, newTestContext()) + result, err := eval.Eval(`formatTimestamp(parseTimestamp("2026-03-24T19:00:00Z"), "2006-01-02")`, newTestContext()) require.NoError(t, err) assert.Equal(t, "2026-03-24", result) - result, err = eval.Eval(`formatTimestamp(timestamp("2026-03-24T19:30:45Z"), "15:04:05")`, newTestContext()) + result, err = eval.Eval(`formatTimestamp(parseTimestamp("2026-03-24T19:30:45Z"), "15:04:05")`, newTestContext()) require.NoError(t, err) 
assert.Equal(t, "19:30:45", result) } @@ -1068,7 +1070,7 @@ func TestFunc_FormatTimestamp(t *testing.T) { - [ ] **Step 2: Run tests to verify they fail** Run: `go test ./internal/cel/ -run "TestFunc_Timestamp|TestFunc_FormatTimestamp" -v` -Expected: FAIL +Expected: FAIL (functions registered as `parseTimestamp`, not `timestamp`) - [ ] **Step 3: Add date/time functions** @@ -1083,15 +1085,15 @@ type timeLib struct{} func (l *timeLib) CompileOptions() []cel.EnvOption { return []cel.EnvOption{ - cel.Function("timestamp", - cel.Overload("timestamp_string", + cel.Function("parseTimestamp", + cel.Overload("parseTimestamp_string", []*cel.Type{cel.StringType}, cel.TimestampType, cel.UnaryBinding(func(val ref.Val) ref.Val { s := string(val.(types.String)) t, err := time.Parse(time.RFC3339, s) if err != nil { - return types.NewErr("timestamp: %v", err) + return types.NewErr("parseTimestamp: %v", err) } return types.Timestamp{Time: t} }), @@ -1144,12 +1146,12 @@ Expected: PASS — all tests including existing ones ```bash git add internal/cel/functions.go internal/cel/functions_test.go -git commit -m "feat(cel): add timestamp and formatTimestamp date/time functions" +git commit -m "feat(cel): add parseTimestamp and formatTimestamp date/time functions" ``` --- -### Task 9: Update CEL expressions documentation +## Task 9: Update CEL expressions documentation **Files:** - Modify: `site/src/content/docs/concepts/expressions.md` @@ -1207,7 +1209,7 @@ git commit -m "docs: add custom CEL functions and macros to expressions referenc --- -### Task 10: Create data transformations guide +## Task 10: Create data transformations guide **Files:** - Create: `site/src/content/docs/getting-started/data-transformations.md` @@ -1243,7 +1245,7 @@ git commit -m "docs: add data transformation patterns guide (#14)" --- -### Task 11: Create example workflows +## Task 11: Create example workflows **Files:** - Create: `examples/data-transform-api-to-db.yaml` @@ -1360,7 +1362,7 @@ git commit -m "feat: 
add data transformation and AI enrichment example workflows --- -### Task 12: Final validation +## Task 12: Final validation - [ ] **Step 1: Run full test suite** diff --git a/docs/superpowers/specs/2026-03-24-data-transformation-design.md b/docs/superpowers/specs/2026-03-24-data-transformation-design.md index f679f84..15197f7 100644 --- a/docs/superpowers/specs/2026-03-24-data-transformation-design.md +++ b/docs/superpowers/specs/2026-03-24-data-transformation-design.md @@ -69,7 +69,7 @@ Errors on odd number of args or non-string keys. Enables building maps for DB in | Function | Example | Result | |----------|---------|--------| -| `timestamp(string)` | `timestamp("2026-03-24T19:00:00Z")` | timestamp value | +| `parseTimestamp(string)` | `parseTimestamp("2026-03-24T19:00:00Z")` | timestamp value | | `formatTimestamp(ts, layout)` | `formatTimestamp(ts, "2006-01-02")` | `"2026-03-24"` | Uses Go time layout strings. @@ -86,14 +86,15 @@ In `internal/cel/cel.go`, the `NewEvaluator` function passes function options to ```go func NewEvaluator() (*Evaluator, error) { - env, err := cel.NewEnv( + opts := []cel.EnvOption{ cel.Variable("steps", cel.MapType(cel.StringType, cel.DynType)), cel.Variable("inputs", cel.MapType(cel.StringType, cel.DynType)), cel.Variable("env", cel.MapType(cel.StringType, cel.StringType)), cel.Variable("trigger", cel.MapType(cel.StringType, cel.DynType)), - // Custom functions - customFunctions()..., - ) + } + opts = append(opts, customFunctions()...) + + env, err := cel.NewEnv(opts...) // ... } ``` @@ -107,7 +108,7 @@ All errors surface through the existing `Eval` error path: - `obj()` with odd args → evaluation error - `obj()` with non-string keys → evaluation error - `jsonDecode()` with invalid JSON → evaluation error -- `timestamp()` with unparseable string → evaluation error +- `parseTimestamp()` with unparseable string → evaluation error No new error types needed. 
diff --git a/examples/ai-data-enrichment.yaml b/examples/ai-data-enrichment.yaml index 4c30771..db1c88f 100644 --- a/examples/ai-data-enrichment.yaml +++ b/examples/ai-data-enrichment.yaml @@ -58,7 +58,7 @@ steps: query: > INSERT INTO urgent_tickets (priority, category, products, raw_body) VALUES ($1, $2, $3, $4) - params: + args: - "{{ steps.classify.output.json.priority }}" - "{{ steps.classify.output.json.category }}" - "{{ jsonEncode(steps.classify.output.json.products) }}" diff --git a/examples/data-transform-api-to-db.yaml b/examples/data-transform-api-to-db.yaml index 7570dad..4ea795f 100644 --- a/examples/data-transform-api-to-db.yaml +++ b/examples/data-transform-api-to-db.yaml @@ -2,8 +2,7 @@ name: data-transform-api-to-db description: > Fetches a user from an API, transforms the record using CEL expressions to match a database schema, and inserts the normalized data into Postgres. - Demonstrates obj(), toLower(), and string functions without requiring - an AI model. + Demonstrates toLower() and string functions without requiring an AI model. 
steps: - name: fetch-user diff --git a/internal/cel/functions.go b/internal/cel/functions.go index 2cfe3c7..d799b3d 100644 --- a/internal/cel/functions.go +++ b/internal/cel/functions.go @@ -158,6 +158,7 @@ func (l *collectionLib) CompileOptions() []cel.EnvOption { cel.Overload("default_any_any", []*cel.Type{cel.DynType, cel.DynType}, cel.DynType, + cel.OverloadIsNonStrict(), cel.BinaryBinding(func(lhs, rhs ref.Val) ref.Val { if types.IsError(lhs) || types.IsUnknown(lhs) || lhs == types.NullValue { return rhs @@ -273,11 +274,13 @@ func (l *jsonLib) CompileOptions() []cel.EnvOption { cel.DynType, cel.UnaryBinding(func(val ref.Val) ref.Val { s := string(val.(types.String)) + dec := json.NewDecoder(strings.NewReader(s)) + dec.UseNumber() var result any - if err := json.Unmarshal([]byte(s), &result); err != nil { + if err := dec.Decode(&result); err != nil { return types.NewErr("jsonDecode: %v", err) } - return types.DefaultTypeAdapter.NativeToValue(result) + return types.DefaultTypeAdapter.NativeToValue(normalizeJSONNumbers(result)) }), ), ), @@ -338,3 +341,30 @@ func (l *timeLib) CompileOptions() []cel.EnvOption { func (l *timeLib) ProgramOptions() []cel.ProgramOption { return nil } + +// normalizeJSONNumbers walks a decoded JSON structure and converts json.Number +// values to int64 (if the number is an integer) or float64. 
+func normalizeJSONNumbers(v any) any { + switch val := v.(type) { + case json.Number: + if i, err := val.Int64(); err == nil { + return i + } + if f, err := val.Float64(); err == nil { + return f + } + return val.String() + case map[string]any: + for k, v := range val { + val[k] = normalizeJSONNumbers(v) + } + return val + case []any: + for i, v := range val { + val[i] = normalizeJSONNumbers(v) + } + return val + default: + return v + } +} diff --git a/internal/cel/functions_test.go b/internal/cel/functions_test.go index 7479c78..be92333 100644 --- a/internal/cel/functions_test.go +++ b/internal/cel/functions_test.go @@ -227,6 +227,12 @@ func TestFunc_Obj(t *testing.T) { expr: `obj("key")`, wantErr: true, }, + { + "non_string_key", + `obj(1, "value")`, + nil, + true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -341,7 +347,7 @@ func TestFunc_JsonDecode(t *testing.T) { m, ok := result.(map[string]any) require.True(t, ok) assert.Equal(t, "bob", m["name"]) - assert.Equal(t, float64(25), m["age"]) + assert.Equal(t, int64(25), m["age"]) }, }, { diff --git a/site/src/content/docs/concepts/expressions.md b/site/src/content/docs/concepts/expressions.md index e52497c..35ee60f 100644 --- a/site/src/content/docs/concepts/expressions.md +++ b/site/src/content/docs/concepts/expressions.md @@ -489,7 +489,7 @@ steps: ### `obj(key, value, ...)` -Builds a map from alternating key-value arguments. Accepts any number of key-value pairs. +Builds a map from alternating key-value arguments. Supports up to 5 key-value pairs (10 arguments) due to cel-go's fixed-arity overload requirement — CEL does not support true variadic functions without macros. For maps with more than 5 pairs, use nested `obj()` calls or construct the value with `jsonDecode`. 
```yaml steps: diff --git a/site/src/content/docs/getting-started/data-transformations.md b/site/src/content/docs/getting-started/data-transformations.md index 03e42df..2d92d75 100644 --- a/site/src/content/docs/getting-started/data-transformations.md +++ b/site/src/content/docs/getting-started/data-transformations.md @@ -48,7 +48,7 @@ steps: records: > {{ steps['fetch-users'].output.json.users.map(u, obj( - 'name', toLower(u.firstName + ' ' + u.lastName), + 'name', (u.firstName + ' ' + u.lastName).toLower(), 'birth_date', u.dob ) ) }} @@ -58,7 +58,7 @@ What the CEL expression does: - `.map(u, ...)` -- iterates the `users` list, binding each element to `u` - `obj('name', ..., 'birth_date', ...)` -- constructs a new object with the renamed keys -- `toLower(...)` -- normalizes the full name to lowercase +- `.toLower()` -- normalizes the full name to lowercase (method call on the concatenated string) - String concatenation (`+`) joins first and last name with a space The output of the `map()` call is a new list of objects ready for the batch insert. Nothing left the workflow engine. @@ -174,7 +174,7 @@ steps: 'id', r.reviewId, 'rating', r.starRating, 'reviewed_at', r.submittedAt, - 'text', trim(r.body) + 'text', r.body.trim() ) ) }} @@ -265,13 +265,13 @@ These are the Mantle CEL extensions available in workflow expressions. 
For full **String** -| Function | Description | -|---|---| -| `toLower(s)` | Convert string to lowercase | -| `toUpper(s)` | Convert string to uppercase | -| `trim(s)` | Remove leading and trailing whitespace | -| `replace(s, old, new)` | Replace all occurrences of `old` with `new` | -| `split(s, sep)` | Split string into a list on separator | +| Function | Example | Description | +|---|---|---| +| `s.toLower()` | `"HELLO".toLower()` | Convert string to lowercase | +| `s.toUpper()` | `"hello".toUpper()` | Convert string to uppercase | +| `s.trim()` | `" a ".trim()` | Remove leading and trailing whitespace | +| `s.replace(old, new)` | `"a-b".replace("-", "_")` | Replace all occurrences of `old` with `new` | +| `s.split(sep)` | `"a,b".split(",")` | Split string into a list on separator | **Object construction** From 2d826be953b1445734a60bc7334ea3cd4662721f Mon Sep 17 00:00:00 2001 From: Michael McNees Date: Tue, 24 Mar 2026 21:42:49 -0400 Subject: [PATCH 16/21] =?UTF-8?q?fix:=20address=20PR=20#21=20review=20roun?= =?UTF-8?q?d=203=20=E2=80=94=20test=20coverage,=20trailing=20JSON,=20docs?= =?UTF-8?q?=20accuracy?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-Authored-By: Claude Opus 4.6 (1M context) --- .../plans/2026-03-24-data-transformation.md | 54 ++++++++++++------- internal/cel/functions.go | 4 ++ internal/cel/functions_test.go | 34 ++++++++---- .../getting-started/data-transformations.md | 6 +-- 4 files changed, 65 insertions(+), 33 deletions(-) diff --git a/docs/superpowers/plans/2026-03-24-data-transformation.md b/docs/superpowers/plans/2026-03-24-data-transformation.md index a0702be..6d5adb2 100644 --- a/docs/superpowers/plans/2026-03-24-data-transformation.md +++ b/docs/superpowers/plans/2026-03-24-data-transformation.md @@ -822,8 +822,9 @@ In `functions.go`, add to `collectionLib.CompileOptions()`: cel.Overload("default_any_any", []*cel.Type{cel.DynType, cel.DynType}, cel.DynType, + cel.OverloadIsNonStrict(), 
cel.BinaryBinding(func(lhs, rhs ref.Val) ref.Val { - if types.IsError(lhs) || types.IsUnknown(lhs) { + if types.IsError(lhs) || types.IsUnknown(lhs) || lhs == types.NullValue { return rhs } return lhs @@ -836,20 +837,20 @@ In `functions.go`, add to `collectionLib.CompileOptions()`: cel.ListType(cel.DynType), cel.UnaryBinding(func(val ref.Val) ref.Val { list := val.(traits.Lister) - var result []ref.Val + var result []any it := list.Iterator() for it.HasNext() == types.True { item := it.Next() if sub, ok := item.(traits.Lister); ok { subIt := sub.Iterator() for subIt.HasNext() == types.True { - result = append(result, subIt.Next()) + result = append(result, refToNative(subIt.Next())) } } else { - result = append(result, item) + result = append(result, refToNative(item)) } } - return types.DefaultTypeAdapter.NativeToValue(nativeSlice(result)) + return types.DefaultTypeAdapter.NativeToValue(result) }), ), ), @@ -974,11 +975,13 @@ func (l *jsonLib) CompileOptions() []cel.EnvOption { cel.DynType, cel.UnaryBinding(func(val ref.Val) ref.Val { s := string(val.(types.String)) + dec := json.NewDecoder(strings.NewReader(s)) + dec.UseNumber() var result any - if err := json.Unmarshal([]byte(s), &result); err != nil { + if err := dec.Decode(&result); err != nil { return types.NewErr("jsonDecode: %v", err) } - return types.DefaultTypeAdapter.NativeToValue(result) + return types.DefaultTypeAdapter.NativeToValue(normalizeJSONNumbers(result)) }), ), ), @@ -1091,11 +1094,20 @@ func (l *timeLib) CompileOptions() []cel.EnvOption { cel.TimestampType, cel.UnaryBinding(func(val ref.Val) ref.Val { s := string(val.(types.String)) - t, err := time.Parse(time.RFC3339, s) - if err != nil { - return types.NewErr("parseTimestamp: %v", err) + layouts := []string{ + time.RFC3339, + time.RFC3339Nano, + "2006-01-02T15:04:05", + "2006-01-02", + "01/02/2006", + "Jan 2, 2006", + } + for _, layout := range layouts { + if t, err := time.Parse(layout, s); err == nil { + return types.Timestamp{Time: t} + 
} } - return types.Timestamp{Time: t} + return types.NewErr("parseTimestamp: unable to parse %q (tried RFC3339, ISO 8601 date, and common formats)", s) }), ), ), @@ -1258,27 +1270,29 @@ In `examples/data-transform-api-to-db.yaml`: ```yaml name: data-transform-api-to-db description: > - Fetches user data from an API, transforms each record using CEL - expressions to match a database schema, and inserts the normalized - records into Postgres. Demonstrates map(), obj(), toLower(), and - type coercion without requiring an AI model. + Fetches a user from an API, transforms the record using CEL expressions + to match a database schema, and inserts the normalized data into Postgres. + Demonstrates toLower() and string functions without requiring an AI model. steps: - - name: fetch-users + - name: fetch-user action: http/request timeout: "15s" params: method: GET - url: "https://jsonplaceholder.typicode.com/users" + url: "https://jsonplaceholder.typicode.com/users/1" headers: Accept: "application/json" - - name: store-users + - name: store-user action: postgres/query credential: app-db params: query: "INSERT INTO users (username, email, city) VALUES ($1, $2, $3)" - params: "{{ steps['fetch-users'].output.json.map(u, [u.username.toLower(), u.email.toLower(), u.address.city]) }}" + args: + - "{{ steps['fetch-user'].output.json.username.toLower() }}" + - "{{ steps['fetch-user'].output.json.email.toLower() }}" + - "{{ steps['fetch-user'].output.json.address.city }}" ``` - [ ] **Step 2: Create AI enrichment example** @@ -1346,7 +1360,7 @@ steps: query: > INSERT INTO urgent_tickets (priority, category, products, raw_body) VALUES ($1, $2, $3, $4) - params: + args: - "{{ steps.classify.output.json.priority }}" - "{{ steps.classify.output.json.category }}" - "{{ jsonEncode(steps.classify.output.json.products) }}" diff --git a/internal/cel/functions.go b/internal/cel/functions.go index d799b3d..806f7c8 100644 --- a/internal/cel/functions.go +++ b/internal/cel/functions.go @@ -280,6 
+280,10 @@ func (l *jsonLib) CompileOptions() []cel.EnvOption { if err := dec.Decode(&result); err != nil { return types.NewErr("jsonDecode: %v", err) } + // Reject trailing data (e.g., "{} {}") + if dec.More() { + return types.NewErr("jsonDecode: unexpected trailing data after JSON value") + } return types.DefaultTypeAdapter.NativeToValue(normalizeJSONNumbers(result)) }), ), diff --git a/internal/cel/functions_test.go b/internal/cel/functions_test.go index be92333..43e089f 100644 --- a/internal/cel/functions_test.go +++ b/internal/cel/functions_test.go @@ -253,20 +253,31 @@ func TestFunc_Default(t *testing.T) { eval, err := NewEvaluator() require.NoError(t, err) tests := []struct { - name string - expr string - want any + name string + expr string + want any + wantErr bool }{ - {"value exists returns value", `default("hello", "fallback")`, "hello"}, - {"empty string returns empty string", `default("", "fallback")`, ""}, - {"null returns fallback", `default(null, "fallback")`, "fallback"}, - {"non-null int unchanged", `default(42, 0)`, int64(42)}, + {"value exists returns value", `default("hello", "fallback")`, "hello", false}, + {"empty string returns empty string", `default("", "fallback")`, "", false}, + {"null returns fallback", `default(null, "fallback")`, "fallback", false}, + {"non-null int unchanged", `default(42, 0)`, int64(42), false}, + { + "missing_key_returns_fallback", + `default(steps.fetch.output.missing, "fallback")`, + "fallback", + false, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result, err := eval.Eval(tt.expr, newTestContext()) - require.NoError(t, err) - assert.Equal(t, tt.want, result) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tt.want, result) + } }) } } @@ -362,6 +373,11 @@ func TestFunc_JsonDecode(t *testing.T) { expr: `jsonDecode("not json")`, wantErr: true, }, + { + name: "trailing_data", + expr: `jsonDecode("{} {}")`, + wantErr: true, + }, } for _, tt := 
range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/site/src/content/docs/getting-started/data-transformations.md b/site/src/content/docs/getting-started/data-transformations.md index 2d92d75..88fd021 100644 --- a/site/src/content/docs/getting-started/data-transformations.md +++ b/site/src/content/docs/getting-started/data-transformations.md @@ -86,9 +86,8 @@ steps: - name: classify action: ai/completion + credential: openai params: - provider: openai - credential: openai model: gpt-4o-mini prompt: > Classify the following support ticket. Extract the priority, product @@ -180,9 +179,8 @@ steps: - name: enrich action: ai/completion + credential: openai params: - provider: openai - credential: openai model: gpt-4o-mini prompt: > Analyze the following product reviews and classify the sentiment From 9a70a0ef012da37c5780e8e4ecb742156c43824f Mon Sep 17 00:00:00 2001 From: Michael McNees Date: Tue, 24 Mar 2026 22:00:09 -0400 Subject: [PATCH 17/21] =?UTF-8?q?fix:=20address=20PR=20#21=20review=20roun?= =?UTF-8?q?d=204=20=E2=80=94=20EOF=20trailing=20check,=20docs,=20plan=20cl?= =?UTF-8?q?eanup?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - jsonDecode: replace dec.More() with dec.Decode(&trailing) EOF check to catch all trailing data (e.g., "{}]", "1}") - Add trailing_bracket and trailing_brace regression tests - Update parseTimestamp description in data-transformations.md to list all accepted formats - Plan: rename TestFunc_Timestamp to TestFunc_ParseTimestamp, add golangci-lint to final validation checklist Co-Authored-By: Claude Opus 4.6 (1M context) --- .../plans/2026-03-24-data-transformation.md | 11 +++++++---- internal/cel/functions.go | 6 ++++-- internal/cel/functions_test.go | 10 ++++++++++ .../docs/getting-started/data-transformations.md | 2 +- 4 files changed, 22 insertions(+), 7 deletions(-) diff --git a/docs/superpowers/plans/2026-03-24-data-transformation.md 
b/docs/superpowers/plans/2026-03-24-data-transformation.md index 6d5adb2..f8bc103 100644 --- a/docs/superpowers/plans/2026-03-24-data-transformation.md +++ b/docs/superpowers/plans/2026-03-24-data-transformation.md @@ -1031,7 +1031,7 @@ git commit -m "feat(cel): add jsonEncode and jsonDecode functions" Append to `internal/cel/functions_test.go`: ```go -func TestFunc_Timestamp(t *testing.T) { +func TestFunc_ParseTimestamp(t *testing.T) { eval, err := NewEvaluator() require.NoError(t, err) @@ -1072,8 +1072,8 @@ func TestFunc_FormatTimestamp(t *testing.T) { - [ ] **Step 2: Run tests to verify they fail** -Run: `go test ./internal/cel/ -run "TestFunc_Timestamp|TestFunc_FormatTimestamp" -v` -Expected: FAIL (functions registered as `parseTimestamp`, not `timestamp`) +Run: `go test ./internal/cel/ -run "TestFunc_ParseTimestamp|TestFunc_FormatTimestamp" -v` +Expected: FAIL — functions not registered yet - [ ] **Step 3: Add date/time functions** @@ -1383,11 +1383,14 @@ git commit -m "feat: add data transformation and AI enrichment example workflows Run: `go test ./internal/cel/ -v` Expected: PASS — all function tests, macro tests, and existing tests -- [ ] **Step 2: Run go vet** +- [ ] **Step 2: Run go vet and golangci-lint** Run: `go vet ./internal/cel/` Expected: clean +Run: `golangci-lint run ./...` +Expected: clean + - [ ] **Step 3: Verify site builds** Run: `cd site && npm run build` diff --git a/internal/cel/functions.go b/internal/cel/functions.go index 806f7c8..a275020 100644 --- a/internal/cel/functions.go +++ b/internal/cel/functions.go @@ -3,6 +3,7 @@ package cel import ( "encoding/json" "fmt" + "io" "strconv" "strings" "time" @@ -280,8 +281,9 @@ func (l *jsonLib) CompileOptions() []cel.EnvOption { if err := dec.Decode(&result); err != nil { return types.NewErr("jsonDecode: %v", err) } - // Reject trailing data (e.g., "{} {}") - if dec.More() { + // Reject trailing data by attempting a second decode — must hit EOF. 
+ var trailing json.RawMessage + if err := dec.Decode(&trailing); err != io.EOF { return types.NewErr("jsonDecode: unexpected trailing data after JSON value") } return types.DefaultTypeAdapter.NativeToValue(normalizeJSONNumbers(result)) diff --git a/internal/cel/functions_test.go b/internal/cel/functions_test.go index 43e089f..5b48e33 100644 --- a/internal/cel/functions_test.go +++ b/internal/cel/functions_test.go @@ -378,6 +378,16 @@ func TestFunc_JsonDecode(t *testing.T) { expr: `jsonDecode("{} {}")`, wantErr: true, }, + { + name: "trailing_bracket", + expr: `jsonDecode("{}]")`, + wantErr: true, + }, + { + name: "trailing_brace", + expr: `jsonDecode("1}")`, + wantErr: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/site/src/content/docs/getting-started/data-transformations.md b/site/src/content/docs/getting-started/data-transformations.md index 88fd021..f5ce96a 100644 --- a/site/src/content/docs/getting-started/data-transformations.md +++ b/site/src/content/docs/getting-started/data-transformations.md @@ -296,7 +296,7 @@ These are the Mantle CEL extensions available in workflow expressions. 
For full | Function | Description | |---|---| -| `parseTimestamp(s)` | Parse an RFC 3339 string into a timestamp | +| `parseTimestamp(s)` | Parse a date/time string into a timestamp (accepts RFC 3339, RFC 3339 Nano, bare ISO datetimes, date-only, US dates, and named-month formats) | | `formatTimestamp(t, layout)` | Format a timestamp using a Go time layout string | **Utility** From ed5845ceefde1d178f5c6515727022296c7e9843 Mon Sep 17 00:00:00 2001 From: Michael McNees Date: Tue, 24 Mar 2026 22:15:44 -0400 Subject: [PATCH 18/21] =?UTF-8?q?fix:=20address=20PR=20#21=20review=20roun?= =?UTF-8?q?d=205=20=E2=80=94=20precision,=20flatten,=20plan=20accuracy?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - normalizeJSONNumbers: only use float64 for decimals/exponents, preserve overflow integers as strings to avoid silent precision loss - flatten: initialize result as make([]any, 0) so empty lists return non-nil empty slice - Add large_integer_preserved test for jsonDecode - Plan: remove dead split() variable, replace variadic obj() with fixed-arity overloads, add jsonDecode EOF check, fix timestamp refs - data-transformations.md: note obj() 5-pair limit Co-Authored-By: Claude Opus 4.6 (1M context) --- .../plans/2026-03-24-data-transformation.md | 47 ++++++++++--------- internal/cel/functions.go | 20 +++++--- internal/cel/functions_test.go | 22 ++++++--- .../getting-started/data-transformations.md | 2 +- 4 files changed, 54 insertions(+), 37 deletions(-) diff --git a/docs/superpowers/plans/2026-03-24-data-transformation.md b/docs/superpowers/plans/2026-03-24-data-transformation.md index f8bc103..a3edebf 100644 --- a/docs/superpowers/plans/2026-03-24-data-transformation.md +++ b/docs/superpowers/plans/2026-03-24-data-transformation.md @@ -417,10 +417,6 @@ In `functions.go`, add to `stringLib.CompileOptions()`: s := string(lhs.(types.String)) sep := string(rhs.(types.String)) parts := strings.Split(s, sep) - result := make([]ref.Val, 
len(parts)) - for i, p := range parts { - result[i] = types.String(p) - } return types.DefaultTypeAdapter.NativeToValue(parts) }), ), @@ -701,25 +697,25 @@ type collectionLib struct{} func (l *collectionLib) CompileOptions() []cel.EnvOption { return []cel.EnvOption{ + // obj() — register fixed-arity overloads for 1–5 key-value pairs. + // CEL does not support true variadic functions, so we register + // overloads for 2/4/6/8/10 args sharing a common objBinding helper. cel.Function("obj", - cel.Overload("obj_kvpairs", - nil, // variadic — accepts any number of args - cel.DynType, - cel.FunctionBinding(func(args ...ref.Val) ref.Val { - if len(args)%2 != 0 { - return types.NewErr("obj: requires even number of arguments (key-value pairs), got %d", len(args)) - } - m := make(map[string]any, len(args)/2) - for i := 0; i < len(args); i += 2 { - key, ok := args[i].(types.String) - if !ok { - return types.NewErr("obj: key at position %d must be a string, got %s", i, args[i].Type()) - } - m[string(key)] = refToNative(args[i+1]) - } - return types.DefaultTypeAdapter.NativeToValue(m) - }), - ), + cel.Overload("obj_2", + []*cel.Type{cel.DynType, cel.DynType}, + cel.DynType, cel.FunctionBinding(objBinding)), + cel.Overload("obj_4", + []*cel.Type{cel.DynType, cel.DynType, cel.DynType, cel.DynType}, + cel.DynType, cel.FunctionBinding(objBinding)), + cel.Overload("obj_6", + []*cel.Type{cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType}, + cel.DynType, cel.FunctionBinding(objBinding)), + cel.Overload("obj_8", + []*cel.Type{cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType}, + cel.DynType, cel.FunctionBinding(objBinding)), + cel.Overload("obj_10", + []*cel.Type{cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType}, + cel.DynType, cel.FunctionBinding(objBinding)), ), } } @@ -981,6 +977,11 @@ func (l *jsonLib) CompileOptions() 
[]cel.EnvOption { if err := dec.Decode(&result); err != nil { return types.NewErr("jsonDecode: %v", err) } + // Reject trailing data by attempting a second decode — must hit EOF. + var trailing json.RawMessage + if err := dec.Decode(&trailing); err != io.EOF { + return types.NewErr("jsonDecode: unexpected trailing data after JSON value") + } return types.DefaultTypeAdapter.NativeToValue(normalizeJSONNumbers(result)) }), ), @@ -1203,7 +1204,7 @@ After the existing content, add sections covering: - `jsonEncode(value)`, `jsonDecode(string)` **Date/Time Functions:** -- `timestamp(string)`, `formatTimestamp(ts, layout)` with Go layout reference +- `parseTimestamp(string)`, `formatTimestamp(ts, layout)` with Go layout reference Each function should have a brief description and a YAML example showing usage in a workflow step. diff --git a/internal/cel/functions.go b/internal/cel/functions.go index a275020..cbe1d50 100644 --- a/internal/cel/functions.go +++ b/internal/cel/functions.go @@ -174,7 +174,7 @@ func (l *collectionLib) CompileOptions() []cel.EnvOption { cel.ListType(cel.DynType), cel.UnaryBinding(func(val ref.Val) ref.Val { list := val.(traits.Lister) - var result []any + result := make([]any, 0) it := list.Iterator() for it.HasNext() == types.True { item := it.Next() @@ -349,17 +349,25 @@ func (l *timeLib) ProgramOptions() []cel.ProgramOption { } // normalizeJSONNumbers walks a decoded JSON structure and converts json.Number -// values to int64 (if the number is an integer) or float64. +// values to int64 (if the number is an integer) or float64 (if it has a decimal +// or exponent). Numbers that overflow int64 are preserved as strings to avoid +// silent precision loss. func normalizeJSONNumbers(v any) any { switch val := v.(type) { case json.Number: + s := val.String() + // Only attempt float64 for numbers with decimal point or exponent. 
+ if strings.ContainsAny(s, ".eE") { + if f, err := val.Float64(); err == nil { + return f + } + return s + } + // Integer — try int64, fall back to string for overflow. if i, err := val.Int64(); err == nil { return i } - if f, err := val.Float64(); err == nil { - return f - } - return val.String() + return s case map[string]any: for k, v := range val { val[k] = normalizeJSONNumbers(v) diff --git a/internal/cel/functions_test.go b/internal/cel/functions_test.go index 5b48e33..aeb7f18 100644 --- a/internal/cel/functions_test.go +++ b/internal/cel/functions_test.go @@ -309,18 +309,15 @@ func TestFunc_Flatten(t *testing.T) { }) } - // Empty list: flatten([]) — result may be nil or empty slice. + // Empty list: flatten([]) — must return a non-nil empty []any. t.Run("empty list", func(t *testing.T) { ctx := newTestContext() ctx.Inputs["empty"] = []any{} result, err := eval.Eval(`flatten(inputs.empty)`, ctx) require.NoError(t, err) - // CEL may return nil or an empty slice for an empty list; both are acceptable. - if result != nil { - list, ok := result.([]any) - require.True(t, ok, "expected []any, got %T", result) - assert.Empty(t, list) - } + list, ok := result.([]any) + require.True(t, ok, "expected []any, got %T", result) + assert.Empty(t, list) }) } @@ -388,6 +385,17 @@ func TestFunc_JsonDecode(t *testing.T) { expr: `jsonDecode("1}")`, wantErr: true, }, + { + name: "large_integer_preserved", + expr: `jsonDecode("9223372036854775808")`, + check: func(t *testing.T, result any) { + // int64 max is 9223372036854775807 — this overflows. + // Should be preserved as string, not silently converted to float64. 
+ s, ok := result.(string) + require.True(t, ok, "expected string for overflow int, got %T", result) + assert.Equal(t, "9223372036854775808", s) + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/site/src/content/docs/getting-started/data-transformations.md b/site/src/content/docs/getting-started/data-transformations.md index f5ce96a..49d3bfa 100644 --- a/site/src/content/docs/getting-started/data-transformations.md +++ b/site/src/content/docs/getting-started/data-transformations.md @@ -275,7 +275,7 @@ These are the Mantle CEL extensions available in workflow expressions. For full | Function | Description | |---|---| -| `obj(key, val, ...)` | Construct a map from alternating key-value arguments | +| `obj(key, val, ...)` | Construct a map from alternating key-value arguments (up to 5 pairs / 10 args) | **Type coercion** From 7ca272a75d7291e9a853bc0fa2671f35bf87d8ba Mon Sep 17 00:00:00 2001 From: Michael McNees Date: Tue, 24 Mar 2026 22:20:52 -0400 Subject: [PATCH 19/21] fix(plan): correct test selector to TestFunc_ParseTimestamp Co-Authored-By: Claude Opus 4.6 (1M context) --- docs/superpowers/plans/2026-03-24-data-transformation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/superpowers/plans/2026-03-24-data-transformation.md b/docs/superpowers/plans/2026-03-24-data-transformation.md index a3edebf..48163b2 100644 --- a/docs/superpowers/plans/2026-03-24-data-transformation.md +++ b/docs/superpowers/plans/2026-03-24-data-transformation.md @@ -1147,7 +1147,7 @@ func customFunctions() []cel.EnvOption { - [ ] **Step 4: Run tests to verify they pass** -Run: `go test ./internal/cel/ -run "TestFunc_Timestamp|TestFunc_FormatTimestamp" -v` +Run: `go test ./internal/cel/ -run "TestFunc_ParseTimestamp|TestFunc_FormatTimestamp" -v` Expected: PASS - [ ] **Step 5: Run full test suite** From 2fd37e9cabffdda7f7d557147bd502d0f2f597cd Mon Sep 17 00:00:00 2001 From: Michael McNees Date: Tue, 24 Mar 2026 22:22:44 -0400 
Subject: [PATCH 20/21] chore: remove force-added superpowers docs from tracking These internal process docs (specs, plans) are already in .gitignore but were force-added during development. Files remain on disk but are no longer tracked by git. Co-Authored-By: Claude Opus 4.6 (1M context) --- .../plans/2026-03-24-data-transformation.md | 1403 ----------------- .../2026-03-24-init-connection-recovery.md | 981 ------------ .../2026-03-24-data-transformation-design.md | 175 -- ...6-03-24-init-connection-recovery-design.md | 174 -- 4 files changed, 2733 deletions(-) delete mode 100644 docs/superpowers/plans/2026-03-24-data-transformation.md delete mode 100644 docs/superpowers/plans/2026-03-24-init-connection-recovery.md delete mode 100644 docs/superpowers/specs/2026-03-24-data-transformation-design.md delete mode 100644 docs/superpowers/specs/2026-03-24-init-connection-recovery-design.md diff --git a/docs/superpowers/plans/2026-03-24-data-transformation.md b/docs/superpowers/plans/2026-03-24-data-transformation.md deleted file mode 100644 index 48163b2..0000000 --- a/docs/superpowers/plans/2026-03-24-data-transformation.md +++ /dev/null @@ -1,1403 +0,0 @@ -# Data Transformation CEL Functions Implementation Plan - -> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. - -**Goal:** Add custom CEL functions (string, type coercion, object construction, JSON, date/time, collections, null coalescing) and comprehensive documentation for data transformation patterns. - -**Architecture:** Custom functions are registered as `cel.Function` options in `cel.NewEnv()`. A new `functions.go` file defines all functions; `cel.go` is modified only to pass them through. Documentation covers both the new functions and the already-working-but-undocumented macros (`.map()`, `.filter()`, etc.). 
- -**Tech Stack:** Go, cel-go v0.27.0 (`cel.Function`, `cel.Overload`, `cel.UnaryBinding`/`cel.BinaryBinding`/`cel.FunctionBinding`), `encoding/json`, `time` - -**Spec:** `docs/superpowers/specs/2026-03-24-data-transformation-design.md` - ---- - -## Task 1: Test and document built-in macros - -**Files:** -- Create: `internal/cel/macros_test.go` - -These macros already work but have no tests. This task locks in their behavior. - -- [ ] **Step 1: Write tests for built-in macros** - -In `internal/cel/macros_test.go`: - -```go -package cel - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func newListContext() *Context { - return &Context{ - Steps: map[string]map[string]any{ - "fetch": { - "output": map[string]any{ - "items": []any{ - map[string]any{"name": "alice", "age": int64(30)}, - map[string]any{"name": "bob", "age": int64(17)}, - map[string]any{"name": "charlie", "age": int64(25)}, - }, - }, - }, - }, - Inputs: map[string]any{}, - } -} - -func TestMacro_Map(t *testing.T) { - eval, err := NewEvaluator() - require.NoError(t, err) - - result, err := eval.Eval(`steps.fetch.output.items.map(item, item.name)`, newListContext()) - require.NoError(t, err) - assert.Equal(t, []any{"alice", "bob", "charlie"}, result) -} - -func TestMacro_Filter(t *testing.T) { - eval, err := NewEvaluator() - require.NoError(t, err) - - result, err := eval.Eval(`steps.fetch.output.items.filter(item, item.age >= 21)`, newListContext()) - require.NoError(t, err) - - items, ok := result.([]any) - require.True(t, ok) - assert.Len(t, items, 2) -} - -func TestMacro_Exists(t *testing.T) { - eval, err := NewEvaluator() - require.NoError(t, err) - - result, err := eval.Eval(`steps.fetch.output.items.exists(item, item.name == "bob")`, newListContext()) - require.NoError(t, err) - assert.Equal(t, true, result) - - result, err = eval.Eval(`steps.fetch.output.items.exists(item, item.name == "dave")`, newListContext()) - require.NoError(t, err) 
- assert.Equal(t, false, result) -} - -func TestMacro_All(t *testing.T) { - eval, err := NewEvaluator() - require.NoError(t, err) - - result, err := eval.Eval(`steps.fetch.output.items.all(item, item.age > 0)`, newListContext()) - require.NoError(t, err) - assert.Equal(t, true, result) - - result, err = eval.Eval(`steps.fetch.output.items.all(item, item.age >= 21)`, newListContext()) - require.NoError(t, err) - assert.Equal(t, false, result) -} - -func TestMacro_ExistsOne(t *testing.T) { - eval, err := NewEvaluator() - require.NoError(t, err) - - result, err := eval.Eval(`steps.fetch.output.items.exists_one(item, item.name == "alice")`, newListContext()) - require.NoError(t, err) - assert.Equal(t, true, result) -} - -func TestMacro_MapAndFilter_Chained(t *testing.T) { - eval, err := NewEvaluator() - require.NoError(t, err) - - result, err := eval.Eval(`steps.fetch.output.items.filter(item, item.age >= 21).map(item, item.name)`, newListContext()) - require.NoError(t, err) - assert.Equal(t, []any{"alice", "charlie"}, result) -} -``` - -- [ ] **Step 2: Run tests to verify they pass** - -Run: `go test ./internal/cel/ -run "TestMacro_" -v` -Expected: PASS — all macros already work, we're just adding coverage - -- [ ] **Step 3: Commit** - -```bash -git add internal/cel/macros_test.go -git commit -m "test(cel): add coverage for built-in map/filter/exists/all macros" -``` - ---- - -## Task 2: String functions — toLower, toUpper, trim - -**Files:** -- Create: `internal/cel/functions.go` -- Create: `internal/cel/functions_test.go` -- Modify: `internal/cel/cel.go:30-36` - -- [ ] **Step 1: Write failing tests** - -In `internal/cel/functions_test.go`: - -```go -package cel - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestFunc_ToLower(t *testing.T) { - eval, err := NewEvaluator() - require.NoError(t, err) - - tests := []struct { - name string - expr string - want any - }{ - {"basic", `"HELLO".toLower()`, 
"hello"}, - {"mixed", `"Hello World".toLower()`, "hello world"}, - {"already_lower", `"hello".toLower()`, "hello"}, - {"empty", `"".toLower()`, ""}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := eval.Eval(tt.expr, newTestContext()) - require.NoError(t, err) - assert.Equal(t, tt.want, result) - }) - } -} - -func TestFunc_ToUpper(t *testing.T) { - eval, err := NewEvaluator() - require.NoError(t, err) - - tests := []struct { - name string - expr string - want any - }{ - {"basic", `"hello".toUpper()`, "HELLO"}, - {"mixed", `"Hello World".toUpper()`, "HELLO WORLD"}, - {"empty", `"".toUpper()`, ""}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := eval.Eval(tt.expr, newTestContext()) - require.NoError(t, err) - assert.Equal(t, tt.want, result) - }) - } -} - -func TestFunc_Trim(t *testing.T) { - eval, err := NewEvaluator() - require.NoError(t, err) - - tests := []struct { - name string - expr string - want any - }{ - {"spaces", `" hello ".trim()`, "hello"}, - {"tabs", "\"\\thello\\t\".trim()", "hello"}, - {"no_whitespace", `"hello".trim()`, "hello"}, - {"empty", `"".trim()`, ""}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := eval.Eval(tt.expr, newTestContext()) - require.NoError(t, err) - assert.Equal(t, tt.want, result) - }) - } -} -``` - -- [ ] **Step 2: Run tests to verify they fail** - -Run: `go test ./internal/cel/ -run "TestFunc_ToLower|TestFunc_ToUpper|TestFunc_Trim" -v` -Expected: FAIL — functions not registered - -- [ ] **Step 3: Create functions.go with string functions and wire into cel.go** - -In `internal/cel/functions.go`: - -```go -package cel - -import ( - "strings" - - "github.com/google/cel-go/cel" - "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/ref" -) - -// customFunctions returns all custom CEL function options for the Mantle environment. 
-func customFunctions() []cel.EnvOption { - return []cel.EnvOption{ - stringFunctions(), - } -} - -func stringFunctions() cel.EnvOption { - return cel.Lib(&stringLib{}) -} - -type stringLib struct{} - -func (l *stringLib) CompileOptions() []cel.EnvOption { - return []cel.EnvOption{ - cel.Function("toLower", - cel.MemberOverload("string_toLower", - []*cel.Type{cel.StringType}, - cel.StringType, - cel.UnaryBinding(func(val ref.Val) ref.Val { - return types.String(strings.ToLower(string(val.(types.String)))) - }), - ), - ), - cel.Function("toUpper", - cel.MemberOverload("string_toUpper", - []*cel.Type{cel.StringType}, - cel.StringType, - cel.UnaryBinding(func(val ref.Val) ref.Val { - return types.String(strings.ToUpper(string(val.(types.String)))) - }), - ), - ), - cel.Function("trim", - cel.MemberOverload("string_trim", - []*cel.Type{cel.StringType}, - cel.StringType, - cel.UnaryBinding(func(val ref.Val) ref.Val { - return types.String(strings.TrimSpace(string(val.(types.String)))) - }), - ), - ), - } -} - -func (l *stringLib) ProgramOptions() []cel.ProgramOption { - return nil -} -``` - -In `internal/cel/cel.go`, update `NewEvaluator` (lines 30-36) to include custom functions: - -```go -func NewEvaluator() (*Evaluator, error) { - opts := []cel.EnvOption{ - cel.Variable("steps", cel.MapType(cel.StringType, cel.DynType)), - cel.Variable("inputs", cel.MapType(cel.StringType, cel.DynType)), - cel.Variable("env", cel.MapType(cel.StringType, cel.StringType)), - cel.Variable("trigger", cel.MapType(cel.StringType, cel.DynType)), - } - opts = append(opts, customFunctions()...) - - env, err := cel.NewEnv(opts...) 
- if err != nil { - return nil, fmt.Errorf("creating CEL environment: %w", err) - } - return &Evaluator{env: env, envCache: envVars()}, nil -} -``` - -- [ ] **Step 4: Run tests to verify they pass** - -Run: `go test ./internal/cel/ -run "TestFunc_ToLower|TestFunc_ToUpper|TestFunc_Trim" -v` -Expected: PASS - -- [ ] **Step 5: Run all existing CEL tests to verify no regression** - -Run: `go test ./internal/cel/ -v` -Expected: PASS — all existing tests still pass - -- [ ] **Step 6: Commit** - -```bash -git add internal/cel/functions.go internal/cel/functions_test.go internal/cel/cel.go -git commit -m "feat(cel): add toLower, toUpper, trim string functions" -``` - ---- - -## Task 3: String functions — replace and split - -**Files:** -- Modify: `internal/cel/functions.go` -- Modify: `internal/cel/functions_test.go` - -- [ ] **Step 1: Write failing tests** - -Append to `internal/cel/functions_test.go`: - -```go -func TestFunc_Replace(t *testing.T) { - eval, err := NewEvaluator() - require.NoError(t, err) - - tests := []struct { - name string - expr string - want any - }{ - {"basic", `"foo-bar".replace("-", "_")`, "foo_bar"}, - {"multiple", `"a.b.c".replace(".", "/")`, "a/b/c"}, - {"no_match", `"hello".replace("x", "y")`, "hello"}, - {"empty_replacement", `"hello".replace("l", "")`, "heo"}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := eval.Eval(tt.expr, newTestContext()) - require.NoError(t, err) - assert.Equal(t, tt.want, result) - }) - } -} - -func TestFunc_Split(t *testing.T) { - eval, err := NewEvaluator() - require.NoError(t, err) - - tests := []struct { - name string - expr string - want any - }{ - {"comma", `"a,b,c".split(",")`, []any{"a", "b", "c"}}, - {"space", `"hello world".split(" ")`, []any{"hello", "world"}}, - {"no_match", `"hello".split(",")`, []any{"hello"}}, - {"empty_string", `"".split(",")`, []any{""}}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := eval.Eval(tt.expr, 
newTestContext()) - require.NoError(t, err) - assert.Equal(t, tt.want, result) - }) - } -} -``` - -- [ ] **Step 2: Run tests to verify they fail** - -Run: `go test ./internal/cel/ -run "TestFunc_Replace|TestFunc_Split" -v` -Expected: FAIL - -- [ ] **Step 3: Add replace and split to stringLib.CompileOptions** - -In `functions.go`, add to `stringLib.CompileOptions()`: - -```go - cel.Function("replace", - cel.MemberOverload("string_replace", - []*cel.Type{cel.StringType, cel.StringType, cel.StringType}, - cel.StringType, - cel.FunctionBinding(func(args ...ref.Val) ref.Val { - s := string(args[0].(types.String)) - old := string(args[1].(types.String)) - new := string(args[2].(types.String)) - return types.String(strings.ReplaceAll(s, old, new)) - }), - ), - ), - cel.Function("split", - cel.MemberOverload("string_split", - []*cel.Type{cel.StringType, cel.StringType}, - cel.ListType(cel.StringType), - cel.BinaryBinding(func(lhs, rhs ref.Val) ref.Val { - s := string(lhs.(types.String)) - sep := string(rhs.(types.String)) - parts := strings.Split(s, sep) - return types.DefaultTypeAdapter.NativeToValue(parts) - }), - ), - ), -``` - -- [ ] **Step 4: Run tests to verify they pass** - -Run: `go test ./internal/cel/ -run "TestFunc_Replace|TestFunc_Split" -v` -Expected: PASS - -- [ ] **Step 5: Commit** - -```bash -git add internal/cel/functions.go internal/cel/functions_test.go -git commit -m "feat(cel): add replace and split string functions" -``` - ---- - -## Task 4: Type coercion — parseInt, parseFloat, toString - -**Files:** -- Modify: `internal/cel/functions.go` -- Modify: `internal/cel/functions_test.go` - -- [ ] **Step 1: Write failing tests** - -Append to `internal/cel/functions_test.go`: - -```go -func TestFunc_ParseInt(t *testing.T) { - eval, err := NewEvaluator() - require.NoError(t, err) - - tests := []struct { - name string - expr string - want any - wantErr bool - }{ - {"valid", `parseInt("42")`, int64(42), false}, - {"negative", `parseInt("-7")`, int64(-7), 
false}, - {"zero", `parseInt("0")`, int64(0), false}, - {"invalid", `parseInt("abc")`, nil, true}, - {"float_string", `parseInt("3.14")`, nil, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := eval.Eval(tt.expr, newTestContext()) - if tt.wantErr { - assert.Error(t, err) - } else { - require.NoError(t, err) - assert.Equal(t, tt.want, result) - } - }) - } -} - -func TestFunc_ParseFloat(t *testing.T) { - eval, err := NewEvaluator() - require.NoError(t, err) - - tests := []struct { - name string - expr string - want any - wantErr bool - }{ - {"valid", `parseFloat("3.14")`, 3.14, false}, - {"integer", `parseFloat("42")`, 42.0, false}, - {"negative", `parseFloat("-1.5")`, -1.5, false}, - {"invalid", `parseFloat("abc")`, nil, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := eval.Eval(tt.expr, newTestContext()) - if tt.wantErr { - assert.Error(t, err) - } else { - require.NoError(t, err) - assert.Equal(t, tt.want, result) - } - }) - } -} - -func TestFunc_ToString(t *testing.T) { - eval, err := NewEvaluator() - require.NoError(t, err) - - tests := []struct { - name string - expr string - want any - }{ - {"int", `toString(42)`, "42"}, - {"bool", `toString(true)`, "true"}, - {"string", `toString("hello")`, "hello"}, - {"float", `toString(3.14)`, "3.14"}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := eval.Eval(tt.expr, newTestContext()) - require.NoError(t, err) - assert.Equal(t, tt.want, result) - }) - } -} -``` - -- [ ] **Step 2: Run tests to verify they fail** - -Run: `go test ./internal/cel/ -run "TestFunc_ParseInt|TestFunc_ParseFloat|TestFunc_ToString" -v` -Expected: FAIL - -- [ ] **Step 3: Add type coercion functions** - -In `functions.go`, add a new library and register it in `customFunctions()`: - -```go -func typeFunctions() cel.EnvOption { - return cel.Lib(&typeLib{}) -} - -type typeLib struct{} - -func (l *typeLib) CompileOptions() 
[]cel.EnvOption { - return []cel.EnvOption{ - cel.Function("parseInt", - cel.Overload("parseInt_string", - []*cel.Type{cel.StringType}, - cel.IntType, - cel.UnaryBinding(func(val ref.Val) ref.Val { - s := string(val.(types.String)) - n, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return types.NewErr("parseInt: %v", err) - } - return types.Int(n) - }), - ), - ), - cel.Function("parseFloat", - cel.Overload("parseFloat_string", - []*cel.Type{cel.StringType}, - cel.DoubleType, - cel.UnaryBinding(func(val ref.Val) ref.Val { - s := string(val.(types.String)) - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return types.NewErr("parseFloat: %v", err) - } - return types.Double(f) - }), - ), - ), - cel.Function("toString", - cel.Overload("toString_any", - []*cel.Type{cel.DynType}, - cel.StringType, - cel.UnaryBinding(func(val ref.Val) ref.Val { - return types.String(fmt.Sprintf("%v", val.Value())) - }), - ), - ), - } -} - -func (l *typeLib) ProgramOptions() []cel.ProgramOption { - return nil -} -``` - -Add `"fmt"` and `"strconv"` to imports. Update `customFunctions()`: - -```go -func customFunctions() []cel.EnvOption { - return []cel.EnvOption{ - stringFunctions(), - typeFunctions(), - } -} -``` - -- [ ] **Step 4: Run tests to verify they pass** - -Run: `go test ./internal/cel/ -run "TestFunc_ParseInt|TestFunc_ParseFloat|TestFunc_ToString" -v` -Expected: PASS - -- [ ] **Step 5: Commit** - -```bash -git add internal/cel/functions.go internal/cel/functions_test.go -git commit -m "feat(cel): add parseInt, parseFloat, toString type coercion functions" -``` - ---- - -## Task 5: Object construction — obj() - -> **Implementation note:** The plan originally specified a variadic `obj()` overload, but cel-go does not support true variadic functions without macros. The implementation uses fixed-arity overloads for 2, 4, 6, 8, and 10 arguments (1–5 key-value pairs), all sharing a single `objBinding` function. 
- -**Files:** -- Modify: `internal/cel/functions.go` -- Modify: `internal/cel/functions_test.go` - -- [ ] **Step 1: Write failing tests** - -Append to `internal/cel/functions_test.go`: - -```go -func TestFunc_Obj(t *testing.T) { - eval, err := NewEvaluator() - require.NoError(t, err) - - tests := []struct { - name string - expr string - want any - wantErr bool - }{ - { - "basic", - `obj("name", "alice", "age", 30)`, - map[string]any{"name": "alice", "age": int64(30)}, - false, - }, - { - "single_pair", - `obj("key", "value")`, - map[string]any{"key": "value"}, - false, - }, - { - "nested_with_step", - `obj("status", steps.fetch.output.status)`, - map[string]any{"status": int64(200)}, - false, - }, - { - "odd_args", - `obj("key")`, - nil, - true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := eval.Eval(tt.expr, newTestContext()) - if tt.wantErr { - assert.Error(t, err) - } else { - require.NoError(t, err) - assert.Equal(t, tt.want, result) - } - }) - } -} -``` - -- [ ] **Step 2: Run tests to verify they fail** - -Run: `go test ./internal/cel/ -run TestFunc_Obj -v` -Expected: FAIL - -- [ ] **Step 3: Add obj function** - -In `functions.go`, add: - -```go -func collectionFunctions() cel.EnvOption { - return cel.Lib(&collectionLib{}) -} - -type collectionLib struct{} - -func (l *collectionLib) CompileOptions() []cel.EnvOption { - return []cel.EnvOption{ - // obj() — register fixed-arity overloads for 1–5 key-value pairs. - // CEL does not support true variadic functions, so we register - // overloads for 2/4/6/8/10 args sharing a common objBinding helper. 
- cel.Function("obj", - cel.Overload("obj_2", - []*cel.Type{cel.DynType, cel.DynType}, - cel.DynType, cel.FunctionBinding(objBinding)), - cel.Overload("obj_4", - []*cel.Type{cel.DynType, cel.DynType, cel.DynType, cel.DynType}, - cel.DynType, cel.FunctionBinding(objBinding)), - cel.Overload("obj_6", - []*cel.Type{cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType}, - cel.DynType, cel.FunctionBinding(objBinding)), - cel.Overload("obj_8", - []*cel.Type{cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType}, - cel.DynType, cel.FunctionBinding(objBinding)), - cel.Overload("obj_10", - []*cel.Type{cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType, cel.DynType}, - cel.DynType, cel.FunctionBinding(objBinding)), - ), - } -} - -func (l *collectionLib) ProgramOptions() []cel.ProgramOption { - return nil -} -``` - -Update `customFunctions()`: - -```go -func customFunctions() []cel.EnvOption { - return []cel.EnvOption{ - stringFunctions(), - typeFunctions(), - collectionFunctions(), - } -} -``` - -Note: `refToNative` is defined in `cel.go` and accessible since both files are in the same package. - -- [ ] **Step 4: Run tests to verify they pass** - -Run: `go test ./internal/cel/ -run TestFunc_Obj -v` -Expected: PASS - -- [ ] **Step 5: Commit** - -```bash -git add internal/cel/functions.go internal/cel/functions_test.go -git commit -m "feat(cel): add obj() map construction function" -``` - ---- - -## Task 6: Utility functions — default, flatten - -**Files:** -- Modify: `internal/cel/functions.go` -- Modify: `internal/cel/functions_test.go` - -- [ ] **Step 1: Write failing tests** - -Append to `internal/cel/functions_test.go`: - -```go -func TestFunc_Default(t *testing.T) { - eval, err := NewEvaluator() - require.NoError(t, err) - - // Test with a value that exists. 
- result, err := eval.Eval(`default(inputs.url, "fallback")`, newTestContext()) - require.NoError(t, err) - assert.Equal(t, "https://example.com", result) - - // Test with a missing key — CEL map access on missing key errors, - // so default should catch that. We test with a direct null/0 fallback. - result, err = eval.Eval(`default("", "fallback")`, newTestContext()) - require.NoError(t, err) - assert.Equal(t, "", result) // empty string is not null, returns as-is -} - -func TestFunc_Flatten(t *testing.T) { - eval, err := NewEvaluator() - require.NoError(t, err) - - ctx := &Context{ - Steps: map[string]map[string]any{ - "data": { - "output": map[string]any{ - "nested": []any{ - []any{int64(1), int64(2)}, - []any{int64(3), int64(4)}, - }, - }, - }, - }, - Inputs: map[string]any{}, - } - - result, err := eval.Eval(`flatten(steps.data.output.nested)`, ctx) - require.NoError(t, err) - assert.Equal(t, []any{int64(1), int64(2), int64(3), int64(4)}, result) -} -``` - -- [ ] **Step 2: Run tests to verify they fail** - -Run: `go test ./internal/cel/ -run "TestFunc_Default|TestFunc_Flatten" -v` -Expected: FAIL - -- [ ] **Step 3: Add default and flatten to collectionLib** - -In `functions.go`, add to `collectionLib.CompileOptions()`: - -```go - cel.Function("default", - cel.Overload("default_any_any", - []*cel.Type{cel.DynType, cel.DynType}, - cel.DynType, - cel.OverloadIsNonStrict(), - cel.BinaryBinding(func(lhs, rhs ref.Val) ref.Val { - if types.IsError(lhs) || types.IsUnknown(lhs) || lhs == types.NullValue { - return rhs - } - return lhs - }), - ), - ), - cel.Function("flatten", - cel.Overload("flatten_list", - []*cel.Type{cel.ListType(cel.DynType)}, - cel.ListType(cel.DynType), - cel.UnaryBinding(func(val ref.Val) ref.Val { - list := val.(traits.Lister) - var result []any - it := list.Iterator() - for it.HasNext() == types.True { - item := it.Next() - if sub, ok := item.(traits.Lister); ok { - subIt := sub.Iterator() - for subIt.HasNext() == types.True { - result = 
append(result, refToNative(subIt.Next())) - } - } else { - result = append(result, refToNative(item)) - } - } - return types.DefaultTypeAdapter.NativeToValue(result) - }), - ), - ), -``` - -Add a helper function in `functions.go`: - -```go -func nativeSlice(vals []ref.Val) []any { - result := make([]any, len(vals)) - for i, v := range vals { - result[i] = refToNative(v) - } - return result -} -``` - -Add `"github.com/google/cel-go/common/types/traits"` to imports. - -- [ ] **Step 4: Run tests to verify they pass** - -Run: `go test ./internal/cel/ -run "TestFunc_Default|TestFunc_Flatten" -v` -Expected: PASS - -- [ ] **Step 5: Commit** - -```bash -git add internal/cel/functions.go internal/cel/functions_test.go -git commit -m "feat(cel): add default() null coalescing and flatten() functions" -``` - ---- - -## Task 7: JSON functions — jsonEncode, jsonDecode - -**Files:** -- Modify: `internal/cel/functions.go` -- Modify: `internal/cel/functions_test.go` - -- [ ] **Step 1: Write failing tests** - -Append to `internal/cel/functions_test.go`: - -```go -func TestFunc_JsonEncode(t *testing.T) { - eval, err := NewEvaluator() - require.NoError(t, err) - - result, err := eval.Eval(`jsonEncode(obj("name", "alice", "age", 30))`, newTestContext()) - require.NoError(t, err) - - // JSON key order may vary, so parse and compare. 
- var parsed map[string]any - require.NoError(t, json.Unmarshal([]byte(result.(string)), &parsed)) - assert.Equal(t, "alice", parsed["name"]) - assert.Equal(t, float64(30), parsed["age"]) // JSON numbers decode as float64 -} - -func TestFunc_JsonDecode(t *testing.T) { - eval, err := NewEvaluator() - require.NoError(t, err) - - tests := []struct { - name string - expr string - wantErr bool - }{ - {"object", `jsonDecode("{\"name\":\"alice\"}")`, false}, - {"array", `jsonDecode("[1,2,3]")`, false}, - {"invalid", `jsonDecode("not json")`, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, err := eval.Eval(tt.expr, newTestContext()) - if tt.wantErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - }) - } -} -``` - -Add `"encoding/json"` to test file imports. - -- [ ] **Step 2: Run tests to verify they fail** - -Run: `go test ./internal/cel/ -run "TestFunc_Json" -v` -Expected: FAIL - -- [ ] **Step 3: Add JSON functions** - -In `functions.go`, add a new library: - -```go -func jsonFunctions() cel.EnvOption { - return cel.Lib(&jsonLib{}) -} - -type jsonLib struct{} - -func (l *jsonLib) CompileOptions() []cel.EnvOption { - return []cel.EnvOption{ - cel.Function("jsonEncode", - cel.Overload("jsonEncode_any", - []*cel.Type{cel.DynType}, - cel.StringType, - cel.UnaryBinding(func(val ref.Val) ref.Val { - native := refToNative(val) - b, err := json.Marshal(native) - if err != nil { - return types.NewErr("jsonEncode: %v", err) - } - return types.String(string(b)) - }), - ), - ), - cel.Function("jsonDecode", - cel.Overload("jsonDecode_string", - []*cel.Type{cel.StringType}, - cel.DynType, - cel.UnaryBinding(func(val ref.Val) ref.Val { - s := string(val.(types.String)) - dec := json.NewDecoder(strings.NewReader(s)) - dec.UseNumber() - var result any - if err := dec.Decode(&result); err != nil { - return types.NewErr("jsonDecode: %v", err) - } - // Reject trailing data by attempting a second decode — must hit EOF. 
- var trailing json.RawMessage - if err := dec.Decode(&trailing); err != io.EOF { - return types.NewErr("jsonDecode: unexpected trailing data after JSON value") - } - return types.DefaultTypeAdapter.NativeToValue(normalizeJSONNumbers(result)) - }), - ), - ), - } -} - -func (l *jsonLib) ProgramOptions() []cel.ProgramOption { - return nil -} -``` - -Add `"encoding/json"` to `functions.go` imports. Update `customFunctions()`: - -```go -func customFunctions() []cel.EnvOption { - return []cel.EnvOption{ - stringFunctions(), - typeFunctions(), - collectionFunctions(), - jsonFunctions(), - } -} -``` - -- [ ] **Step 4: Run tests to verify they pass** - -Run: `go test ./internal/cel/ -run "TestFunc_Json" -v` -Expected: PASS - -- [ ] **Step 5: Commit** - -```bash -git add internal/cel/functions.go internal/cel/functions_test.go -git commit -m "feat(cel): add jsonEncode and jsonDecode functions" -``` - ---- - -## Task 8: Date/time functions — parseTimestamp, formatTimestamp - -**Files:** -- Modify: `internal/cel/functions.go` -- Modify: `internal/cel/functions_test.go` - -- [ ] **Step 1: Write failing tests** - -Append to `internal/cel/functions_test.go`: - -```go -func TestFunc_ParseTimestamp(t *testing.T) { - eval, err := NewEvaluator() - require.NoError(t, err) - - tests := []struct { - name string - expr string - wantErr bool - }{ - {"iso8601", `parseTimestamp("2026-03-24T19:00:00Z")`, false}, - {"with_offset", `parseTimestamp("2026-03-24T14:00:00-05:00")`, false}, - {"invalid", `parseTimestamp("not a date")`, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, err := eval.Eval(tt.expr, newTestContext()) - if tt.wantErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - }) - } -} - -func TestFunc_FormatTimestamp(t *testing.T) { - eval, err := NewEvaluator() - require.NoError(t, err) - - result, err := eval.Eval(`formatTimestamp(parseTimestamp("2026-03-24T19:00:00Z"), "2006-01-02")`, newTestContext()) - require.NoError(t, err) 
- assert.Equal(t, "2026-03-24", result) - - result, err = eval.Eval(`formatTimestamp(parseTimestamp("2026-03-24T19:30:45Z"), "15:04:05")`, newTestContext()) - require.NoError(t, err) - assert.Equal(t, "19:30:45", result) -} -``` - -- [ ] **Step 2: Run tests to verify they fail** - -Run: `go test ./internal/cel/ -run "TestFunc_ParseTimestamp|TestFunc_FormatTimestamp" -v` -Expected: FAIL — functions not registered yet - -- [ ] **Step 3: Add date/time functions** - -In `functions.go`, add: - -```go -func timeFunctions() cel.EnvOption { - return cel.Lib(&timeLib{}) -} - -type timeLib struct{} - -func (l *timeLib) CompileOptions() []cel.EnvOption { - return []cel.EnvOption{ - cel.Function("parseTimestamp", - cel.Overload("parseTimestamp_string", - []*cel.Type{cel.StringType}, - cel.TimestampType, - cel.UnaryBinding(func(val ref.Val) ref.Val { - s := string(val.(types.String)) - layouts := []string{ - time.RFC3339, - time.RFC3339Nano, - "2006-01-02T15:04:05", - "2006-01-02", - "01/02/2006", - "Jan 2, 2006", - } - for _, layout := range layouts { - if t, err := time.Parse(layout, s); err == nil { - return types.Timestamp{Time: t} - } - } - return types.NewErr("parseTimestamp: unable to parse %q (tried RFC3339, ISO 8601 date, and common formats)", s) - }), - ), - ), - cel.Function("formatTimestamp", - cel.Overload("formatTimestamp_timestamp_string", - []*cel.Type{cel.TimestampType, cel.StringType}, - cel.StringType, - cel.BinaryBinding(func(lhs, rhs ref.Val) ref.Val { - ts := lhs.(types.Timestamp) - layout := string(rhs.(types.String)) - return types.String(ts.Time.Format(layout)) - }), - ), - ), - } -} - -func (l *timeLib) ProgramOptions() []cel.ProgramOption { - return nil -} -``` - -Add `"time"` to imports. 
Update `customFunctions()`: - -```go -func customFunctions() []cel.EnvOption { - return []cel.EnvOption{ - stringFunctions(), - typeFunctions(), - collectionFunctions(), - jsonFunctions(), - timeFunctions(), - } -} -``` - -- [ ] **Step 4: Run tests to verify they pass** - -Run: `go test ./internal/cel/ -run "TestFunc_ParseTimestamp|TestFunc_FormatTimestamp" -v` -Expected: PASS - -- [ ] **Step 5: Run full test suite** - -Run: `go test ./internal/cel/ -v` -Expected: PASS — all tests including existing ones - -- [ ] **Step 6: Commit** - -```bash -git add internal/cel/functions.go internal/cel/functions_test.go -git commit -m "feat(cel): add parseTimestamp and formatTimestamp date/time functions" -``` - ---- - -## Task 9: Update CEL expressions documentation - -**Files:** -- Modify: `site/src/content/docs/concepts/expressions.md` - -This task is delegated to the technical writer agent. - -- [ ] **Step 1: Read the current expressions.md** - -Read: `site/src/content/docs/concepts/expressions.md` - -- [ ] **Step 2: Add new sections for custom functions and macros** - -After the existing content, add sections covering: - -**Built-in List Macros:** -- `.map(item, expr)` — transform each element -- `.filter(item, expr)` — keep matching elements -- `.exists(item, expr)` — true if any match -- `.all(item, expr)` — true if all match -- `.exists_one(item, expr)` — true if exactly one matches -- Chaining example: `.filter(...).map(...)` - -**String Functions:** -- `toLower()`, `toUpper()`, `trim()`, `replace(old, new)`, `split(delim)` - -**Type Coercion:** -- `parseInt(string)`, `parseFloat(string)`, `toString(any)` - -**Object Construction:** -- `obj(key, value, ...)` with usage examples for building params maps - -**Utility Functions:** -- `default(value, fallback)` -- `flatten(list)` - -**JSON Functions:** -- `jsonEncode(value)`, `jsonDecode(string)` - -**Date/Time Functions:** -- `parseTimestamp(string)`, `formatTimestamp(ts, layout)` with Go layout reference - -Each function 
should have a brief description and a YAML example showing usage in a workflow step. - -- [ ] **Step 3: Verify site builds** - -Run: `cd site && npm run build` -Expected: success - -- [ ] **Step 4: Commit** - -```bash -git add site/src/content/docs/concepts/expressions.md -git commit -m "docs: add custom CEL functions and macros to expressions reference (#14)" -``` - ---- - -## Task 10: Create data transformations guide - -**Files:** -- Create: `site/src/content/docs/getting-started/data-transformations.md` - -This task is delegated to the technical writer agent. - -- [ ] **Step 1: Create the guide** - -Write `site/src/content/docs/getting-started/data-transformations.md` covering three patterns: - -**Pattern 1 — Structural transforms (CEL only):** -Complete workflow example: fetch user list from API → `.map()` + `obj()` to reshape each record → Postgres INSERT. Show the full YAML with CEL expressions in params. - -**Pattern 2 — AI-powered transforms:** -Complete workflow example: fetch raw text/HTML → AI connector with `output_schema` to extract structured data → store results. Explain when to use AI vs CEL (interpretation vs reshaping). - -**Pattern 3 — Hybrid:** -Complete workflow example: fetch data → CEL for field extraction and normalization → AI for classification/enrichment → Postgres store. Show how to combine both approaches. - -Include a decision guide: "Use CEL when the mapping is known and structural. Use AI when the transform requires interpretation, classification, or natural language understanding." 
- -- [ ] **Step 2: Verify site builds** - -Run: `cd site && npm run build` -Expected: success - -- [ ] **Step 3: Commit** - -```bash -git add site/src/content/docs/getting-started/data-transformations.md -git commit -m "docs: add data transformation patterns guide (#14)" -``` - ---- - -## Task 11: Create example workflows - -**Files:** -- Create: `examples/data-transform-api-to-db.yaml` -- Create: `examples/ai-data-enrichment.yaml` - -- [ ] **Step 1: Create structural transform example** - -In `examples/data-transform-api-to-db.yaml`: - -```yaml -name: data-transform-api-to-db -description: > - Fetches a user from an API, transforms the record using CEL expressions - to match a database schema, and inserts the normalized data into Postgres. - Demonstrates toLower() and string functions without requiring an AI model. - -steps: - - name: fetch-user - action: http/request - timeout: "15s" - params: - method: GET - url: "https://jsonplaceholder.typicode.com/users/1" - headers: - Accept: "application/json" - - - name: store-user - action: postgres/query - credential: app-db - params: - query: "INSERT INTO users (username, email, city) VALUES ($1, $2, $3)" - args: - - "{{ steps['fetch-user'].output.json.username.toLower() }}" - - "{{ steps['fetch-user'].output.json.email.toLower() }}" - - "{{ steps['fetch-user'].output.json.address.city }}" -``` - -- [ ] **Step 2: Create AI enrichment example** - -In `examples/ai-data-enrichment.yaml`: - -```yaml -name: ai-data-enrichment -description: > - Fetches support tickets, uses an AI model to classify priority and - extract key entities, then stores the enriched data. Demonstrates - using AI for transforms that require interpretation rather than - simple structural mapping. 
- -inputs: - ticket_api_url: - type: string - description: URL to fetch support tickets from - -steps: - - name: fetch-tickets - action: http/request - timeout: "15s" - params: - method: GET - url: "{{ inputs.ticket_api_url }}" - headers: - Accept: "application/json" - - - name: classify - action: ai/completion - credential: openai - timeout: "60s" - params: - model: gpt-4o - system_prompt: > - You are a support ticket classifier. Given a ticket, determine - the priority (critical, high, medium, low), category, and extract - any mentioned product names or error codes. - prompt: "Classify this ticket: {{ steps['fetch-tickets'].output.body }}" - output_schema: - type: object - properties: - priority: - type: string - enum: [critical, high, medium, low] - category: - type: string - products: - type: array - items: - type: string - error_codes: - type: array - items: - type: string - required: [priority, category, products, error_codes] - additionalProperties: false - - - name: store-enriched - action: postgres/query - credential: app-db - if: "steps.classify.output.json.priority == 'critical' || steps.classify.output.json.priority == 'high'" - params: - query: > - INSERT INTO urgent_tickets (priority, category, products, raw_body) - VALUES ($1, $2, $3, $4) - args: - - "{{ steps.classify.output.json.priority }}" - - "{{ steps.classify.output.json.category }}" - - "{{ jsonEncode(steps.classify.output.json.products) }}" - - "{{ steps['fetch-tickets'].output.body }}" -``` - -- [ ] **Step 3: Commit** - -```bash -git add examples/data-transform-api-to-db.yaml examples/ai-data-enrichment.yaml -git commit -m "feat: add data transformation and AI enrichment example workflows (#14)" -``` - ---- - -## Task 12: Final validation - -- [ ] **Step 1: Run full test suite** - -Run: `go test ./internal/cel/ -v` -Expected: PASS — all function tests, macro tests, and existing tests - -- [ ] **Step 2: Run go vet and golangci-lint** - -Run: `go vet ./internal/cel/` -Expected: clean - -Run: 
`golangci-lint run ./...` -Expected: clean - -- [ ] **Step 3: Verify site builds** - -Run: `cd site && npm run build` -Expected: success - -- [ ] **Step 4: Run full project test suite** - -Run: `go test ./... -short` -Expected: PASS diff --git a/docs/superpowers/plans/2026-03-24-init-connection-recovery.md b/docs/superpowers/plans/2026-03-24-init-connection-recovery.md deleted file mode 100644 index 715f962..0000000 --- a/docs/superpowers/plans/2026-03-24-init-connection-recovery.md +++ /dev/null @@ -1,981 +0,0 @@ -# `mantle init` Connection Recovery Implementation Plan - -> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. - -**Goal:** Make `mantle init` handle missing Postgres gracefully — auto-provision via Docker on localhost, retry/quit on remote hosts — and update the quickstart docs to match. - -**Architecture:** When `db.Open` fails, classify the host as loopback or remote. Loopback failures offer Docker auto-provisioning; remote failures offer retry/quit. Extract duplicated constants (testcontainers defaults, loopback detection, budget modes) into shared packages first. - -**Tech Stack:** Go, Cobra (`cmd.InOrStdin()`/`cmd.OutOrStdout()`), `os/exec` for Docker commands, `net/url` + `net` for host parsing. 
- -**Spec:** `docs/superpowers/specs/2026-03-24-init-connection-recovery-design.md` - ---- - -### Task 1: Create `internal/netutil/loopback.go` — loopback detection - -**Files:** -- Create: `internal/netutil/loopback.go` -- Create: `internal/netutil/loopback_test.go` - -- [ ] **Step 1: Write the failing tests** - -In `internal/netutil/loopback_test.go`: - -```go -package netutil_test - -import ( - "testing" - - "github.com/dvflw/mantle/internal/netutil" - "github.com/stretchr/testify/assert" -) - -func TestIsLoopback(t *testing.T) { - tests := []struct { - host string - expected bool - }{ - {"localhost", true}, - {"LOCALHOST", true}, - {"Localhost", true}, - {"127.0.0.1", true}, - {"::1", true}, - {"db.example.com", false}, - {"10.0.0.1", false}, - {"192.168.1.1", false}, - {"", false}, - } - for _, tt := range tests { - t.Run(tt.host, func(t *testing.T) { - assert.Equal(t, tt.expected, netutil.IsLoopback(tt.host)) - }) - } -} -``` - -- [ ] **Step 2: Run test to verify it fails** - -Run: `go test ./internal/netutil/ -v` -Expected: FAIL — package does not exist yet - -- [ ] **Step 3: Write minimal implementation** - -In `internal/netutil/loopback.go`: - -```go -package netutil - -import ( - "net" - "strings" -) - -// IsLoopback returns true if host is a loopback address: localhost, 127.0.0.1, or ::1. 
-func IsLoopback(host string) bool { - if strings.EqualFold(host, "localhost") { - return true - } - ip := net.ParseIP(host) - return ip != nil && ip.IsLoopback() -} -``` - -- [ ] **Step 4: Run test to verify it passes** - -Run: `go test ./internal/netutil/ -v` -Expected: PASS — all 9 cases - -- [ ] **Step 5: Commit** - -```bash -git add internal/netutil/loopback.go internal/netutil/loopback_test.go -git commit -m "feat(netutil): add IsLoopback host classifier" -``` - ---- - -### Task 2: Adopt `netutil.IsLoopback` in `internal/config/config.go` - -**Files:** -- Modify: `internal/config/config.go:268-280` (SSL warning block) - -- [ ] **Step 1: Run existing config tests as baseline** - -Run: `go test ./internal/config/ -v` -Expected: PASS — all existing tests green - -- [ ] **Step 2: Replace inline loopback logic with `netutil.IsLoopback`** - -In `internal/config/config.go`, replace the SSL warning block (lines ~268-281): - -```go -// Current code: - if dbURL := cfg.Database.URL; dbURL != "" { - if parsed, err := url.Parse(dbURL); err == nil { - host := parsed.Hostname() - ip := net.ParseIP(host) - isLoopback := host != "" && (strings.EqualFold(host, "localhost") || (ip != nil && ip.IsLoopback())) - if !isLoopback { - q := parsed.Query() - if q.Get("sslmode") == "prefer" { - log.Printf("WARNING: database URL uses sslmode=prefer for non-loopback host %q; consider sslmode=require for production", host) - } - } - } - } -``` - -Replace with: - -```go - if dbURL := cfg.Database.URL; dbURL != "" { - if parsed, err := url.Parse(dbURL); err == nil { - host := parsed.Hostname() - if !netutil.IsLoopback(host) { - q := parsed.Query() - if q.Get("sslmode") == "prefer" { - log.Printf("WARNING: database URL uses sslmode=prefer for non-loopback host %q; consider sslmode=require for production", host) - } - } - } - } -``` - -Add import `"github.com/dvflw/mantle/internal/netutil"`. Remove `"net"` from imports if no longer used (check — `net` may be used elsewhere in the file). 
Remove `"strings"` only if no longer used elsewhere. - -- [ ] **Step 3: Run config tests to verify no regression** - -Run: `go test ./internal/config/ -v` -Expected: PASS — identical behavior - -- [ ] **Step 4: Commit** - -```bash -git add internal/config/config.go -git commit -m "refactor(config): use netutil.IsLoopback for SSL warning" -``` - ---- - -### Task 3: Create `internal/dbdefaults/dbdefaults.go` — shared constants - -**Files:** -- Create: `internal/dbdefaults/dbdefaults.go` - -- [ ] **Step 1: Create the constants package** - -In `internal/dbdefaults/dbdefaults.go`: - -```go -package dbdefaults - -// Runtime defaults — used by Docker auto-provisioning and config defaults. -// These match the default database URL in config.go. -const ( - PostgresImage = "postgres:16-alpine" - User = "mantle" - Password = "mantle" - Database = "mantle" - ContainerName = "mantle-postgres" -) - -// Test defaults — used by testcontainers setups. -const ( - TestDatabase = "mantle_test" -) -``` - -- [ ] **Step 2: Verify it compiles** - -Run: `go build ./internal/dbdefaults/` -Expected: success (no output) - -- [ ] **Step 3: Commit** - -```bash -git add internal/dbdefaults/dbdefaults.go -git commit -m "feat(dbdefaults): add shared Postgres image and test credential constants" -``` - ---- - -### Task 4: Adopt `dbdefaults` in all testcontainers setups - -**Files:** -- Modify: `internal/db/migrate_test.go:19-22` -- Modify: `internal/budget/store_test.go:23-26` -- Modify: `internal/engine/test_helpers_test.go:21-24` -- Modify: `internal/auth/auth_test.go` (find `setupTestDB`) -- Modify: `internal/workflow/store_test.go` (find `setupTestDB`) -- Modify: `internal/secret/store_test.go` (find `setupTestDB`) -- Modify: `internal/connector/postgres_test.go` (find postgres image literal) - -- [ ] **Step 1: Run all tests as baseline** - -Run: `go test ./internal/db/ ./internal/budget/ ./internal/engine/ ./internal/auth/ ./internal/workflow/ ./internal/secret/ ./internal/connector/ -count=1 
-short` -Expected: PASS (or SKIP if Docker not available) - -- [ ] **Step 2: Update each test file** - -In each file's `setupTestDB` function, replace the string literals with `dbdefaults` constants. The pattern is the same in every file. Replace: - -```go - pgContainer, err := postgres.Run(ctx, - "postgres:16-alpine", - postgres.WithDatabase("mantle_test"), - postgres.WithUsername("mantle"), - postgres.WithPassword("mantle"), -``` - -With: - -```go - pgContainer, err := postgres.Run(ctx, - dbdefaults.PostgresImage, - postgres.WithDatabase(dbdefaults.TestDatabase), - postgres.WithUsername(dbdefaults.User), - postgres.WithPassword(dbdefaults.Password), -``` - -Add import `"github.com/dvflw/mantle/internal/dbdefaults"` to each file. - -Files to update (7 total): -1. `internal/db/migrate_test.go` -2. `internal/budget/store_test.go` -3. `internal/engine/test_helpers_test.go` -4. `internal/auth/auth_test.go` -5. `internal/workflow/store_test.go` -6. `internal/secret/store_test.go` -7. `internal/connector/postgres_test.go` (only `PostgresImage` — check if it uses different user/db) - -- [ ] **Step 3: Verify compilation** - -Run: `go build ./internal/...` -Expected: success - -- [ ] **Step 4: Run tests to verify no regression** - -Run: `go test ./internal/db/ ./internal/budget/ ./internal/engine/ ./internal/auth/ ./internal/workflow/ ./internal/secret/ ./internal/connector/ -count=1 -short` -Expected: same results as baseline - -- [ ] **Step 5: Commit** - -```bash -git add internal/db/migrate_test.go internal/budget/store_test.go internal/engine/test_helpers_test.go internal/auth/auth_test.go internal/workflow/store_test.go internal/secret/store_test.go internal/connector/postgres_test.go -git commit -m "refactor(tests): use dbdefaults constants in all testcontainers setups" -``` - ---- - -### Task 5: Add budget reset mode constants - -**Files:** -- Modify: `internal/budget/budget.go:1-22` -- Modify: `internal/config/config.go:260-261` - -- [ ] **Step 1: Run baseline 
tests** - -Run: `go test ./internal/budget/ ./internal/config/ -v` -Expected: PASS - -- [ ] **Step 2: Add constants to budget.go** - -At the top of `internal/budget/budget.go`, after the imports, add: - -```go -// Reset mode constants for budget period calculation. -const ( - ResetModeCalendar = "calendar" - ResetModeRolling = "rolling" -) -``` - -Update `CurrentPeriodStart` to use the constant: - -```go -func CurrentPeriodStart(now time.Time, mode string, resetDay int) time.Time { - now = now.UTC() - if mode == ResetModeRolling && resetDay >= 1 && resetDay <= 28 { -``` - -- [ ] **Step 3: Update config.go validation to use budget constants** - -In `internal/config/config.go`, replace the string literals in validation (line ~261): - -```go -// Current: - if cfg.Engine.Budget.ResetMode == "rolling" { -// Replace with: - if cfg.Engine.Budget.ResetMode == budget.ResetModeRolling { -``` - -Also update the default value assignment (in the defaults block where `ResetMode` is set) if it uses the string literal `"calendar"` — replace with `budget.ResetModeCalendar`. - -Add import `"github.com/dvflw/mantle/internal/budget"` to config.go. 
- -- [ ] **Step 4: Run tests to verify no regression** - -Run: `go test ./internal/budget/ ./internal/config/ -v` -Expected: PASS — identical behavior - -- [ ] **Step 5: Commit** - -```bash -git add internal/budget/budget.go internal/config/config.go -git commit -m "refactor(budget): extract ResetModeCalendar/ResetModeRolling constants" -``` - ---- - -### Task 6: Create `internal/cli/docker.go` — Docker operations - -**Files:** -- Create: `internal/cli/docker.go` -- Create: `internal/cli/docker_test.go` - -- [ ] **Step 1: Write failing tests** - -In `internal/cli/docker_test.go`: - -```go -package cli - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestDockerRunArgs(t *testing.T) { - args := dockerRunArgs() - assert.Equal(t, []string{ - "run", "-d", - "--name", "mantle-postgres", - "-p", "5432:5432", - "-e", "POSTGRES_USER=mantle", - "-e", "POSTGRES_PASSWORD=mantle", - "-e", "POSTGRES_DB=mantle", - "-v", "mantle-pgdata:/var/lib/postgresql/data", - "postgres:16-alpine", - }, args) -} - -func TestParseHostFromURL(t *testing.T) { - tests := []struct { - name string - url string - expected string - }{ - {"standard", "postgres://mantle:mantle@localhost:5432/mantle", "localhost"}, - {"remote", "postgres://user:pass@db.example.com:5432/mydb", "db.example.com"}, - {"ipv4", "postgres://user:pass@10.0.0.1:5432/mydb", "10.0.0.1"}, - {"ipv6", "postgres://user:pass@[::1]:5432/mydb", "::1"}, - {"no-port", "postgres://user:pass@myhost/mydb", "myhost"}, - {"empty", "", ""}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, tt.expected, parseHostFromURL(tt.url)) - }) - } -} -``` - -- [ ] **Step 2: Run tests to verify they fail** - -Run: `go test ./internal/cli/ -run "TestDockerRunArgs|TestParseHostFromURL" -v` -Expected: FAIL — functions not defined - -- [ ] **Step 3: Write implementation** - -In `internal/cli/docker.go`: - -```go -package cli - -import ( - "context" - "fmt" - "net/url" - "os/exec" - "strings" - 
"time" - - "github.com/dvflw/mantle/internal/config" - "github.com/dvflw/mantle/internal/db" - "github.com/dvflw/mantle/internal/dbdefaults" -) - -// dockerRunArgs returns the arguments for `docker run` to start a Postgres -// container matching Mantle's default configuration. -func dockerRunArgs() []string { - return []string{ - "run", "-d", - "--name", dbdefaults.ContainerName, - "-p", "5432:5432", - "-e", "POSTGRES_USER=" + dbdefaults.User, - "-e", "POSTGRES_PASSWORD=" + dbdefaults.Password, - "-e", "POSTGRES_DB=" + dbdefaults.Database, - "-v", "mantle-pgdata:/var/lib/postgresql/data", - dbdefaults.PostgresImage, - } -} - -// parseHostFromURL extracts the hostname from a Postgres connection URL. -func parseHostFromURL(rawURL string) string { - if rawURL == "" { - return "" - } - parsed, err := url.Parse(rawURL) - if err != nil { - return "" - } - return parsed.Hostname() -} - -// dockerAvailable checks whether the Docker CLI is installed and the daemon is responsive. -func dockerAvailable() bool { - cmd := exec.Command("docker", "info") - return cmd.Run() == nil -} - -// dockerContainerStatus returns "running", "exited", or "" (not found) -// for the mantle-postgres container. -func dockerContainerStatus() string { - out, err := exec.Command("docker", "inspect", "-f", "{{.State.Status}}", dbdefaults.ContainerName).Output() - if err != nil { - return "" - } - return strings.TrimSpace(string(out)) -} - -// dockerRemoveContainer removes the mantle-postgres container (stopped or otherwise). -func dockerRemoveContainer() error { - return exec.Command("docker", "rm", "-f", dbdefaults.ContainerName).Run() -} - -// dockerStartPostgres starts a new Postgres container and waits for it to accept connections. -func dockerStartPostgres(cfg config.DatabaseConfig) error { - // Handle existing container. - switch dockerContainerStatus() { - case "running": - // Already running — just wait for readiness. 
- return waitForPostgres(cfg) - case "exited", "created", "dead": - _ = dockerRemoveContainer() - } - - args := dockerRunArgs() - out, err := exec.Command("docker", args...).CombinedOutput() - if err != nil { - return fmt.Errorf("docker run failed: %w\n%s", err, string(out)) - } - - return waitForPostgres(cfg) -} - -// waitForPostgres polls db.Open with backoff until the database accepts connections -// or the timeout (~15s) is exceeded. -func waitForPostgres(cfg config.DatabaseConfig) error { - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - defer cancel() - - delay := 500 * time.Millisecond - for { - database, err := db.Open(cfg) - if err == nil { - database.Close() - return nil - } - select { - case <-ctx.Done(): - return fmt.Errorf("container started but Postgres isn't accepting connections after 15s: %w", err) - case <-time.After(delay): - if delay < 2*time.Second { - delay *= 2 - } - } - } -} -``` - -- [ ] **Step 4: Run tests to verify they pass** - -Run: `go test ./internal/cli/ -run "TestDockerRunArgs|TestParseHostFromURL" -v` -Expected: PASS - -- [ ] **Step 5: Commit** - -```bash -git add internal/cli/docker.go internal/cli/docker_test.go -git commit -m "feat(cli): add Docker auto-provisioning helpers for mantle init" -``` - ---- - -### Task 7: Implement connection recovery in `internal/cli/init.go` - -**Files:** -- Modify: `internal/cli/init.go` -- Create: `internal/cli/init_test.go` - -- [ ] **Step 1: Write tests for non-interactive mode and isInteractive** - -In `internal/cli/init_test.go`: - -```go -package cli - -import ( - "bytes" - "testing" - - "github.com/dvflw/mantle/internal/config" - "github.com/spf13/cobra" - "github.com/stretchr/testify/assert" -) - -func TestIsInteractive_ReturnsBool(t *testing.T) { - // In test context, stdin is not a TTY — isInteractive should return false. 
- assert.False(t, isInteractive()) -} - -func TestHandleConnectionFailure_NonInteractive_ReturnsError(t *testing.T) { - // When stdin is not a TTY, handleConnectionFailure should return the - // connection error immediately without prompting. - cmd := &cobra.Command{} - var buf bytes.Buffer - cmd.SetOut(&buf) - - cfg := &config.Config{} - cfg.Database.URL = "postgres://mantle:mantle@localhost:5432/mantle" - - _, err := handleConnectionFailure(cmd, cfg, fmt.Errorf("connection refused")) - assert.Error(t, err) - assert.Contains(t, err.Error(), "connection refused") - // No prompt text should have been written to stdout. - assert.Empty(t, buf.String()) -} -``` - -Add `"fmt"` to imports. - -- [ ] **Step 2: Run tests to verify they fail** - -Run: `go test ./internal/cli/ -run "TestIsInteractive|TestHandleConnectionFailure_NonInteractive" -v` -Expected: FAIL — functions not defined yet - -- [ ] **Step 3: Rewrite init.go with connection recovery flow** - -Replace the contents of `internal/cli/init.go` with: - -```go -package cli - -import ( - "database/sql" - "fmt" - "os" - "strings" - - "github.com/dvflw/mantle/internal/config" - "github.com/dvflw/mantle/internal/db" - "github.com/dvflw/mantle/internal/netutil" - "github.com/spf13/cobra" -) - -func newInitCommand() *cobra.Command { - return &cobra.Command{ - Use: "init", - Short: "Initialize Mantle — run database migrations", - Long: "Runs all pending database migrations to set up or upgrade the Mantle schema.\nIf Postgres is not reachable, offers to start one automatically via Docker.", - Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - cfg := config.FromContext(cmd.Context()) - if cfg == nil { - return fmt.Errorf("config not loaded") - } - - database, err := db.Open(cfg.Database) - if err != nil { - database, err = handleConnectionFailure(cmd, cfg, err) - if err != nil { - return err - } - } - defer database.Close() - - fmt.Fprintln(cmd.OutOrStdout(), "Running migrations...") - if err := 
db.Migrate(cmd.Context(), database); err != nil { - return fmt.Errorf("migration failed: %w", err) - } - - fmt.Fprintln(cmd.OutOrStdout(), "Migrations complete.") - return nil - }, - } -} - -// handleConnectionFailure is called when the initial db.Open fails. -// It classifies the host and offers interactive recovery options. -func handleConnectionFailure(cmd *cobra.Command, cfg *config.Config, connErr error) (*sql.DB, error) { - host := parseHostFromURL(cfg.Database.URL) - - // Non-interactive mode (piped stdin, CI): just return the error. - if !isInteractive() { - return nil, fmt.Errorf("failed to connect to database: %w", connErr) - } - - if netutil.IsLoopback(host) { - return handleLoopbackFailure(cmd, cfg, connErr) - } - return handleRemoteFailure(cmd, cfg, host, connErr) -} - -// isInteractive returns true if stdin is a terminal (not piped). -func isInteractive() bool { - fi, err := os.Stdin.Stat() - if err != nil { - return false - } - return fi.Mode()&os.ModeCharDevice != 0 -} - -// handleLoopbackFailure offers Docker auto-provisioning for localhost connections. -func handleLoopbackFailure(cmd *cobra.Command, cfg *config.Config, connErr error) (*sql.DB, error) { - out := cmd.OutOrStdout() - in := cmd.InOrStdin() - - fmt.Fprintf(out, "No Postgres found on localhost: %v\n\n", connErr) - fmt.Fprint(out, "Start a Postgres container with Docker? [Y/n]: ") - - var answer string - fmt.Fscanln(in, &answer) - answer = strings.TrimSpace(strings.ToLower(answer)) - - if answer != "" && answer != "y" && answer != "yes" { - return promptConnectionStringOrRetryDocker(cmd, cfg) - } - - // User accepted Docker provisioning. - return attemptDockerProvisioning(cmd, cfg) -} - -// attemptDockerProvisioning checks Docker availability and starts the container. 
-func attemptDockerProvisioning(cmd *cobra.Command, cfg *config.Config) (*sql.DB, error) { - out := cmd.OutOrStdout() - - if !dockerAvailable() { - fmt.Fprintln(out, "\nDocker isn't installed or isn't running.") - return promptConnectionStringOrRetryDocker(cmd, cfg) - } - - fmt.Fprintln(out, "Starting Postgres container...") - if err := dockerStartPostgres(cfg.Database); err != nil { - return nil, fmt.Errorf("docker provisioning failed: %w", err) - } - - fmt.Fprintln(out, "Postgres is ready.") - return db.Open(cfg.Database) -} - -// promptConnectionStringOrRetryDocker offers [R]etry or [C]onnection string. -func promptConnectionStringOrRetryDocker(cmd *cobra.Command, cfg *config.Config) (*sql.DB, error) { - out := cmd.OutOrStdout() - in := cmd.InOrStdin() - - for { - fmt.Fprintln(out, "") - fmt.Fprintln(out, " [R] Retry (install or start Docker first)") - fmt.Fprintln(out, " [C] Enter a Postgres connection string") - fmt.Fprint(out, "\nChoice [R/c]: ") - - var choice string - fmt.Fscanln(in, &choice) - choice = strings.TrimSpace(strings.ToLower(choice)) - - switch choice { - case "c": - return promptConnectionString(cmd, cfg) - default: - // Retry Docker provisioning. - return attemptDockerProvisioning(cmd, cfg) - } - } -} - -// promptConnectionString asks the user for a connection URL and validates it. -func promptConnectionString(cmd *cobra.Command, cfg *config.Config) (*sql.DB, error) { - out := cmd.OutOrStdout() - in := cmd.InOrStdin() - - for { - fmt.Fprint(out, "Postgres connection string: ") - - var connStr string - fmt.Fscanln(in, &connStr) - connStr = strings.TrimSpace(connStr) - - if connStr == "" { - continue - } - - cfg.Database.URL = connStr - database, err := db.Open(cfg.Database) - if err != nil { - fmt.Fprintf(out, "Connection failed: %v\n", err) - continue - } - return database, nil - } -} - -// handleRemoteFailure shows the error and offers retry/quit for non-loopback hosts. 
-func handleRemoteFailure(cmd *cobra.Command, cfg *config.Config, host string, connErr error) (*sql.DB, error) { - out := cmd.OutOrStdout() - in := cmd.InOrStdin() - - for { - fmt.Fprintf(out, "Failed to connect to database at %s\n\n", host) - fmt.Fprintf(out, " Error: %v\n\n", connErr) - fmt.Fprintln(out, " [R] Retry (fix the issue and try again)") - fmt.Fprintln(out, " [Q] Quit") - fmt.Fprint(out, "\nChoice [R/q]: ") - - var choice string - fmt.Fscanln(in, &choice) - choice = strings.TrimSpace(strings.ToLower(choice)) - - if choice == "q" { - return nil, fmt.Errorf("failed to connect to database at %s: %w", host, connErr) - } - - // Retry: re-load config to pick up env var / config file changes. - newCfg, err := config.Load(cmd) - if err != nil { - fmt.Fprintf(out, "Config reload error: %v\n", err) - continue - } - cfg.Database = newCfg.Database - - database, err := db.Open(cfg.Database) - if err != nil { - connErr = err - host = parseHostFromURL(cfg.Database.URL) - continue - } - return database, nil - } -} -``` - -- [ ] **Step 4: Fix compilation — verify build succeeds** - -Run: `go build ./internal/cli/` -Expected: success - -- [ ] **Step 5: Run all CLI tests including the new ones** - -Run: `go test ./internal/cli/ -v -short` -Expected: PASS — `TestIsInteractive`, `TestHandleConnectionFailure_NonInteractive`, `TestDockerRunArgs`, `TestParseHostFromURL` all pass - -- [ ] **Step 6: Commit** - -```bash -git add internal/cli/init.go internal/cli/init_test.go -git commit -m "feat(cli): add connection recovery flow to mantle init (#7)" -``` - ---- - -### Task 8: Update landing page quickstart - -**Files:** -- Modify: `site/src/components/GetStarted.astro` - -- [ ] **Step 1: Update the steps array** - -In `site/src/components/GetStarted.astro`, replace the steps array (lines 2-23): - -```javascript -const steps = [ - { - number: '1', - title: 'Install', - code: 'go install github.com/dvflw/mantle/cmd/mantle@latest', - }, - { - number: '2', - title: 'Initialize', - 
code: 'mantle init\n# Starts Postgres via Docker if needed, then runs migrations', - }, - { - number: '3', - title: 'Apply your first workflow', - code: 'mantle apply examples/hello-world.yaml\n# Applied hello-world version 1', - }, - { - number: '4', - title: 'Run it', - code: 'mantle run hello-world\n# Running hello-world (version 1)...\n# Execution a1b2c3d4: completed\n# fetch: completed (1.0s)', - }, -]; -``` - -Key changes: Step 2 title changes from "Start Postgres and initialize" to "Initialize". The `docker compose up -d` line is removed. A comment explains what `mantle init` does. - -- [ ] **Step 2: Verify the site builds** - -Run: `cd site && npm run build` (or whatever the build command is — check `site/package.json`) -Expected: success - -- [ ] **Step 3: Commit** - -```bash -git add site/src/components/GetStarted.astro -git commit -m "docs(site): simplify quickstart — mantle init handles DB setup (#7)" -``` - ---- - -### Task 9: Update getting-started docs - -**Files:** -- Modify: `site/src/content/docs/getting-started/index.md` - -- [ ] **Step 1: Update prerequisites section** - -Replace the prerequisites section (lines 9-22) — Docker is no longer required: - -```markdown -## Prerequisites - -You need the following installed on your machine: - -- **Go 1.25+** -- [install instructions](https://go.dev/doc/install) -- **Docker** (optional) -- [install instructions](https://docs.docker.com/get-docker/) -- used for automatic local Postgres provisioning - -Verify your setup: - -```bash -go version # go1.25 or later -``` -``` - -- [ ] **Step 2: Update the install/start section** - -Replace the "Install and Start" section (lines 24-43) with two paths — `go install` (primary) and clone (development): - -```markdown -## Install and Start (< 2 minutes) - -Install the binary and initialize: - -```bash -go install github.com/dvflw/mantle/cmd/mantle@latest -mantle init -``` - -`mantle init` connects to Postgres and runs migrations. 
If no database is reachable on localhost, it offers to start one automatically via Docker. For remote databases, set the URL before running init: - -```bash -export MANTLE_DATABASE_URL="postgres://mantle:secret@db.example.com:5432/mantle?sslmode=require" -mantle init -``` - -You should see: - -``` -Running migrations... -Migrations complete. -``` - -**Development setup:** If you want to build from source, clone the repository and use `make build` instead of `go install`: - -```bash -git clone https://github.com/dvflw/mantle.git && cd mantle -make build -./mantle init -``` - -See [Configuration](/docs/configuration) for all database options. -``` - -Remove the paragraph about `docker compose up -d` and `sslmode=prefer` (lines 35-43). The new text covers both install paths and explains the Docker auto-provisioning. - -- [ ] **Step 3: Verify the site builds** - -Run: `cd site && npm run build` -Expected: success - -- [ ] **Step 4: Commit** - -```bash -git add site/src/content/docs/getting-started/index.md -git commit -m "docs: update getting-started guide for new mantle init flow (#7)" -``` - ---- - -### Task 10: Manual smoke test - -- [ ] **Step 1: Build the binary** - -```bash -cd /Users/michael/Development/mantle -make build -``` - -- [ ] **Step 2: Test happy path (Docker running, Postgres available)** - -```bash -docker compose up -d # ensure Postgres is running -./mantle init -``` - -Expected: "Running migrations... Migrations complete." - -- [ ] **Step 3: Test Docker auto-provisioning (no Postgres running)** - -```bash -docker compose down -docker rm -f mantle-postgres 2>/dev/null -./mantle init -``` - -Expected: prompts "Start a Postgres container with Docker? [Y/n]". Accept with Enter/Y. Should start container, wait for readiness, run migrations. - -- [ ] **Step 4: Test non-interactive mode** - -```bash -docker compose down -echo "" | ./mantle init -``` - -Expected: returns error immediately, no prompts. 
- -- [ ] **Step 5: Test remote failure with retry** - -```bash -MANTLE_DATABASE_URL="postgres://user:pass@db.doesnotexist.com:5432/mantle" ./mantle init -``` - -Expected: shows connection error with host, offers Retry/Quit. Press Q to quit. - -- [ ] **Step 6: Run full test suite** - -```bash -make test -make lint -``` - -Expected: all tests pass, no lint errors. - -- [ ] **Step 7: Clean up and final commit if needed** - -```bash -docker rm -f mantle-postgres 2>/dev/null -docker compose up -d # restore normal dev state -``` diff --git a/docs/superpowers/specs/2026-03-24-data-transformation-design.md b/docs/superpowers/specs/2026-03-24-data-transformation-design.md deleted file mode 100644 index 15197f7..0000000 --- a/docs/superpowers/specs/2026-03-24-data-transformation-design.md +++ /dev/null @@ -1,175 +0,0 @@ -# Data Transformation — CEL Functions & Documentation - -**Date:** 2026-03-24 -**Issue:** [#14 — Data Transformation Step](https://github.com/dvflw/mantle/issues/14) -**Status:** Draft - -## Problem - -Mantle workflows can pass data between steps via CEL expressions, but lack the tools to reshape that data. The common pattern — fetch from API, normalize for a DB schema, store — requires either manual field-by-field construction (not possible in CEL today) or routing through the AI connector (slow, expensive, non-deterministic for structural transforms). - -## Discovery: Existing Hidden Capabilities - -CEL's default environment includes macros that already work in Mantle but were never documented or tested: - -- `.map(item, expr)` — transform each element in a list -- `.filter(item, expr)` — keep elements matching a predicate -- `.exists(item, expr)` — true if any element matches -- `.all(item, expr)` — true if all elements match -- `.exists_one(item, expr)` — true if exactly one matches - -These need documentation and tests, not implementation. 
- -## Design - -### Custom CEL Functions - -All functions registered in `internal/cel/functions.go` via `cel.Function()` options passed to `cel.NewEnv()`. Pure functions, no side effects. - -#### String Functions (methods on string type) - -| Function | Example | Result | -|----------|---------|--------| -| `toLower()` | `"HELLO".toLower()` | `"hello"` | -| `toUpper()` | `"hello".toUpper()` | `"HELLO"` | -| `trim()` | `" hello ".trim()` | `"hello"` | -| `replace(old, new)` | `"foo-bar".replace("-", "_")` | `"foo_bar"` | -| `split(delim)` | `"a,b,c".split(",")` | `["a", "b", "c"]` | - -#### Type Coercion (global functions) - -| Function | Example | Result | -|----------|---------|--------| -| `parseInt(string)` | `parseInt("42")` | `42` | -| `parseFloat(string)` | `parseFloat("3.14")` | `3.14` | -| `toString(any)` | `toString(42)` | `"42"` | - -#### Object Construction (global function) - -| Function | Example | Result | -|----------|---------|--------| -| `obj(k1, v1, k2, v2, ...)` | `obj("name", "alice", "age", 30)` | `{"name": "alice", "age": 30}` | - -Errors on odd number of args or non-string keys. Enables building maps for DB inserts and API payloads. - -#### Null Coalescing (global function) - -| Function | Example | Result | -|----------|---------|--------| -| `default(value, fallback)` | `default(steps.x.output.json.name, "unknown")` | value if non-null, else `"unknown"` | - -#### JSON (global functions) - -| Function | Example | Result | -|----------|---------|--------| -| `jsonEncode(value)` | `jsonEncode(obj("a", 1))` | `'{"a":1}'` | -| `jsonDecode(string)` | `jsonDecode('{"a":1}')` | `{"a": 1}` | - -#### Date/Time (global functions) - -| Function | Example | Result | -|----------|---------|--------| -| `parseTimestamp(string)` | `parseTimestamp("2026-03-24T19:00:00Z")` | timestamp value | -| `formatTimestamp(ts, layout)` | `formatTimestamp(ts, "2006-01-02")` | `"2026-03-24"` | - -Uses Go time layout strings. 
- -#### Collections (global function) - -| Function | Example | Result | -|----------|---------|--------| -| `flatten(list)` | `flatten([[1,2],[3,4]])` | `[1,2,3,4]` | - -### Integration Point - -In `internal/cel/cel.go`, the `NewEvaluator` function passes function options to `cel.NewEnv()`: - -```go -func NewEvaluator() (*Evaluator, error) { - opts := []cel.EnvOption{ - cel.Variable("steps", cel.MapType(cel.StringType, cel.DynType)), - cel.Variable("inputs", cel.MapType(cel.StringType, cel.DynType)), - cel.Variable("env", cel.MapType(cel.StringType, cel.StringType)), - cel.Variable("trigger", cel.MapType(cel.StringType, cel.DynType)), - } - opts = append(opts, customFunctions()...) - - env, err := cel.NewEnv(opts...) - // ... -} -``` - -`customFunctions()` is defined in `functions.go` and returns `[]cel.EnvOption`. - -### Error Handling - -All errors surface through the existing `Eval` error path: -- Type mismatches: `parseInt("abc")` → evaluation error -- `obj()` with odd args → evaluation error -- `obj()` with non-string keys → evaluation error -- `jsonDecode()` with invalid JSON → evaluation error -- `parseTimestamp()` with unparseable string → evaluation error - -No new error types needed. - -## Documentation - -### CEL Expressions Reference Update - -Update `site/src/content/docs/concepts/expressions.md` to add: -- All custom functions organized by category -- The already-working macros (`.map()`, `.filter()`, `.exists()`, `.all()`, `.exists_one()`) -- Examples for each function - -### New: Data Transformations Guide - -New page at `site/src/content/docs/getting-started/data-transformations.md` covering three patterns: - -**Pattern 1 — Structural transforms (CEL only):** -API result → `.map()` + `obj()` → Postgres INSERT. No AI needed. For when the transform is a known schema mapping. - -**Pattern 2 — AI-powered transforms:** -Unstructured data → AI connector with `output_schema` → structured output. 
For when the transform requires interpretation, classification, or natural language understanding. - -**Pattern 3 — Hybrid:** -Fetch → CEL for structural normalization → AI for enrichment/classification → Store. Combines both approaches. - -Each pattern includes a complete example workflow YAML. - -### New Example Workflows - -- `examples/data-transform-api-to-db.yaml` — Fetch API → CEL `.map()` + `obj()` → Postgres INSERT (the exact use case from the issue) -- `examples/ai-data-enrichment.yaml` — Fetch data → AI classify/enrich with structured output → store - -## Files Changed - -### Modified - -| File | Change | -|------|--------| -| `internal/cel/cel.go` | Pass `customFunctions()` options to `cel.NewEnv()` | -| `site/src/content/docs/concepts/expressions.md` | Add function reference, document macros | - -### New - -| File | Purpose | -|------|---------| -| `internal/cel/functions.go` | All custom function definitions | -| `internal/cel/functions_test.go` | Table-driven tests for every custom function | -| `internal/cel/macros_test.go` | Tests for built-in macros (lock in existing behavior) | -| `site/src/content/docs/getting-started/data-transformations.md` | Transformation patterns guide | -| `examples/data-transform-api-to-db.yaml` | Structural transform example workflow | -| `examples/ai-data-enrichment.yaml` | AI transform example workflow | - -## Non-Goals - -- **Custom user-defined functions** — no plugin/extension API for CEL functions -- **Loops or control flow** — CEL is intentionally non-Turing-complete -- **Regex** — deferring to a future issue; CEL's `matches()` function could be enabled later -- **New connector type** — transformations happen in CEL expressions, not as a separate step type - -## Testing Strategy - -- **`functions_test.go`** — table-driven: each function gets happy path + error cases (wrong types, empty inputs, edge cases) -- **`macros_test.go`** — tests for `.map()`, `.filter()`, `.exists()`, `.all()`, `.exists_one()` with list 
data to lock in behavior -- **Existing tests unaffected** — custom functions are additive; no behavior changes to existing expressions diff --git a/docs/superpowers/specs/2026-03-24-init-connection-recovery-design.md b/docs/superpowers/specs/2026-03-24-init-connection-recovery-design.md deleted file mode 100644 index a67fde3..0000000 --- a/docs/superpowers/specs/2026-03-24-init-connection-recovery-design.md +++ /dev/null @@ -1,174 +0,0 @@ -# `mantle init` Connection Recovery & Quickstart Fix - -**Date:** 2026-03-24 -**Issue:** [#7 — Get Running in 5 Minutes](https://github.com/dvflw/mantle/issues/7) -**Status:** Draft - -## Problem - -The landing page quickstart tells users to run `docker compose up -d` after installing via `go install`. There's no `docker-compose.yml` when you install that way — step 2 immediately fails. The `mantle init` command needs to handle the "no database yet" case gracefully. - -## Design - -### Connection Recovery Flow - -`mantle init` already loads config and calls `db.Open`. The change adds a recovery path when the connection fails: - -``` -mantle init - ├─ db.Open succeeds → run migrations → done - └─ db.Open fails - ├─ host is NOT loopback → print error with details, offer [R]etry or [Q]uit - └─ host IS loopback → offer Docker auto-provisioning - ├─ user accepts - │ ├─ docker available → start container, wait for ready, run migrations → done - │ └─ docker unavailable → show message, offer [R]etry or [C]onnection string - └─ user declines → offer [R]etry or [C]onnection string -``` - -### Loopback Detection - -Parse the host from the configured database URL. Treat as loopback if the host is: -- `localhost` -- `127.0.0.1` -- `::1` - -Use `net/url` to parse the connection string and extract the host. - -### Docker Auto-Provisioning - -When the user accepts Docker provisioning: - -1. Check Docker availability: exec `docker info` and check exit code -2. 
Run the container: - ``` - docker run -d \ - --name mantle-postgres \ - -p 5432:5432 \ - -e POSTGRES_USER=mantle \ - -e POSTGRES_PASSWORD=mantle \ - -e POSTGRES_DB=mantle \ - -v mantle-pgdata:/var/lib/postgresql/data \ - postgres:16-alpine - ``` -3. Wait for readiness: poll `db.Open` with backoff (up to ~15s) -4. On success: continue to migrations -5. On timeout: error with "Container started but Postgres isn't accepting connections" - -Use `os/exec` to run Docker commands. The container config matches the existing defaults in `config.go` so no config persistence is needed. - -If the container name `mantle-postgres` already exists (stopped), remove it first and start fresh. If it's already running, skip straight to the readiness check. - -### Fallback: No Docker / User Declined - -Present two options: -``` -Can't auto-provision — Docker isn't installed or isn't running. - - [R] Retry (install or start Docker first) - [C] Enter a Postgres connection string - -Choice [R/c]: -``` - -- **Retry**: loop back to Docker availability check -- **Connection string**: prompt for URL, validate with `db.Open`, on success continue to migrations, on failure show the error and re-prompt - -### Non-Loopback Failure - -When the configured URL points to a remote host and the connection fails: -``` -Failed to connect to database at db.example.com:5432 - - Error: connection refused - - [R] Retry (fix the issue and try again) - [Q] Quit - -Choice [R/q]: -``` - -Include the underlying error from `db.Open` (timeout, auth failure, TLS, DNS resolution, etc.) so the user can diagnose without guessing. - -- **Retry**: re-reads the config (picks up env var or config file changes made while waiting) and retries `db.Open`. This lets the user fix a typo, adjust a firewall rule, or start their database without restarting `mantle init`. -- **Quit**: exit 1 - -### Interactive Input - -Follow the existing pattern from `login.go`: use `fmt.Fscanln(cmd.InOrStdin(), &input)` for prompts. 
No new dependencies needed. - -When stdin is not a terminal (piped input, CI), skip all interactive prompts and return the connection error directly. Detect with `os.Stdin.Stat()` checking for `ModeCharDevice`. - -## Constant Extraction - -Before implementing the new init flow, extract shared constants that are currently duplicated across the codebase. This keeps the new code referencing a single source of truth. - -### `internal/dbdefaults/dbdefaults.go` — shared database & Docker defaults - -| Constant | Value | Current duplication | -|----------|-------|---------------------| -| `PostgresImage` | `"postgres:16-alpine"` | 7 test files + docker-compose.yml | -| `TestUser` | `"mantle"` | 6 testcontainers setups | -| `TestPassword` | `"mantle"` | 6 testcontainers setups | -| `TestDatabase` | `"mantle_test"` | 6 testcontainers setups | -| `ContainerName` | `"mantle-postgres"` | new (Docker provisioning) | - -### `internal/netutil/loopback.go` — loopback detection - -| Function/Const | Purpose | Current duplication | -|----------------|---------|---------------------| -| `IsLoopback(host string) bool` | Returns true for localhost, 127.0.0.1, ::1 | config.go SSL warning + new init.go recovery | - -### `internal/budget/budget.go` — reset mode constants - -| Constant | Value | Current duplication | -|----------|-------|---------------------| -| `ResetModeCalendar` | `"calendar"` | config.go validation + budget logic + tests | -| `ResetModeRolling` | `"rolling"` | config.go validation + budget logic + tests | - -These already live in the budget package conceptually; just promote the string literals to exported constants. 
- -## Files Changed - -### Modified - -| File | Change | -|------|--------| -| `internal/cli/init.go` | Add connection recovery flow, Docker provisioning, interactive prompts | -| `internal/config/config.go` | Use `netutil.IsLoopback` for SSL warning, use `budget.ResetMode*` constants | -| `internal/budget/budget.go` | Add `ResetModeCalendar` / `ResetModeRolling` constants, use them in existing logic | -| `internal/auth/auth_test.go` | Use `dbdefaults` constants for testcontainers setup | -| `internal/workflow/store_test.go` | Use `dbdefaults` constants | -| `internal/db/migrate_test.go` | Use `dbdefaults` constants | -| `internal/secret/store_test.go` | Use `dbdefaults` constants | -| `internal/engine/test_helpers_test.go` | Use `dbdefaults` constants | -| `internal/budget/store_test.go` | Use `dbdefaults` constants | -| `internal/connector/postgres_test.go` | Use `dbdefaults.PostgresImage` | -| `site/src/components/GetStarted.astro` | Remove `docker compose up -d` from step 2, simplify to just `mantle init` | -| `site/src/content/docs/getting-started/index.md` | Update quickstart to remove Docker prerequisite, explain `mantle init` handles DB setup | - -### New - -| File | Purpose | -|------|---------| -| `internal/dbdefaults/dbdefaults.go` | Shared Postgres image, test credentials, container name constants | -| `internal/netutil/loopback.go` | `IsLoopback` function for host classification | -| `internal/netutil/loopback_test.go` | Tests for loopback detection | -| `internal/cli/docker.go` | Docker availability check, container start, readiness polling | -| `internal/cli/init_test.go` | Tests for connection recovery flow, non-interactive fallback | -| `internal/cli/docker_test.go` | Tests for Docker command construction, container name conflict handling | - -## Non-Goals - -- **Config file generation**: `mantle init` does not create `mantle.yaml`. The defaults work with the Docker container. -- **Docker Compose**: we use `docker run`, not `docker compose`. 
No dependency on a compose file. -- **Custom port/user/password in Docker flow**: always matches defaults. Users who need custom config can use the connection string prompt. -- **Container lifecycle management**: `mantle init` starts the container; it doesn't stop or remove it. Users manage that themselves. - -## Testing Strategy - -- **Loopback detection**: unit test `isLoopback` with localhost, 127.0.0.1, ::1, remote hosts, IPv6 -- **Non-interactive detection**: unit test that piped stdin skips prompts and returns error -- **Docker command construction**: verify the exact `docker run` args match defaults -- **Integration**: testcontainers already covers the migration path; the new code paths are the interactive/Docker shell-out portions which are unit-tested with mocked exec -- **Site content**: manual verification that quickstart steps are accurate From c4788c31f5b89c3293aea9971b90f9978a522959 Mon Sep 17 00:00:00 2001 From: "coderabbitai[bot]" <136622811+coderabbitai[bot]@users.noreply.github.com> Date: Wed, 25 Mar 2026 02:33:55 +0000 Subject: [PATCH 21/21] =?UTF-8?q?=F0=9F=93=9D=20CodeRabbit=20Chat:=20Add?= =?UTF-8?q?=20generated=20unit=20tests?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- internal/cel/functions_test.go | 284 +++++++++++++++++++++++++++++++++ internal/cel/macros_test.go | 98 ++++++++++++ 2 files changed, 382 insertions(+) diff --git a/internal/cel/functions_test.go b/internal/cel/functions_test.go index aeb7f18..defeddb 100644 --- a/internal/cel/functions_test.go +++ b/internal/cel/functions_test.go @@ -8,6 +8,59 @@ import ( "github.com/stretchr/testify/require" ) +// ── normalizeJSONNumbers unit tests ────────────────────────────────────────── + +func TestNormalizeJSONNumbers_ExponentFloat(t *testing.T) { + result := normalizeJSONNumbers(json.Number("1e5")) + assert.Equal(t, float64(100000), result) +} + +func TestNormalizeJSONNumbers_NegativeFloat(t *testing.T) { + result := 
normalizeJSONNumbers(json.Number("-3.14")) + assert.Equal(t, float64(-3.14), result) +} + +func TestNormalizeJSONNumbers_IntegerMaxInt64(t *testing.T) { + // 9223372036854775807 is exactly math.MaxInt64 — must come back as int64. + result := normalizeJSONNumbers(json.Number("9223372036854775807")) + assert.Equal(t, int64(9223372036854775807), result) +} + +func TestNormalizeJSONNumbers_OverflowInt64PreservedAsString(t *testing.T) { + // One past MaxInt64 — cannot fit in int64; must preserve as string. + result := normalizeJSONNumbers(json.Number("9223372036854775808")) + assert.Equal(t, "9223372036854775808", result) +} + +func TestNormalizeJSONNumbers_NestedMap(t *testing.T) { + input := map[string]any{ + "count": json.Number("42"), + "ratio": json.Number("0.5"), + "label": "hello", + } + result := normalizeJSONNumbers(input) + m := result.(map[string]any) + assert.Equal(t, int64(42), m["count"]) + assert.Equal(t, float64(0.5), m["ratio"]) + assert.Equal(t, "hello", m["label"]) +} + +func TestNormalizeJSONNumbers_NestedArray(t *testing.T) { + input := []any{json.Number("1"), json.Number("2.5"), "text"} + result := normalizeJSONNumbers(input) + arr := result.([]any) + assert.Equal(t, int64(1), arr[0]) + assert.Equal(t, float64(2.5), arr[1]) + assert.Equal(t, "text", arr[2]) +} + +func TestNormalizeJSONNumbers_PassthroughTypes(t *testing.T) { + // Non-number types should be returned unchanged. 
+ assert.Equal(t, true, normalizeJSONNumbers(true)) + assert.Equal(t, "hello", normalizeJSONNumbers("hello")) + assert.Nil(t, normalizeJSONNumbers(nil)) +} + func TestFunc_ToLower(t *testing.T) { eval, err := NewEvaluator() require.NoError(t, err) @@ -460,6 +513,16 @@ func TestFunc_FormatTimestamp(t *testing.T) { expr: `formatTimestamp(parseTimestamp("2024-01-15T14:30:00Z"), "15:04")`, want: "14:30", }, + { + name: "named month format", + expr: `formatTimestamp(parseTimestamp("2024-01-15T00:00:00Z"), "Jan 2, 2006")`, + want: "Jan 15, 2024", + }, + { + name: "rfc3339 roundtrip", + expr: `formatTimestamp(parseTimestamp("2026-03-24T00:00:00Z"), "2006-01-02T15:04:05Z07:00")`, + want: "2026-03-24T00:00:00Z", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -469,3 +532,224 @@ func TestFunc_FormatTimestamp(t *testing.T) { }) } } + +// ── Additional boundary and regression tests ────────────────────────────────── + +func TestFunc_Obj_MaxArity(t *testing.T) { + // obj() supports up to 5 key-value pairs (10 args). Verify all registered + // overloads (2, 4, 6, 8, 10 args) produce the correct maps. 
+ eval, err := NewEvaluator() + require.NoError(t, err) + + t.Run("three pairs (6 args)", func(t *testing.T) { + result, err := eval.Eval(`obj("a", 1, "b", 2, "c", 3)`, newTestContext()) + require.NoError(t, err) + assert.Equal(t, map[string]any{"a": int64(1), "b": int64(2), "c": int64(3)}, result) + }) + + t.Run("four pairs (8 args)", func(t *testing.T) { + result, err := eval.Eval(`obj("a", 1, "b", 2, "c", 3, "d", 4)`, newTestContext()) + require.NoError(t, err) + assert.Equal(t, map[string]any{"a": int64(1), "b": int64(2), "c": int64(3), "d": int64(4)}, result) + }) + + t.Run("five pairs (10 args — max arity)", func(t *testing.T) { + result, err := eval.Eval(`obj("a", 1, "b", 2, "c", 3, "d", 4, "e", 5)`, newTestContext()) + require.NoError(t, err) + assert.Equal(t, map[string]any{ + "a": int64(1), "b": int64(2), "c": int64(3), + "d": int64(4), "e": int64(5), + }, result) + }) +} + +func TestFunc_Flatten_MixedScalarAndSublist(t *testing.T) { + // The flatten implementation passes non-list elements through unchanged. + // A list like [1, [2, 3]] should produce [1, 2, 3]. + eval, err := NewEvaluator() + require.NoError(t, err) + + ctx := newTestContext() + ctx.Inputs["mixed"] = []any{int64(1), []any{int64(2), int64(3)}} + result, err := eval.Eval(`flatten(inputs.mixed)`, ctx) + require.NoError(t, err) + assert.Equal(t, []any{int64(1), int64(2), int64(3)}, result) +} + +func TestFunc_Default_FalsyButNonNull(t *testing.T) { + // false and 0 are falsy but are NOT null — default() must return them unchanged. 
+ eval, err := NewEvaluator() + require.NoError(t, err) + + t.Run("false returns false not fallback", func(t *testing.T) { + result, err := eval.Eval(`default(false, true)`, newTestContext()) + require.NoError(t, err) + assert.Equal(t, false, result) + }) + + t.Run("zero returns zero not fallback", func(t *testing.T) { + result, err := eval.Eval(`default(0, 99)`, newTestContext()) + require.NoError(t, err) + assert.Equal(t, int64(0), result) + }) + + t.Run("empty string returns empty string not fallback", func(t *testing.T) { + result, err := eval.Eval(`default("", "fallback")`, newTestContext()) + require.NoError(t, err) + assert.Equal(t, "", result) + }) +} + +func TestFunc_Split_EmptySeparator(t *testing.T) { + // strings.Split("abc", "") returns ["a", "b", "c"] — each character. + eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`"abc".split("")`, newTestContext()) + require.NoError(t, err) + assert.Equal(t, []any{"a", "b", "c"}, result) +} + +func TestFunc_Replace_EmptyOldString(t *testing.T) { + // strings.ReplaceAll("ab", "", "X") inserts X between and around each char. + eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`"ab".replace("", "X")`, newTestContext()) + require.NoError(t, err) + assert.Equal(t, "XaXbX", result) +} + +func TestFunc_ParseInt_WhitespaceIsInvalid(t *testing.T) { + // strconv.ParseInt is strict — " 42" (leading space) must fail. 
+ eval, err := NewEvaluator() + require.NoError(t, err) + + _, err = eval.Eval(`parseInt(" 42")`, newTestContext()) + require.Error(t, err) +} + +func TestFunc_JsonDecode_FloatValue(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`jsonDecode("3.14")`, newTestContext()) + require.NoError(t, err) + assert.Equal(t, float64(3.14), result) +} + +func TestFunc_JsonDecode_BoolValue(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`jsonDecode("true")`, newTestContext()) + require.NoError(t, err) + assert.Equal(t, true, result) +} + +func TestFunc_JsonDecode_ArrayOfIntegers(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`jsonDecode("[1, 2, 3]")`, newTestContext()) + require.NoError(t, err) + + arr, ok := result.([]any) + require.True(t, ok, "expected []any, got %T", result) + assert.Equal(t, []any{int64(1), int64(2), int64(3)}, arr) +} + +func TestFunc_JsonDecode_NestedObject(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`jsonDecode("{\"a\":{\"b\":42}}")`, newTestContext()) + require.NoError(t, err) + + m, ok := result.(map[string]any) + require.True(t, ok) + inner, ok := m["a"].(map[string]any) + require.True(t, ok, "expected inner map") + assert.Equal(t, int64(42), inner["b"]) +} + +func TestFunc_JsonEncode_List(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`jsonEncode(["a", "b", "c"])`, newTestContext()) + require.NoError(t, err) + + s, ok := result.(string) + require.True(t, ok) + var arr []string + require.NoError(t, json.Unmarshal([]byte(s), &arr)) + assert.Equal(t, []string{"a", "b", "c"}, arr) +} + +func TestFunc_JsonEncode_Primitive(t *testing.T) { + eval, err := NewEvaluator() + require.NoError(t, err) + + t.Run("integer", func(t *testing.T) { + result, err := eval.Eval(`jsonEncode(42)`, 
newTestContext())
+		require.NoError(t, err)
+		assert.Equal(t, "42", result)
+	})
+
+	t.Run("boolean", func(t *testing.T) {
+		result, err := eval.Eval(`jsonEncode(true)`, newTestContext())
+		require.NoError(t, err)
+		assert.Equal(t, "true", result)
+	})
+
+	t.Run("string", func(t *testing.T) {
+		result, err := eval.Eval(`jsonEncode("hello")`, newTestContext())
+		require.NoError(t, err)
+		assert.Equal(t, `"hello"`, result)
+	})
+}
+
+func TestFunc_ParseTimestamp_NamedMonthFormat(t *testing.T) {
+	// "Jan 2, 2006" is one of the supported layouts in parseTimestamp (NOTE: the design spec documents this function as timestamp() — reconcile the naming).
+	eval, err := NewEvaluator()
+	require.NoError(t, err)
+
+	result, err := eval.Eval(`formatTimestamp(parseTimestamp("Mar 15, 2025"), "2006-01-02")`, newTestContext())
+	require.NoError(t, err)
+	assert.Equal(t, "2025-03-15", result)
+}
+
+func TestFunc_StringChaining(t *testing.T) {
+	// Verify that string methods can be chained.
+	eval, err := NewEvaluator()
+	require.NoError(t, err)
+
+	result, err := eval.Eval(`" HELLO WORLD ".trim().toLower()`, newTestContext())
+	require.NoError(t, err)
+	assert.Equal(t, "hello world", result)
+}
+
+func TestFunc_ResolveString_WithCustomFunction(t *testing.T) {
+	// Verify that custom functions work when embedded in {{ }} template strings.
+	eval, err := NewEvaluator()
+	require.NoError(t, err)
+
+	result, err := eval.ResolveString(`tag:{{ "PRODUCTION".toLower() }}`, newTestContext())
+	require.NoError(t, err)
+	assert.Equal(t, "tag:production", result)
+}
+
+func TestFunc_JsonRoundtrip(t *testing.T) {
+	// jsonEncode followed by jsonDecode must preserve the original value.
+	eval, err := NewEvaluator()
+	require.NoError(t, err)
+
+	result, err := eval.Eval(`jsonDecode(jsonEncode(obj("x", 1, "y", "hello")))`, newTestContext())
+	require.NoError(t, err)
+
+	m, ok := result.(map[string]any)
+	require.True(t, ok)
+	assert.Equal(t, int64(1), m["x"])
+	assert.Equal(t, "hello", m["y"])
+}
\ No newline at end of file
diff --git a/internal/cel/macros_test.go b/internal/cel/macros_test.go
index beb855e..35fa63e 100644
--- a/internal/cel/macros_test.go
+++ b/internal/cel/macros_test.go
@@ -88,3 +88,101 @@ func TestMacro_MapAndFilter_Chained(t *testing.T) {
 	require.NoError(t, err)
 	assert.Equal(t, []any{"alice", "charlie"}, result)
 }
+
+func TestMacro_ExistsOne_NoMatch(t *testing.T) {
+	// exists_one must return false when NO element satisfies the predicate.
+	eval, err := NewEvaluator()
+	require.NoError(t, err)
+
+	result, err := eval.Eval(`steps.fetch.output.items.exists_one(item, item.name == "dave")`, newListContext())
+	require.NoError(t, err)
+	assert.Equal(t, false, result)
+}
+
+func TestMacro_ExistsOne_MultipleMatches(t *testing.T) {
+	// exists_one must return false when MORE THAN ONE element satisfies the predicate.
+	eval, err := NewEvaluator()
+	require.NoError(t, err)
+
+	// alice (30) and charlie (25) are both >= 21.
+	result, err := eval.Eval(`steps.fetch.output.items.exists_one(item, item.age >= 21)`, newListContext())
+	require.NoError(t, err)
+	assert.Equal(t, false, result)
+}
+
+func TestMacro_Filter_NoMatches(t *testing.T) {
+	// filter that matches nothing returns an empty list, not an error.
+	eval, err := NewEvaluator()
+	require.NoError(t, err)
+
+	result, err := eval.Eval(`steps.fetch.output.items.filter(item, item.age > 100)`, newListContext())
+	require.NoError(t, err)
+
+	items, ok := result.([]any)
+	require.True(t, ok, "expected []any, got %T", result)
+	assert.Empty(t, items)
+}
+
+func TestMacro_Map_WithCustomFunction(t *testing.T) {
+	// map() can use custom functions (toUpper) inside the transform expression.
+ eval, err := NewEvaluator() + require.NoError(t, err) + + result, err := eval.Eval(`steps.fetch.output.items.map(item, item.name.toUpper())`, newListContext()) + require.NoError(t, err) + assert.Equal(t, []any{"ALICE", "BOB", "CHARLIE"}, result) +} + +func TestMacro_Map_WithObj(t *testing.T) { + // map() combined with obj() should reshape each element into a new map. + // Fields on the resulting objects must be accessible via CEL field access. + eval, err := NewEvaluator() + require.NoError(t, err) + + // Verify correct result count. + size, err := eval.Eval( + `size(steps.fetch.output.items.filter(item, item.age >= 21).map(item, obj("display_name", item.name, "years", item.age)))`, + newListContext(), + ) + require.NoError(t, err) + assert.Equal(t, int64(2), size) + + // Verify the first element's fields are accessible by index within CEL. + name, err := eval.Eval( + `steps.fetch.output.items.filter(item, item.age >= 21).map(item, obj("display_name", item.name, "years", item.age))[0].display_name`, + newListContext(), + ) + require.NoError(t, err) + assert.Equal(t, "alice", name) + + years, err := eval.Eval( + `steps.fetch.output.items.filter(item, item.age >= 21).map(item, obj("display_name", item.name, "years", item.age))[0].years`, + newListContext(), + ) + require.NoError(t, err) + assert.Equal(t, int64(30), years) +} + +func TestMacro_All_EmptyList(t *testing.T) { + // all() on an empty list is vacuously true. + eval, err := NewEvaluator() + require.NoError(t, err) + + ctx := newListContext() + ctx.Steps["fetch"]["output"].(map[string]any)["items"] = []any{} + result, err := eval.Eval(`steps.fetch.output.items.all(item, false)`, ctx) + require.NoError(t, err) + assert.Equal(t, true, result) +} + +func TestMacro_Exists_EmptyList(t *testing.T) { + // exists() on an empty list returns false. 
+ eval, err := NewEvaluator() + require.NoError(t, err) + + ctx := newListContext() + ctx.Steps["fetch"]["output"].(map[string]any)["items"] = []any{} + result, err := eval.Eval(`steps.fetch.output.items.exists(item, true)`, ctx) + require.NoError(t, err) + assert.Equal(t, false, result) +} \ No newline at end of file