Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 0 additions & 23 deletions .github/workflows/ci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -34,27 +34,9 @@ jobs:
test-e2e:
needs:
- setup
strategy:
fail-fast: false
matrix:
database: [sqlite, postgres]
env:
VERSION: v0.0.1-test
runs-on: ubuntu-latest
services:
postgres:
image: pgvector/pgvector:pg18-trixie
env:
POSTGRES_DB: kagent
POSTGRES_USER: postgres
POSTGRES_PASSWORD: kagent
ports:
- 5432:5432
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
steps:
- name: Checkout repository
uses: actions/checkout@v4
Expand Down Expand Up @@ -99,11 +81,6 @@ jobs:
run: |
make create-kind-cluster
echo "Cache key: ${{ needs.setup.outputs.cache-key }}"
if [ "${{ matrix.database }}" = "postgres" ]; then
HOST_IP=$(docker network inspect kind -f '{{range .IPAM.Config}}{{if .Gateway}}{{.Gateway}}{{"\n"}}{{end}}{{end}}' | grep -E '^[0-9]+\.' | head -1)
export KAGENT_HELM_EXTRA_ARGS="$KAGENT_HELM_EXTRA_ARGS --set database.type=postgres --set database.postgres.url=postgres://postgres:kagent@${HOST_IP}:5432/kagent"
echo "Postgres URL: postgres://postgres:kagent@${HOST_IP}:5432/kagent"
fi
make helm-install
make push-test-agent push-test-skill
kubectl wait --for=condition=Ready agents.kagent.dev -n kagent --all --timeout=60s || kubectl get po -n kagent -o wide ||:
Expand Down
32 changes: 9 additions & 23 deletions DEVELOPMENT.md
Original file line number Diff line number Diff line change
Expand Up @@ -99,35 +99,21 @@ make kagent-addon-install

This installs the following components into your cluster:

| Addon | Description | Namespace |
|----------------|-----------------------------------------------------|-----------|
| Istio | Service mesh (demo profile) | `istio-system` |
| Grafana | Dashboards and visualization | `kagent` |
| Prometheus | Metrics collection | `kagent` |
| Metrics Server | Kubernetes resource metrics | `kube-system` |
| Postgres | Relational database (for kagent controller storage) | `kagent` |

#### Using Postgres as the Datastore

By default, kagent uses a local SQLite database for data persistence. To use
postgres as the backing store instead, deploy kagent via:

> **Warning:**
> The following example uses hardcoded Postgres credentials (`postgres:kagent`) for local development only.
> **Do not use these credentials in production environments.**
```shell
KAGENT_HELM_EXTRA_ARGS="--set database.type=postgres --set database.postgres.url=postgres://postgres:kagent@postgres.kagent.svc.cluster.local:5432/kagent" \
make helm-install
```
| Addon | Description | Namespace |
|----------------|--------------------------------------|----------------|
| Istio | Service mesh (demo profile) | `istio-system` |
| Grafana | Dashboards and visualization | `kagent` |
| Prometheus | Metrics collection | `kagent` |
| Metrics Server | Kubernetes resource metrics | `kube-system` |

Verify the connection by checking the controller logs:
PostgreSQL (with pgvector) is deployed automatically as part of `make helm-install` via the bundled Helm chart. The optional addons above provide observability components.

Verify the database connection by checking the controller logs:

```shell
kubectl logs -n kagent deployment/kagent-controller | grep -i postgres
```

**To revert to SQLite:** Run `make helm-install` without the `KAGENT_HELM_EXTRA_ARGS` variable.

### Troubleshooting

### buildx localhost access
Expand Down
2 changes: 0 additions & 2 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -420,12 +420,10 @@ kagent-addon-install: use-kind-cluster
# Install the kagent addons for testing: Istio, Grafana, Prometheus, and metrics-server
istioctl install --set profile=demo -y
kubectl apply --context kind-$(KIND_CLUSTER_NAME) -f contrib/addons/grafana.yaml
kubectl apply --context kind-$(KIND_CLUSTER_NAME) -f contrib/addons/postgres.yaml
kubectl apply --context kind-$(KIND_CLUSTER_NAME) -f contrib/addons/prometheus.yaml
kubectl apply --context kind-$(KIND_CLUSTER_NAME) -f contrib/addons/metrics-server.yaml
# wait for pods to be ready
kubectl wait --context kind-$(KIND_CLUSTER_NAME) --for=condition=Ready pod -l app.kubernetes.io/name=grafana -n kagent --timeout=60s
kubectl wait --context kind-$(KIND_CLUSTER_NAME) --for=condition=Ready pod -l app.kubernetes.io/name=postgres -n kagent --timeout=60s
kubectl wait --context kind-$(KIND_CLUSTER_NAME) --for=condition=Ready pod -l app.kubernetes.io/name=prometheus -n kagent --timeout=60s

.PHONY: open-dev-container
Expand Down
73 changes: 0 additions & 73 deletions contrib/addons/postgres.yaml

This file was deleted.

3 changes: 0 additions & 3 deletions go/.gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,3 @@ Dockerfile.cross
*.swp
*.swo
*~

# turso
file::*
5 changes: 2 additions & 3 deletions go/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -30,10 +30,9 @@ RUN --mount=type=cache,target=/root/go/pkg/mod,rw \
CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -ldflags "$LDFLAGS" -o /app "$BUILD_PACKAGE"

### STAGE 2: final image
# Use distroless/cc-debian12 which includes C/C++ runtime libraries
# This is required for turso-go's purego library loading
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/cc-debian12:nonroot
FROM gcr.io/distroless/static:nonroot
ARG TARGETPLATFORM

WORKDIR /
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ import (

agenttranslator "github.com/kagent-dev/kagent/go/core/internal/controller/translator/agent"
"github.com/kagent-dev/kagent/go/core/internal/database"
"github.com/kagent-dev/kagent/go/core/internal/dbtest"
"github.com/kagent-dev/kmcp/api/v1alpha1"
)

Expand All @@ -26,6 +27,12 @@ func TestReconcileKagentMCPServer_ErrorPropagation(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

if testing.Short() {
t.Skip("skipping database test in short mode")
}

connStr := dbtest.StartT(context.Background(), t)

testCases := []struct {
name string
mcpServer *v1alpha1.MCPServer
Expand Down Expand Up @@ -78,11 +85,10 @@ func TestReconcileKagentMCPServer_ErrorPropagation(t *testing.T) {
WithObjects(tc.mcpServer).
Build()

// Create an in-memory database manager
dbManager, err := database.NewManager(&database.Config{
DatabaseType: database.DatabaseTypeSqlite,
SqliteConfig: &database.SqliteConfig{
DatabasePath: "file::memory:?cache=shared",
PostgresConfig: &database.PostgresConfig{
URL: connStr,
VectorEnabled: true,
},
})
require.NoError(t, err)
Expand Down
67 changes: 15 additions & 52 deletions go/core/internal/database/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -512,13 +512,10 @@ func (c *clientImpl) StoreCrewAIMemory(ctx context.Context, memory *dbpkg.CrewAI
func (c *clientImpl) SearchCrewAIMemoryByTask(ctx context.Context, userID, threadID, taskDescription string, limit int) ([]*dbpkg.CrewAIAgentMemory, error) {
var memories []*dbpkg.CrewAIAgentMemory

// Search for task_description within the JSON memory_data field
// Using JSON_EXTRACT or JSON_UNQUOTE for MySQL/PostgreSQL, or simple LIKE for SQLite
// Sort by created_at DESC, then by score ASC (if score exists in JSON)
query := c.db.WithContext(ctx).Where(
"user_id = ? AND thread_id = ? AND (memory_data LIKE ? OR JSON_EXTRACT(memory_data, '$.task_description') LIKE ?)",
"user_id = ? AND thread_id = ? AND (memory_data LIKE ? OR memory_data->>'task_description' LIKE ?)",
userID, threadID, "%"+taskDescription+"%", "%"+taskDescription+"%",
).Order("created_at DESC, JSON_EXTRACT(memory_data, '$.score') ASC")
).Order("created_at DESC, memory_data->>'score' ASC")

// Apply limit
if limit > 0 {
Expand Down Expand Up @@ -597,43 +594,17 @@ func (c *clientImpl) StoreAgentMemories(ctx context.Context, memories []*dbpkg.M
func (c *clientImpl) SearchAgentMemory(ctx context.Context, agentName, userID string, embedding pgvector.Vector, limit int) ([]dbpkg.AgentMemorySearchResult, error) {
var results []dbpkg.AgentMemorySearchResult

db := c.db.WithContext(ctx)
if db.Name() == "sqlite" {
// libSQL/Turso syntax: vector_distance_cos(embedding, vector32('JSON_ARRAY'))
// We must use fmt.Sprintf to inline the JSON array because vector32() requires a string literal
// and parameter binding with ? fails with "unexpected token" errors (GORM limitation)
embeddingJSON, err := json.Marshal(embedding.Slice())
if err != nil {
return nil, fmt.Errorf("failed to serialize embedding: %w", err)
}

// Safe formatting because we control the JSON string generation from float slice
query := fmt.Sprintf(`
SELECT id, agent_name, user_id, content, metadata, created_at, expires_at, access_count,
1 - vector_distance_cos(embedding, vector32('%s')) as score
FROM memory
WHERE agent_name = ? AND user_id = ?
ORDER BY vector_distance_cos(embedding, vector32('%s')) ASC
LIMIT ?
`, string(embeddingJSON), string(embeddingJSON))

if err := db.Raw(query, agentName, userID, limit).Scan(&results).Error; err != nil {
return nil, fmt.Errorf("failed to search agent memory (sqlite): %w", err)
}
} else {
// Postgres pgvector syntax: uses <=> operator for cosine distance.
// COALESCE guards against NaN when either vector has zero magnitude.
// pgvector.Vector implements sql.Scanner and driver.Valuer
query := `
SELECT *, COALESCE(1 - (embedding <=> ?), 0) as score
FROM memory
WHERE agent_name = ? AND user_id = ?
ORDER BY embedding <=> ? ASC
LIMIT ?
`
if err := db.Raw(query, embedding, agentName, userID, embedding, limit).Scan(&results).Error; err != nil {
return nil, fmt.Errorf("failed to search agent memory (postgres): %w", err)
}
// pgvector <=> operator for cosine distance.
// COALESCE guards against NaN when either vector has zero magnitude.
query := `
SELECT *, COALESCE(1 - (embedding <=> ?), 0) as score
FROM memory
WHERE agent_name = ? AND user_id = ?
ORDER BY embedding <=> ? ASC
LIMIT ?
`
if err := c.db.WithContext(ctx).Raw(query, embedding, agentName, userID, embedding, limit).Scan(&results).Error; err != nil {
return nil, fmt.Errorf("failed to search agent memory: %w", err)
}

// Increment access count for found memories synchronously.
Expand All @@ -642,7 +613,7 @@ func (c *clientImpl) SearchAgentMemory(ctx context.Context, agentName, userID st
for i, m := range results {
ids[i] = m.ID
}
if err := db.Model(&dbpkg.Memory{}).Where("id IN ?", ids).UpdateColumn("access_count", gorm.Expr("access_count + ?", 1)).Error; err != nil {
if err := c.db.WithContext(ctx).Model(&dbpkg.Memory{}).Where("id IN ?", ids).UpdateColumn("access_count", gorm.Expr("access_count + ?", 1)).Error; err != nil {
return nil, fmt.Errorf("failed to increment access count: %w", err)
}
}
Expand Down Expand Up @@ -705,15 +676,7 @@ func (c *clientImpl) DeleteAgentMemory(ctx context.Context, agentName, userID st
}

func (c *clientImpl) deleteAgentMemoryByQuery(ctx context.Context, agentName, userID string) error {
var ids []string
if err := c.db.WithContext(ctx).Table("memory").Where("agent_name = ? AND user_id = ?", agentName, userID).Pluck("id", &ids).Error; err != nil {
return fmt.Errorf("failed to list memory ids: %w", err)
}
if len(ids) == 0 {
return nil
}
// DELETE by primary key only to avoid Turso multi-index scan on DELETE which causes a bug
if err := c.db.WithContext(ctx).Exec("DELETE FROM memory WHERE id IN ?", ids).Error; err != nil {
if err := c.db.WithContext(ctx).Where("agent_name = ? AND user_id = ?", agentName, userID).Delete(&dbpkg.Memory{}).Error; err != nil {
return fmt.Errorf("failed to delete agent memory: %w", err)
}
return nil
Expand Down
Loading
Loading