diff --git a/.golangci.yml b/.golangci.yml index 56ba433..3e28c53 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -47,12 +47,10 @@ linters: # Exclude G301 (directory permissions) - workspace needs readable directories # Exclude G304 (file inclusion) - paths are validated via safePath() # Exclude G306 (file permissions) - workspace files need to be readable - # Exclude G706 (log injection) - we use slog structured logging which is inherently safe excludes: - G301 - G304 - G306 - - G706 formatters: enable: diff --git a/README.md b/README.md index 3afa334..bec15f3 100644 --- a/README.md +++ b/README.md @@ -127,15 +127,15 @@ sudo mv flashduty-runner /usr/local/bin/ ```bash docker run -d \ --name flashduty-runner \ - -e FLASHDUTY_RUNNER_TOKEN=wnt_xxx \ + -e FLASHDUTY_RUNNER_TOKEN=ent_xxx \ -v /var/flashduty/workspace:/workspace \ registry.flashcat.cloud/public/flashduty-runner:latest # With custom endpoint docker run -d \ --name flashduty-runner \ - -e FLASHDUTY_RUNNER_TOKEN=wnt_xxx \ - -e FLASHDUTY_RUNNER_URL=wss://custom.example.com/safari/worknode/ws \ + -e FLASHDUTY_RUNNER_TOKEN=ent_xxx \ + -e FLASHDUTY_RUNNER_URL=wss://custom.example.com/safari/environment/ws \ -v /var/flashduty/workspace:/workspace \ registry.flashcat.cloud/public/flashduty-runner:latest ``` @@ -144,13 +144,13 @@ docker run -d \ ```bash # Basic usage (token required) -flashduty-runner run --token wnt_xxx +flashduty-runner run --token ent_xxx # Specify workspace directory -flashduty-runner run --token wnt_xxx --workspace ~/projects +flashduty-runner run --token ent_xxx --workspace ~/projects # Specify custom WebSocket endpoint -flashduty-runner run --token wnt_xxx --url wss://custom.example.com/safari/worknode/ws +flashduty-runner run --token ent_xxx --url wss://custom.example.com/safari/environment/ws # Check version flashduty-runner version @@ -180,8 +180,8 @@ WantedBy=multi-user.target Create `/etc/flashduty-runner/env`: ```bash -FLASHDUTY_RUNNER_TOKEN=wnt_xxx -# 
FLASHDUTY_RUNNER_URL=wss://custom.example.com/safari/worknode/ws +FLASHDUTY_RUNNER_TOKEN=ent_xxx +# FLASHDUTY_RUNNER_URL=wss://custom.example.com/safari/environment/ws # FLASHDUTY_RUNNER_WORKSPACE=/var/flashduty/workspace ``` @@ -199,7 +199,7 @@ Configuration is via command-line flags or environment variables (flags take pre | Flag | Env Variable | Required | Default | Description | |------|-------------|----------|---------|-------------| | `--token` | `FLASHDUTY_RUNNER_TOKEN` | Yes | - | Authentication token | -| `--url` | `FLASHDUTY_RUNNER_URL` | No | `wss://api.flashcat.cloud/safari/worknode/ws` | WebSocket endpoint | +| `--url` | `FLASHDUTY_RUNNER_URL` | No | `wss://api.flashcat.cloud/safari/environment/ws` | WebSocket endpoint | | `--workspace` | `FLASHDUTY_RUNNER_WORKSPACE` | No | `~/.flashduty-runner/workspace` | Workspace root directory | | `--log-level` | `FLASHDUTY_RUNNER_LOG_LEVEL` | No | `info` | Log level: debug, info, warn, error | @@ -238,7 +238,7 @@ journalctl -u flashduty-runner -f Enable debug logging to see detailed permission decisions: ```bash -flashduty-runner run --token wnt_xxx --log-level debug +flashduty-runner run --token ent_xxx --log-level debug # Or via environment variable export FLASHDUTY_RUNNER_LOG_LEVEL=debug diff --git a/README_zh.md b/README_zh.md index f132cce..6276206 100644 --- a/README_zh.md +++ b/README_zh.md @@ -127,15 +127,15 @@ sudo mv flashduty-runner /usr/local/bin/ ```bash docker run -d \ --name flashduty-runner \ - -e FLASHDUTY_RUNNER_TOKEN=wnt_xxx \ + -e FLASHDUTY_RUNNER_TOKEN=ent_xxx \ -v /var/flashduty/workspace:/workspace \ registry.flashcat.cloud/public/flashduty-runner:latest # 使用自定义端点 docker run -d \ --name flashduty-runner \ - -e FLASHDUTY_RUNNER_TOKEN=wnt_xxx \ - -e FLASHDUTY_RUNNER_URL=wss://custom.example.com/safari/worknode/ws \ + -e FLASHDUTY_RUNNER_TOKEN=ent_xxx \ + -e FLASHDUTY_RUNNER_URL=wss://custom.example.com/safari/environment/ws \ -v /var/flashduty/workspace:/workspace \ 
registry.flashcat.cloud/public/flashduty-runner:latest ``` @@ -144,13 +144,13 @@ docker run -d \ ```bash # 基本用法(token 必填) -flashduty-runner run --token wnt_xxx +flashduty-runner run --token ent_xxx # 指定工作区目录 -flashduty-runner run --token wnt_xxx --workspace ~/projects +flashduty-runner run --token ent_xxx --workspace ~/projects # 指定自定义 WebSocket 端点 -flashduty-runner run --token wnt_xxx --url wss://custom.example.com/safari/worknode/ws +flashduty-runner run --token ent_xxx --url wss://custom.example.com/safari/environment/ws # 查看版本 flashduty-runner version @@ -180,8 +180,8 @@ WantedBy=multi-user.target 创建 `/etc/flashduty-runner/env`: ```bash -FLASHDUTY_RUNNER_TOKEN=wnt_xxx -# FLASHDUTY_RUNNER_URL=wss://custom.example.com/safari/worknode/ws +FLASHDUTY_RUNNER_TOKEN=ent_xxx +# FLASHDUTY_RUNNER_URL=wss://custom.example.com/safari/environment/ws # FLASHDUTY_RUNNER_WORKSPACE=/var/flashduty/workspace ``` @@ -199,7 +199,7 @@ sudo systemctl enable --now flashduty-runner | 参数 | 环境变量 | 必填 | 默认值 | 说明 | |------|----------|------|--------|------| | `--token` | `FLASHDUTY_RUNNER_TOKEN` | 是 | - | 认证令牌 | -| `--url` | `FLASHDUTY_RUNNER_URL` | 否 | `wss://api.flashcat.cloud/safari/worknode/ws` | WebSocket 端点 | +| `--url` | `FLASHDUTY_RUNNER_URL` | 否 | `wss://api.flashcat.cloud/safari/environment/ws` | WebSocket 端点 | | `--workspace` | `FLASHDUTY_RUNNER_WORKSPACE` | 否 | `~/.flashduty-runner/workspace` | 工作区根目录 | | `--log-level` | `FLASHDUTY_RUNNER_LOG_LEVEL` | 否 | `info` | 日志级别:debug, info, warn, error | @@ -238,7 +238,7 @@ journalctl -u flashduty-runner -f 启用调试日志以查看详细的权限决策: ```bash -flashduty-runner run --token wnt_xxx --log-level debug +flashduty-runner run --token ent_xxx --log-level debug # 或通过环境变量 export FLASHDUTY_RUNNER_LOG_LEVEL=debug diff --git a/cmd/main.go b/cmd/main.go index e5042bd..cfadc65 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -13,8 +13,8 @@ import ( "github.com/spf13/cobra" + "github.com/flashcatcloud/flashduty-runner/environment" 
"github.com/flashcatcloud/flashduty-runner/permission" - "github.com/flashcatcloud/flashduty-runner/workspace" "github.com/flashcatcloud/flashduty-runner/ws" ) @@ -35,7 +35,7 @@ var ( // Default values const ( - defaultURL = "wss://api.flashcat.cloud/safari/worknode/ws" + defaultURL = "wss://api.flashcat.cloud/safari/environment/ws" defaultLogLevel = "info" ) @@ -67,13 +67,13 @@ func runCmd() *cobra.Command { Examples: # Basic usage (token required) - flashduty-runner run --token wnt_xxx + flashduty-runner run --token ent_xxx # Specify workspace directory - flashduty-runner run --token wnt_xxx --workspace ~/projects + flashduty-runner run --token ent_xxx --workspace ~/projects # Specify custom API URL - flashduty-runner run --token wnt_xxx --url wss://custom.example.com/safari/worknode/ws + flashduty-runner run --token ent_xxx --url wss://custom.example.com/safari/environment/ws Environment variables: FLASHDUTY_RUNNER_TOKEN - Authentication token (required if --token not provided) @@ -177,7 +177,7 @@ func runRunner() error { checker := permission.NewChecker(map[string]string{"*": "allow"}) // Create workspace - wspace, err := workspace.New(cfg.WorkspaceRoot, checker) + wspace, err := environment.New(cfg.WorkspaceRoot, checker) if err != nil { return fmt.Errorf("failed to create workspace: %w", err) } diff --git a/workspace/workspace.go b/environment/environment.go similarity index 78% rename from workspace/workspace.go rename to environment/environment.go index e758b57..d332033 100644 --- a/workspace/workspace.go +++ b/environment/environment.go @@ -1,5 +1,5 @@ // Package workspace implements local workspace operations. -package workspace +package environment import ( "archive/zip" @@ -26,15 +26,15 @@ import ( "github.com/flashcatcloud/flashduty-runner/protocol" ) -// Workspace handles local filesystem operations. -type Workspace struct { +// Environment handles local filesystem operations. 
+type Environment struct { root string checker *permission.Checker mcpMgr *mcp.ClientManager } // New creates a new workspace with the given root directory and permission checker. -func New(root string, checker *permission.Checker) (*Workspace, error) { +func New(root string, checker *permission.Checker) (*Environment, error) { absRoot, err := filepath.Abs(root) if err != nil { return nil, fmt.Errorf("failed to get absolute path: %w", err) @@ -45,7 +45,7 @@ func New(root string, checker *permission.Checker) (*Workspace, error) { return nil, fmt.Errorf("failed to create workspace root: %w", err) } - return &Workspace{ + return &Environment{ root: absRoot, checker: checker, mcpMgr: mcp.NewClientManager(), @@ -53,19 +53,19 @@ func New(root string, checker *permission.Checker) (*Workspace, error) { } // Root returns the workspace root directory. -func (w *Workspace) Root() string { - return w.root +func (e *Environment) Root() string { + return e.root } // safePath ensures the path is within the workspace root, resolving symlinks. 
-func (w *Workspace) safePath(path string) (string, error) { - absPath, err := filepath.Abs(filepath.Join(w.root, path)) +func (e *Environment) safePath(path string) (string, error) { + absPath, err := filepath.Abs(filepath.Join(e.root, path)) if err != nil { return "", fmt.Errorf("failed to get absolute path: %w", err) } // First check without resolving symlinks - if !strings.HasPrefix(absPath, w.root) { + if !strings.HasPrefix(absPath, e.root) { return "", fmt.Errorf("path is outside workspace root: %s", path) } @@ -82,9 +82,9 @@ func (w *Workspace) safePath(path string) (string, error) { } // Also resolve the root path for consistent comparison - realRoot, err := filepath.EvalSymlinks(w.root) + realRoot, err := filepath.EvalSymlinks(e.root) if err != nil { - realRoot = w.root + realRoot = e.root } if !strings.HasPrefix(realPath, realRoot) { @@ -97,8 +97,8 @@ func (w *Workspace) safePath(path string) (string, error) { } // Read reads a file from the workspace. -func (w *Workspace) Read(ctx context.Context, args *protocol.ReadArgs) (*protocol.ReadResult, error) { - realPath, err := w.safePath(args.Path) +func (e *Environment) Read(ctx context.Context, args *protocol.ReadArgs) (*protocol.ReadResult, error) { + realPath, err := e.safePath(args.Path) if err != nil { return nil, err } @@ -111,11 +111,11 @@ func (w *Workspace) Read(ctx context.Context, args *protocol.ReadArgs) (*protoco return nil, fmt.Errorf("cannot read a directory: %s", args.Path) } - return w.readFileContent(realPath, info.Size(), args.Offset, args.Limit) + return e.readFileContent(realPath, info.Size(), args.Offset, args.Limit) } // readFileContent reads file content with offset and limit support. 
-func (w *Workspace) readFileContent(path string, size, offset, limit int64) (*protocol.ReadResult, error) { +func (e *Environment) readFileContent(path string, size, offset, limit int64) (*protocol.ReadResult, error) { file, err := os.Open(path) if err != nil { return nil, fmt.Errorf("failed to open file: %w", err) @@ -148,8 +148,8 @@ func (w *Workspace) readFileContent(path string, size, offset, limit int64) (*pr } // Write writes content to a file in the workspace. -func (w *Workspace) Write(ctx context.Context, args *protocol.WriteArgs) error { - realPath, err := w.safePath(args.Path) +func (e *Environment) Write(ctx context.Context, args *protocol.WriteArgs) error { + realPath, err := e.safePath(args.Path) if err != nil { return err } @@ -170,15 +170,15 @@ func (w *Workspace) Write(ctx context.Context, args *protocol.WriteArgs) error { } // List lists entries in a directory. -func (w *Workspace) List(ctx context.Context, args *protocol.ListArgs) (*protocol.ListResult, error) { - realPath, err := w.safePath(args.Path) +func (e *Environment) List(ctx context.Context, args *protocol.ListArgs) (*protocol.ListResult, error) { + realPath, err := e.safePath(args.Path) if err != nil { return nil, err } // Resolve root for consistent relative path calculation - resolvedRoot := w.root - if resolved, resolveErr := filepath.EvalSymlinks(w.root); resolveErr == nil { + resolvedRoot := e.root + if resolved, resolveErr := filepath.EvalSymlinks(e.root); resolveErr == nil { resolvedRoot = resolved } @@ -223,8 +223,8 @@ func (w *Workspace) List(ctx context.Context, args *protocol.ListArgs) (*protoco } // Glob searches for files matching a pattern. 
-func (w *Workspace) Glob(ctx context.Context, args *protocol.GlobArgs) (*protocol.GlobResult, error) { - fsys := os.DirFS(w.root) +func (e *Environment) Glob(ctx context.Context, args *protocol.GlobArgs) (*protocol.GlobResult, error) { + fsys := os.DirFS(e.root) matches, err := doublestar.Glob(fsys, args.Pattern) if err != nil { return nil, fmt.Errorf("failed to glob: %w", err) @@ -235,14 +235,14 @@ func (w *Workspace) Glob(ctx context.Context, args *protocol.GlobArgs) (*protoco } // Grep searches for a pattern in files. -func (w *Workspace) Grep(ctx context.Context, args *protocol.GrepArgs) (*protocol.GrepResult, error) { +func (e *Environment) Grep(ctx context.Context, args *protocol.GrepArgs) (*protocol.GrepResult, error) { var res *protocol.GrepResult var err error // Try ripgrep first if _, lookErr := exec.LookPath("rg"); lookErr == nil { - res, err = w.grepWithRipgrep(ctx, args) + res, err = e.grepWithRipgrep(ctx, args) } else { - res, err = w.grepWithGo(ctx, args) + res, err = e.grepWithGo(ctx, args) } if err != nil || res == nil { @@ -257,7 +257,7 @@ func (w *Workspace) Grep(ctx context.Context, args *protocol.GrepArgs) (*protoco content := sb.String() // Process large output - processor := NewLargeOutputProcessor(w, DefaultLargeOutputConfig()) + processor := NewLargeOutputProcessor(e, DefaultLargeOutputConfig()) processed, err := processor.Process(ctx, content, "grep") if err != nil { res.Content = content @@ -273,7 +273,7 @@ func (w *Workspace) Grep(ctx context.Context, args *protocol.GrepArgs) (*protoco return res, nil } -func (w *Workspace) grepWithRipgrep(ctx context.Context, args *protocol.GrepArgs) (*protocol.GrepResult, error) { +func (e *Environment) grepWithRipgrep(ctx context.Context, args *protocol.GrepArgs) (*protocol.GrepResult, error) { cmdArgs := make([]string, 0, 6+2*len(args.Include)+2) cmdArgs = append(cmdArgs, "--column", "--line-number", "--no-heading", "--color", "never", "--smart-case") for _, inc := range args.Include { @@ -282,7 
+282,7 @@ func (w *Workspace) grepWithRipgrep(ctx context.Context, args *protocol.GrepArgs cmdArgs = append(cmdArgs, args.Pattern, ".") cmd := exec.CommandContext(ctx, "rg", cmdArgs...) //nolint:gosec // G204: args built from validated grep parameters - cmd.Dir = w.root + cmd.Dir = e.root var stdout strings.Builder cmd.Stdout = &stdout @@ -308,7 +308,7 @@ func (w *Workspace) grepWithRipgrep(ctx context.Context, args *protocol.GrepArgs return &protocol.GrepResult{Matches: results}, nil } -func (w *Workspace) grepWithGo(ctx context.Context, args *protocol.GrepArgs) (*protocol.GrepResult, error) { +func (e *Environment) grepWithGo(ctx context.Context, args *protocol.GrepArgs) (*protocol.GrepResult, error) { var results []protocol.GrepMatch include := args.Include if len(include) == 0 { @@ -316,13 +316,13 @@ func (w *Workspace) grepWithGo(ctx context.Context, args *protocol.GrepArgs) (*p } for _, inc := range include { - matches, err := w.Glob(ctx, &protocol.GlobArgs{Pattern: inc}) + matches, err := e.Glob(ctx, &protocol.GlobArgs{Pattern: inc}) if err != nil { continue } for _, match := range matches.Matches { - realPath, _ := w.safePath(match) + realPath, _ := e.safePath(match) file, err := os.Open(realPath) if err != nil { continue @@ -348,18 +348,18 @@ func (w *Workspace) grepWithGo(ctx context.Context, args *protocol.GrepArgs) (*p } // Bash executes a bash command in the workspace. 
-func (w *Workspace) Bash(ctx context.Context, args *protocol.BashArgs) (*protocol.BashResult, error) { - if err := w.checker.Check(args.Command); err != nil { +func (e *Environment) Bash(ctx context.Context, args *protocol.BashArgs) (*protocol.BashResult, error) { + if err := e.checker.Check(args.Command); err != nil { return nil, err } - workdir, err := w.resolveWorkdir(args.Workdir) + workdir, err := e.resolveWorkdir(args.Workdir) if err != nil { return nil, err } - timeout := w.resolveTimeout(args.Timeout) - result, err := w.executeBashCommand(ctx, args.Command, workdir, timeout) + timeout := e.resolveTimeout(args.Timeout) + result, err := e.executeBashCommand(ctx, args.Command, workdir, timeout) if err != nil { return result, err } @@ -370,19 +370,19 @@ func (w *Workspace) Bash(ctx context.Context, args *protocol.BashArgs) (*protoco return result, nil } - return w.processLargeOutput(ctx, result, "bash") + return e.processLargeOutput(ctx, result, "bash") } // resolveWorkdir resolves the working directory for command execution. -func (w *Workspace) resolveWorkdir(workdir string) (string, error) { +func (e *Environment) resolveWorkdir(workdir string) (string, error) { if workdir == "" { - return w.root, nil + return e.root, nil } - return w.safePath(workdir) + return e.safePath(workdir) } // resolveTimeout resolves the command timeout duration. -func (w *Workspace) resolveTimeout(timeoutSec int) time.Duration { +func (e *Environment) resolveTimeout(timeoutSec int) time.Duration { if timeoutSec > 0 { return time.Duration(timeoutSec) * time.Second } @@ -390,7 +390,7 @@ func (w *Workspace) resolveTimeout(timeoutSec int) time.Duration { } // executeBashCommand executes a bash command with the given parameters. 
-func (w *Workspace) executeBashCommand(ctx context.Context, command, workdir string, timeout time.Duration) (*protocol.BashResult, error) { +func (e *Environment) executeBashCommand(ctx context.Context, command, workdir string, timeout time.Duration) (*protocol.BashResult, error) { ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() @@ -450,8 +450,8 @@ func (l *LimitedWriter) Write(p []byte) (n int, err error) { } // processLargeOutput processes command output for truncation if needed. -func (w *Workspace) processLargeOutput(ctx context.Context, result *protocol.BashResult, prefix string) (*protocol.BashResult, error) { - processor := NewLargeOutputProcessor(w, DefaultLargeOutputConfig()) +func (e *Environment) processLargeOutput(ctx context.Context, result *protocol.BashResult, prefix string) (*protocol.BashResult, error) { + processor := NewLargeOutputProcessor(e, DefaultLargeOutputConfig()) processed, err := processor.Process(ctx, result.Stdout, prefix) if err != nil { result.TotalSize = int64(len(result.Stdout)) @@ -468,8 +468,8 @@ func (w *Workspace) processLargeOutput(ctx context.Context, result *protocol.Bas // WriteRaw writes raw content (not base64 encoded) to a file. // Used internally for saving large output files. -func (w *Workspace) WriteRaw(ctx context.Context, path string, content []byte) error { - realPath, err := w.safePath(path) +func (e *Environment) WriteRaw(ctx context.Context, path string, content []byte) error { + realPath, err := e.safePath(path) if err != nil { return err } @@ -484,7 +484,7 @@ func (w *Workspace) WriteRaw(ctx context.Context, path string, content []byte) e } // MCPCall executes an MCP tool call. 
-func (w *Workspace) MCPCall(ctx context.Context, args *protocol.MCPCallArgs, logger *slog.Logger) (*protocol.MCPCallResult, error) { +func (e *Environment) MCPCall(ctx context.Context, args *protocol.MCPCallArgs, logger *slog.Logger) (*protocol.MCPCallResult, error) { // Parse arguments var toolArgs map[string]any if len(args.Args) > 0 { @@ -494,7 +494,7 @@ func (w *Workspace) MCPCall(ctx context.Context, args *protocol.MCPCallArgs, log } // Call MCP tool - result, err := w.mcpMgr.CallTool(ctx, &args.Server, args.ToolName, toolArgs, logger) + result, err := e.mcpMgr.CallTool(ctx, &args.Server, args.ToolName, toolArgs, logger) if err != nil { return nil, err } @@ -503,7 +503,7 @@ func (w *Workspace) MCPCall(ctx context.Context, args *protocol.MCPCallArgs, log content := mcp.ExtractContent(result) // Process large output - processor := NewLargeOutputProcessor(w, DefaultLargeOutputConfig()) + processor := NewLargeOutputProcessor(e, DefaultLargeOutputConfig()) processed, err := processor.Process(ctx, content, "mcp") if err != nil { return nil, err @@ -519,8 +519,8 @@ func (w *Workspace) MCPCall(ctx context.Context, args *protocol.MCPCallArgs, log } // MCPListTools lists available tools from an MCP server. -func (w *Workspace) MCPListTools(ctx context.Context, args *protocol.MCPListToolsArgs, logger *slog.Logger) (*protocol.MCPListToolsResult, error) { - tools, err := w.mcpMgr.ListTools(ctx, &args.Server, logger) +func (e *Environment) MCPListTools(ctx context.Context, args *protocol.MCPListToolsArgs, logger *slog.Logger) (*protocol.MCPListToolsResult, error) { + tools, err := e.mcpMgr.ListTools(ctx, &args.Server, logger) if err != nil { return nil, err } @@ -540,8 +540,8 @@ func (w *Workspace) MCPListTools(ctx context.Context, args *protocol.MCPListTool } // SyncSkill syncs a skill from cloud to local workspace. 
-func (w *Workspace) SyncSkill(ctx context.Context, args *protocol.SyncSkillArgs) (*protocol.SyncSkillResult, error) { - skillDir, err := w.safePath(args.SkillDir) +func (e *Environment) SyncSkill(ctx context.Context, args *protocol.SyncSkillArgs) (*protocol.SyncSkillResult, error) { + skillDir, err := e.safePath(args.SkillDir) if err != nil { return nil, err } @@ -553,7 +553,7 @@ func (w *Workspace) SyncSkill(ctx context.Context, args *protocol.SyncSkillArgs) } // Unzip to skill directory - if err := w.unzipSkill(zipData, skillDir); err != nil { + if err := e.unzipSkill(zipData, skillDir); err != nil { return nil, fmt.Errorf("failed to unzip skill: %w", err) } @@ -570,7 +570,7 @@ func (w *Workspace) SyncSkill(ctx context.Context, args *protocol.SyncSkillArgs) } // unzipSkill extracts a zip archive to the destination directory. -func (w *Workspace) unzipSkill(data []byte, dest string) error { +func (e *Environment) unzipSkill(data []byte, dest string) error { reader, err := zip.NewReader(bytes.NewReader(data), int64(len(data))) if err != nil { return fmt.Errorf("failed to read zip archive: %w", err) @@ -609,7 +609,7 @@ func (w *Workspace) unzipSkill(data []byte, dest string) error { return fmt.Errorf("failed to create parent directory: %w", err) } - if err := w.extractZipFile(f, absTarget); err != nil { + if err := e.extractZipFile(f, absTarget); err != nil { return fmt.Errorf("failed to extract %s: %w", cleanName, err) } } @@ -617,7 +617,7 @@ func (w *Workspace) unzipSkill(data []byte, dest string) error { } // extractZipFile extracts a single file from a zip archive. 
-func (w *Workspace) extractZipFile(f *zip.File, targetPath string) error { +func (e *Environment) extractZipFile(f *zip.File, targetPath string) error { rc, err := f.Open() if err != nil { return err diff --git a/workspace/workspace_test.go b/environment/environment_test.go similarity index 87% rename from workspace/workspace_test.go rename to environment/environment_test.go index 01ebec3..827f462 100644 --- a/workspace/workspace_test.go +++ b/environment/environment_test.go @@ -1,4 +1,4 @@ -package workspace +package environment import ( "context" @@ -14,7 +14,7 @@ import ( "github.com/flashcatcloud/flashduty-runner/protocol" ) -func newTestWorkspace(t *testing.T) *Workspace { +func newTestEnvironment(t *testing.T) *Environment { tmpDir := t.TempDir() checker := permission.NewChecker(map[string]string{ "*": "allow", // Allow all for testing @@ -24,8 +24,8 @@ func newTestWorkspace(t *testing.T) *Workspace { return ws } -func TestWorkspace_Read(t *testing.T) { - ws := newTestWorkspace(t) +func TestEnvironment_Read(t *testing.T) { + ws := newTestEnvironment(t) ctx := context.Background() // Create test file @@ -56,8 +56,8 @@ func TestWorkspace_Read(t *testing.T) { assert.Equal(t, "World", string(decoded)) } -func TestWorkspace_Read_PathTraversal(t *testing.T) { - ws := newTestWorkspace(t) +func TestEnvironment_Read_PathTraversal(t *testing.T) { + ws := newTestEnvironment(t) ctx := context.Background() // Attempt path traversal @@ -66,8 +66,8 @@ func TestWorkspace_Read_PathTraversal(t *testing.T) { assert.Contains(t, err.Error(), "outside workspace root") } -func TestWorkspace_Write(t *testing.T) { - ws := newTestWorkspace(t) +func TestEnvironment_Write(t *testing.T) { + ws := newTestEnvironment(t) ctx := context.Background() testContent := "Test content" @@ -85,8 +85,8 @@ func TestWorkspace_Write(t *testing.T) { assert.Equal(t, testContent, string(content)) } -func TestWorkspace_List(t *testing.T) { - ws := newTestWorkspace(t) +func TestEnvironment_List(t 
*testing.T) { + ws := newTestEnvironment(t) ctx := context.Background() // Create test structure @@ -106,8 +106,8 @@ func TestWorkspace_List(t *testing.T) { assert.Len(t, result.Entries, 4) // dir1, dir2, file1.txt, dir1/file2.txt } -func TestWorkspace_Glob(t *testing.T) { - ws := newTestWorkspace(t) +func TestEnvironment_Glob(t *testing.T) { + ws := newTestEnvironment(t) ctx := context.Background() // Create test files @@ -122,8 +122,8 @@ func TestWorkspace_Glob(t *testing.T) { assert.Contains(t, result.Matches, "file2.txt") } -func TestWorkspace_Grep(t *testing.T) { - ws := newTestWorkspace(t) +func TestEnvironment_Grep(t *testing.T) { + ws := newTestEnvironment(t) ctx := context.Background() // Create test files @@ -135,8 +135,8 @@ func TestWorkspace_Grep(t *testing.T) { assert.Len(t, result.Matches, 2) } -func TestWorkspace_Bash(t *testing.T) { - ws := newTestWorkspace(t) +func TestEnvironment_Bash(t *testing.T) { + ws := newTestEnvironment(t) ctx := context.Background() // Simple command @@ -151,7 +151,7 @@ func TestWorkspace_Bash(t *testing.T) { assert.Equal(t, 42, result.ExitCode) } -func TestWorkspace_Bash_PermissionDenied(t *testing.T) { +func TestEnvironment_Bash_PermissionDenied(t *testing.T) { tmpDir := t.TempDir() // Note: rules are sorted alphabetically, so "echo *" comes after "*" // This means "echo *" will override "*" for echo commands @@ -175,8 +175,8 @@ func TestWorkspace_Bash_PermissionDenied(t *testing.T) { assert.Contains(t, err.Error(), "denied") } -func TestWorkspace_SafePath(t *testing.T) { - ws := newTestWorkspace(t) +func TestEnvironment_SafePath(t *testing.T) { + ws := newTestEnvironment(t) tests := []struct { name string diff --git a/environment/knowledge.go b/environment/knowledge.go new file mode 100644 index 0000000..63155ce --- /dev/null +++ b/environment/knowledge.go @@ -0,0 +1,236 @@ +package environment + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "log/slog" + "os" + "path/filepath" + "strings" + + 
"github.com/flashcatcloud/flashduty-runner/protocol" +) + +const ( + // sentinelName is the hidden JSON map that tracks staged-file checksums. + // Safari reads this to decide which knowledge pack files are already current. + sentinelName = ".safari-knowledge-sentinel.json" +) + +// validateKnowledgeRelPath enforces the path rules for knowledge file operations. +// +// Rules (from the Safari-side contract): +// - Must not contain path separators or double-dot components — the runner +// only writes flat files in the workspace root, never in sub-directories. +// - Leading-dot filenames are rejected because they are hidden by convention; +// the sentinel is written by the runner itself and is never staged by clients. +func validateKnowledgeRelPath(relPath string) error { + if relPath == "" { + return fmt.Errorf("rel_path must not be empty") + } + if strings.ContainsAny(relPath, `/\`) { + return fmt.Errorf("rel_path must not contain path separators: %q", relPath) + } + // Reject the bare ".." token. Slash-separated traversal like "foo/../bar" + // is already blocked above, but a plain ".." with no slashes still escapes. + if relPath == ".." { + return fmt.Errorf("rel_path must not be '..': %q", relPath) + } + if strings.HasPrefix(relPath, ".") { + // Hidden files (including the sentinel itself) cannot be staged by clients. + // The runner owns the sentinel exclusively. + return fmt.Errorf("rel_path must not start with '.': %q", relPath) + } + return nil +} + +// atomicWriteFile writes data to path using a temp-file + rename so readers +// never see a partially-written file. The temp file is created in the same +// directory as the target to guarantee the rename stays on the same filesystem. 
+func atomicWriteFile(path string, data []byte, perm os.FileMode) error { + dir := filepath.Dir(path) + tmp, err := os.CreateTemp(dir, ".tmp-knowledge-*") + if err != nil { + return fmt.Errorf("failed to create temp file: %w", err) + } + tmpName := tmp.Name() + + // Clean up the temp file on any error path. + var writeErr error + defer func() { + if writeErr != nil { + _ = os.Remove(tmpName) + } + }() + + if _, writeErr = tmp.Write(data); writeErr != nil { + _ = tmp.Close() + return fmt.Errorf("failed to write temp file: %w", writeErr) + } + if writeErr = tmp.Chmod(perm); writeErr != nil { + _ = tmp.Close() + return fmt.Errorf("failed to chmod temp file: %w", writeErr) + } + if writeErr = tmp.Close(); writeErr != nil { + return fmt.Errorf("failed to close temp file: %w", writeErr) + } + + if writeErr = os.Rename(tmpName, path); writeErr != nil { + return fmt.Errorf("failed to rename temp file: %w", writeErr) + } + return nil +} + +// withSentinelLock acquires a platform-specific advisory lock keyed to the +// sentinel path, calls fn, then releases the lock. The lock protects +// concurrent read-modify-write cycles across BYOC sessions that share the +// same filesystem (e.g. multiple Safari instances writing to the same +// environment workspace root). +// +// acquireSentinelLock is implemented per-platform: +// - knowledge_flock_unix.go: opens a lock fd + flock(2) +// - knowledge_flock_windows.go: no-op (runner not shipped on Windows; +// also avoids holding a handle that would block os.Rename of the sentinel) +func withSentinelLock(sentinelPath string, fn func() error) error { + release, err := acquireSentinelLock(sentinelPath) + if err != nil { + return err + } + defer release() + return fn() +} + +// readSentinel reads the sentinel JSON map. Missing file or empty file → +// empty map (both are expected states on first use). Corrupt JSON → log a +// warning, return empty map (safe rebuild on next stage). 
+func readSentinel(sentinelPath string) map[string]string { + data, err := os.ReadFile(sentinelPath) + if err != nil { + if !os.IsNotExist(err) { + slog.Warn("failed to read sentinel, treating as empty", "error", err) + } + return make(map[string]string) + } + // An empty file is the initial state created by withSentinelLock; treat it + // as an empty map without logging a warning. + if len(data) == 0 { + return make(map[string]string) + } + var m map[string]string + if err := json.Unmarshal(data, &m); err != nil { + slog.Warn("sentinel JSON corrupt, treating as empty", "error", err) + return make(map[string]string) + } + return m +} + +// writeSentinel atomically rewrites the sentinel with the given map. +func writeSentinel(sentinelPath string, m map[string]string) error { + data, err := json.Marshal(m) + if err != nil { + return fmt.Errorf("failed to marshal sentinel: %w", err) + } + return atomicWriteFile(sentinelPath, data, 0o644) +} + +// StageKnowledgeFiles writes the supplied files into the workspace root and +// updates the sentinel checksum map. +func (e *Environment) StageKnowledgeFiles(ctx context.Context, args *protocol.StageKnowledgeFilesArgs) (*protocol.StageKnowledgeFilesResult, error) { + result := &protocol.StageKnowledgeFilesResult{ + Files: make([]protocol.KnowledgeFileStatus, 0, len(args.Files)), + } + + // validated collects (relPath, checksum) for files that landed successfully; + // we only merge these into the sentinel. 
+ type staged struct{ relPath, checksum string } + succeeded := make([]staged, 0, len(args.Files)) + + for _, f := range args.Files { + status := protocol.KnowledgeFileStatus{RelPath: f.RelPath} + + if err := validateKnowledgeRelPath(f.RelPath); err != nil { + status.Success = false + status.Error = err.Error() + result.Files = append(result.Files, status) + continue + } + + content, err := base64.StdEncoding.DecodeString(f.ContentB64) + if err != nil { + status.Success = false + status.Error = fmt.Sprintf("failed to decode content_b64: %v", err) + result.Files = append(result.Files, status) + continue + } + + targetPath := filepath.Join(e.root, f.RelPath) + if err := atomicWriteFile(targetPath, content, 0o644); err != nil { + status.Success = false + status.Error = err.Error() + result.Files = append(result.Files, status) + continue + } + + status.Success = true + result.Files = append(result.Files, status) + succeeded = append(succeeded, staged{f.RelPath, f.Checksum}) + } + + // Update sentinel for successfully written files under advisory lock. + if len(succeeded) > 0 { + sentinelPath := filepath.Join(e.root, sentinelName) + if err := withSentinelLock(sentinelPath, func() error { + m := readSentinel(sentinelPath) + for _, s := range succeeded { + m[s.relPath] = s.checksum + } + return writeSentinel(sentinelPath, m) + }); err != nil { + // Sentinel write failure is non-fatal for the already-written files, + // but we log it clearly so operators can investigate. + slog.Error("failed to update sentinel after staging", "error", err) + } + } + + return result, nil +} + +// DeleteKnowledgeFiles removes the supplied files from the workspace root and +// scrubs their entries from the sentinel. +func (e *Environment) DeleteKnowledgeFiles(ctx context.Context, args *protocol.DeleteKnowledgeFilesArgs) (*protocol.DeleteKnowledgeFilesResult, error) { + // toRemove collects validated rel_paths so we can clean the sentinel in one + // locked pass after the file removals. 
+ toRemove := make(map[string]struct{}, len(args.RelPaths)) + + for _, relPath := range args.RelPaths { + if err := validateKnowledgeRelPath(relPath); err != nil { + slog.Warn("skipping invalid rel_path in delete_knowledge_files", "rel_path", relPath, "error", err) + continue + } + toRemove[relPath] = struct{}{} + + targetPath := filepath.Join(e.root, relPath) + if err := os.Remove(targetPath); err != nil && !os.IsNotExist(err) { + slog.Warn("failed to remove knowledge file", "rel_path", relPath, "error", err) + } + } + + // Remove entries from sentinel for all valid paths (idempotent — missing + // sentinel entries are simply no-ops in the delete loop). + if len(toRemove) > 0 { + sentinelPath := filepath.Join(e.root, sentinelName) + if err := withSentinelLock(sentinelPath, func() error { + m := readSentinel(sentinelPath) + for rp := range toRemove { + delete(m, rp) + } + return writeSentinel(sentinelPath, m) + }); err != nil { + slog.Error("failed to update sentinel after deletion", "error", err) + } + } + + return &protocol.DeleteKnowledgeFilesResult{Success: true}, nil +} diff --git a/environment/knowledge_flock_unix.go b/environment/knowledge_flock_unix.go new file mode 100644 index 0000000..f1d8c81 --- /dev/null +++ b/environment/knowledge_flock_unix.go @@ -0,0 +1,29 @@ +//go:build !windows + +package environment + +import ( + "fmt" + "os" + "syscall" +) + +// acquireSentinelLock opens (or creates) the sentinel path and takes an +// exclusive advisory flock(2) on that fd. Returns a release function that +// drops the lock and closes the fd. The lock fd is kept separate from the +// read/write path so atomic rename (temp + rename) still works unimpeded. 
+func acquireSentinelLock(sentinelPath string) (release func(), err error) { + f, err := os.OpenFile(sentinelPath, os.O_RDWR|os.O_CREATE, 0o600) + if err != nil { + return nil, fmt.Errorf("failed to open sentinel for locking: %w", err) + } + fd := int(f.Fd()) //nolint:gosec // os.File.Fd returns uintptr but the underlying OS fd is always a valid int on unix + if err := syscall.Flock(fd, syscall.LOCK_EX); err != nil { + _ = f.Close() + return nil, fmt.Errorf("failed to acquire sentinel lock: %w", err) + } + return func() { + _ = syscall.Flock(fd, syscall.LOCK_UN) + _ = f.Close() + }, nil +} diff --git a/environment/knowledge_flock_windows.go b/environment/knowledge_flock_windows.go new file mode 100644 index 0000000..a3d4b7c --- /dev/null +++ b/environment/knowledge_flock_windows.go @@ -0,0 +1,13 @@ +//go:build windows + +package environment + +// acquireSentinelLock is a no-op on Windows. The runner is not shipped on +// Windows; this stub exists only so `go build` and `go test` succeed on +// windows/amd64 in CI. Returning an unlocked sentinel path avoids holding an +// open handle that would otherwise block os.Rename on the sentinel file. +// Concurrent sessions on Windows would race — acceptable for a non-production +// platform. +func acquireSentinelLock(_ string) (release func(), err error) { + return func() {}, nil +} diff --git a/environment/knowledge_test.go b/environment/knowledge_test.go new file mode 100644 index 0000000..76202d7 --- /dev/null +++ b/environment/knowledge_test.go @@ -0,0 +1,238 @@ +package environment + +import ( + "context" + "encoding/base64" + "encoding/json" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/flashcatcloud/flashduty-runner/protocol" +) + +// b64 encodes a string to base64 — reduces noise in test table declarations. 
+func b64(s string) string { return base64.StdEncoding.EncodeToString([]byte(s)) } + +// readSentinelMap reads the sentinel JSON file from root and returns the map. +func readSentinelMap(t *testing.T, root string) map[string]string { + t.Helper() + data, err := os.ReadFile(filepath.Join(root, sentinelName)) + require.NoError(t, err) + var m map[string]string + require.NoError(t, json.Unmarshal(data, &m)) + return m +} + +// fileContent reads a file and returns its content as a string. +func fileContent(t *testing.T, root, relPath string) string { + t.Helper() + data, err := os.ReadFile(filepath.Join(root, relPath)) + require.NoError(t, err) + return string(data) +} + +// Case 1: stage one file → file lands, sentinel has the entry. +func TestStageKnowledgeFiles_Single(t *testing.T) { + ws := newTestEnvironment(t) + ctx := context.Background() + + result, err := ws.StageKnowledgeFiles(ctx, &protocol.StageKnowledgeFilesArgs{ + Files: []protocol.KnowledgeFile{ + {RelPath: "DUTY.md", Checksum: "abc123", ContentB64: b64("# Duty\n")}, + }, + }) + require.NoError(t, err) + require.Len(t, result.Files, 1) + assert.True(t, result.Files[0].Success) + assert.Empty(t, result.Files[0].Error) + + assert.Equal(t, "# Duty\n", fileContent(t, ws.Root(), "DUTY.md")) + + sentinel := readSentinelMap(t, ws.Root()) + assert.Equal(t, "abc123", sentinel["DUTY.md"]) +} + +// Case 2: mixed batch — one valid file + one with '/' in rel_path. +// The valid file must land; the invalid one must return an error in the ack; +// the sentinel must contain only the valid entry. 
+func TestStageKnowledgeFiles_MixedValidity(t *testing.T) { + ws := newTestEnvironment(t) + ctx := context.Background() + + result, err := ws.StageKnowledgeFiles(ctx, &protocol.StageKnowledgeFilesArgs{ + Files: []protocol.KnowledgeFile{ + {RelPath: "DUTY.md", Checksum: "goodsum", ContentB64: b64("content")}, + {RelPath: "sub/evil.md", Checksum: "badsum", ContentB64: b64("evil")}, + }, + }) + require.NoError(t, err) + require.Len(t, result.Files, 2) + + // First entry (valid) should succeed. + assert.True(t, result.Files[0].Success) + assert.Empty(t, result.Files[0].Error) + + // Second entry (slash in path) should fail. + assert.False(t, result.Files[1].Success) + assert.NotEmpty(t, result.Files[1].Error) + + // Only valid file exists on disk. + assert.FileExists(t, filepath.Join(ws.Root(), "DUTY.md")) + assert.NoFileExists(t, filepath.Join(ws.Root(), "sub/evil.md")) + + // Sentinel has only the good entry. + sentinel := readSentinelMap(t, ws.Root()) + assert.Equal(t, "goodsum", sentinel["DUTY.md"]) + _, hasBad := sentinel["sub/evil.md"] + assert.False(t, hasBad) +} + +// Case 3: stage the same file twice with different checksums → second stage +// overwrites content and updates the sentinel checksum. 
+func TestStageKnowledgeFiles_Overwrite(t *testing.T) { + ws := newTestEnvironment(t) + ctx := context.Background() + + _, err := ws.StageKnowledgeFiles(ctx, &protocol.StageKnowledgeFilesArgs{ + Files: []protocol.KnowledgeFile{ + {RelPath: "runbook.md", Checksum: "v1sum", ContentB64: b64("version 1")}, + }, + }) + require.NoError(t, err) + + _, err = ws.StageKnowledgeFiles(ctx, &protocol.StageKnowledgeFilesArgs{ + Files: []protocol.KnowledgeFile{ + {RelPath: "runbook.md", Checksum: "v2sum", ContentB64: b64("version 2")}, + }, + }) + require.NoError(t, err) + + assert.Equal(t, "version 2", fileContent(t, ws.Root(), "runbook.md")) + + sentinel := readSentinelMap(t, ws.Root()) + assert.Equal(t, "v2sum", sentinel["runbook.md"]) +} + +// Case 4: delete a previously staged file → file gone, sentinel entry gone. +func TestDeleteKnowledgeFiles_Staged(t *testing.T) { + ws := newTestEnvironment(t) + ctx := context.Background() + + // Stage first. + _, err := ws.StageKnowledgeFiles(ctx, &protocol.StageKnowledgeFilesArgs{ + Files: []protocol.KnowledgeFile{ + {RelPath: "old-runbook.md", Checksum: "deadbeef", ContentB64: b64("old content")}, + }, + }) + require.NoError(t, err) + + // Delete. + delResult, err := ws.DeleteKnowledgeFiles(ctx, &protocol.DeleteKnowledgeFilesArgs{ + RelPaths: []string{"old-runbook.md"}, + }) + require.NoError(t, err) + assert.True(t, delResult.Success) + + assert.NoFileExists(t, filepath.Join(ws.Root(), "old-runbook.md")) + + sentinel := readSentinelMap(t, ws.Root()) + _, exists := sentinel["old-runbook.md"] + assert.False(t, exists) +} + +// Case 5: delete a path that was never staged → no error (idempotent). 
+func TestDeleteKnowledgeFiles_Missing(t *testing.T) { + ws := newTestEnvironment(t) + ctx := context.Background() + + result, err := ws.DeleteKnowledgeFiles(ctx, &protocol.DeleteKnowledgeFilesArgs{ + RelPaths: []string{"does-not-exist.md"}, + }) + require.NoError(t, err) + assert.True(t, result.Success) +} + +// Case 6: stage → delete → stage → delete cycle twice; final state has zero +// knowledge pack files and an empty sentinel. +func TestStageDeleteCycle(t *testing.T) { + ws := newTestEnvironment(t) + ctx := context.Background() + + for i := range 2 { + _, err := ws.StageKnowledgeFiles(ctx, &protocol.StageKnowledgeFilesArgs{ + Files: []protocol.KnowledgeFile{ + {RelPath: "cycle.md", Checksum: "cycle", ContentB64: b64("cycle")}, + }, + }) + require.NoError(t, err, "stage iteration %d", i) + + _, err = ws.DeleteKnowledgeFiles(ctx, &protocol.DeleteKnowledgeFilesArgs{ + RelPaths: []string{"cycle.md"}, + }) + require.NoError(t, err, "delete iteration %d", i) + } + + assert.NoFileExists(t, filepath.Join(ws.Root(), "cycle.md")) + + sentinel := readSentinelMap(t, ws.Root()) + assert.Empty(t, sentinel) +} + +// Case 7: path rejection rules. 
+func TestStageKnowledgeFiles_RejectedPaths(t *testing.T) {
+	ws := newTestEnvironment(t)
+	ctx := context.Background()
+
+	cases := []struct {
+		relPath string
+		desc    string
+	}{
+		{"../etc/passwd", "path traversal with .."},
+		{"sub/x.md", "subdirectory path with /"},
+		{".hidden.md", "leading-dot filename"},
+		{sentinelName, "sentinel filename itself"},
+		{"", "empty string"},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.desc, func(t *testing.T) {
+			result, err := ws.StageKnowledgeFiles(ctx, &protocol.StageKnowledgeFilesArgs{
+				Files: []protocol.KnowledgeFile{
+					{RelPath: tc.relPath, Checksum: "sum", ContentB64: b64("data")},
+				},
+			})
+			require.NoError(t, err) // handler never returns a hard error; rejection is in the per-file status
+			require.Len(t, result.Files, 1)
+			assert.False(t, result.Files[0].Success, "expected rejection for %q", tc.relPath)
+			assert.NotEmpty(t, result.Files[0].Error)
+		})
+	}
+}
+
+// TestValidateKnowledgeRelPath exercises the validation helper directly for
+// thorough coverage of edge cases.
+func TestValidateKnowledgeRelPath(t *testing.T) {
+	valid := []string{
+		"DUTY.md", "runbook-api.md", "README.txt", "a",
+	}
+	for _, p := range valid {
+		assert.NoError(t, validateKnowledgeRelPath(p), "should be valid: %q", p)
+	}
+
+	invalid := []string{
+		"", "..", "a/b", `a\b`, ".hidden", sentinelName, "foo/../bar",
+		// Note: "foo..bar"-style names are intentionally absent here — ".."
+		// is only rejected as a bare token, not as a substring (see below).
+ } + validButPreviouslyRejected := []string{"foo..bar", "v2..md"} + for _, p := range validButPreviouslyRejected { + assert.NoError(t, validateKnowledgeRelPath(p), "should be valid (double-dot in middle): %q", p) + } + for _, p := range invalid { + assert.Error(t, validateKnowledgeRelPath(p), "should be invalid: %q", p) + } +} diff --git a/workspace/large_output.go b/environment/large_output.go similarity index 96% rename from workspace/large_output.go rename to environment/large_output.go index f2dc568..2b8282b 100644 --- a/workspace/large_output.go +++ b/environment/large_output.go @@ -1,4 +1,4 @@ -package workspace +package environment import ( "context" @@ -40,11 +40,11 @@ func DefaultLargeOutputConfig() LargeOutputConfig { // LargeOutputProcessor handles large output truncation and file storage. type LargeOutputProcessor struct { config LargeOutputConfig - ws *Workspace + ws *Environment } // NewLargeOutputProcessor creates a new processor with the given workspace. -func NewLargeOutputProcessor(ws *Workspace, config LargeOutputConfig) *LargeOutputProcessor { +func NewLargeOutputProcessor(ws *Environment, config LargeOutputConfig) *LargeOutputProcessor { if config.MaxOutputSize <= 0 { config.MaxOutputSize = DefaultMaxOutputSize } diff --git a/workspace/webfetch.go b/environment/webfetch.go similarity index 93% rename from workspace/webfetch.go rename to environment/webfetch.go index a4148d4..6217e40 100644 --- a/workspace/webfetch.go +++ b/environment/webfetch.go @@ -1,4 +1,4 @@ -package workspace +package environment import ( "context" @@ -22,7 +22,7 @@ const ( ) // WebFetch fetches content from a URL and converts it to readable format. 
-func (w *Workspace) WebFetch(ctx context.Context, args *protocol.WebFetchArgs) (*protocol.WebFetchResult, error) { +func (e *Environment) WebFetch(ctx context.Context, args *protocol.WebFetchArgs) (*protocol.WebFetchResult, error) { if args.URL == "" || (!strings.HasPrefix(args.URL, "http://") && !strings.HasPrefix(args.URL, "https://")) { return nil, fmt.Errorf("valid http/https url is required") } @@ -40,7 +40,7 @@ func (w *Workspace) WebFetch(ctx context.Context, args *protocol.WebFetchArgs) ( format = "markdown" } - resp, err := w.fetchURL(ctx, args.URL, format, timeout) + resp, err := e.fetchURL(ctx, args.URL, format, timeout) if err != nil { return nil, err } @@ -54,7 +54,7 @@ func (w *Workspace) WebFetch(ctx context.Context, args *protocol.WebFetchArgs) ( } content := convertContent(string(body), format, resp.Header.Get("Content-Type")) - processor := NewLargeOutputProcessor(w, DefaultLargeOutputConfig()) + processor := NewLargeOutputProcessor(e, DefaultLargeOutputConfig()) processed, err := processor.Process(ctx, content, "webfetch") if err != nil { return nil, err @@ -70,7 +70,7 @@ func (w *Workspace) WebFetch(ctx context.Context, args *protocol.WebFetchArgs) ( } // fetchURL performs the HTTP request. -func (w *Workspace) fetchURL(ctx context.Context, url, format string, timeout time.Duration) (*http.Response, error) { +func (e *Environment) fetchURL(ctx context.Context, url, format string, timeout time.Duration) (*http.Response, error) { httpCtx, cancel := context.WithTimeout(ctx, timeout) defer cancel() diff --git a/protocol/messages.go b/protocol/messages.go index 44f233a..c0f936d 100644 --- a/protocol/messages.go +++ b/protocol/messages.go @@ -55,21 +55,21 @@ func generateMessageID() string { } // WelcomePayload is the payload for the welcome message sent by server after connection. -// Contains worknode info (name, labels) managed via Web UI. +// Contains environment info (name, labels) managed via Web UI. 
type WelcomePayload struct { - WorknodeID string `json:"worknode_id"` - Name string `json:"name"` - Labels []string `json:"labels"` + EnvironmentID string `json:"environment_id"` + Name string `json:"name"` + Labels []string `json:"labels"` } // HeartbeatPayload is the payload for heartbeat messages. type HeartbeatPayload struct { - WorknodeID string `json:"worknode_id"` - Name string `json:"name"` - Labels []string `json:"labels"` - Version string `json:"version"` - Environment *EnvironmentInfo `json:"environment,omitempty"` - Metrics *HeartbeatMetrics `json:"metrics,omitempty"` + EnvironmentID string `json:"environment_id"` + Name string `json:"name"` + Labels []string `json:"labels"` + Version string `json:"version"` + Environment *EnvironmentInfo `json:"environment,omitempty"` + Metrics *HeartbeatMetrics `json:"metrics,omitempty"` } // EnvironmentInfo contains detailed environment information for LLM context. @@ -101,16 +101,18 @@ type HeartbeatMetrics struct { type TaskOperation string const ( - TaskOpRead TaskOperation = "read" - TaskOpWrite TaskOperation = "write" - TaskOpList TaskOperation = "list" - TaskOpGlob TaskOperation = "glob" - TaskOpGrep TaskOperation = "grep" - TaskOpBash TaskOperation = "bash" - TaskOpWebFetch TaskOperation = "webfetch" - TaskOpMCPCall TaskOperation = "mcp_call" - TaskOpMCPListTools TaskOperation = "mcp_list_tools" - TaskOpSyncSkill TaskOperation = "sync_skill" + TaskOpRead TaskOperation = "read" + TaskOpWrite TaskOperation = "write" + TaskOpList TaskOperation = "list" + TaskOpGlob TaskOperation = "glob" + TaskOpGrep TaskOperation = "grep" + TaskOpBash TaskOperation = "bash" + TaskOpWebFetch TaskOperation = "webfetch" + TaskOpMCPCall TaskOperation = "mcp_call" + TaskOpMCPListTools TaskOperation = "mcp_list_tools" + TaskOpSyncSkill TaskOperation = "sync_skill" + TaskOpStageKnowledgeFiles TaskOperation = "stage_knowledge_files" + TaskOpDeleteKnowledgeFiles TaskOperation = "delete_knowledge_files" ) // TaskRequestPayload is the 
payload for task request messages. @@ -323,3 +325,37 @@ type MCPResultPayload struct { Result json.RawMessage `json:"result,omitempty"` Error string `json:"error,omitempty"` } + +// KnowledgeFile is a single file entry in a stage_knowledge_files request. +type KnowledgeFile struct { + RelPath string `json:"rel_path"` + Checksum string `json:"checksum"` + ContentB64 string `json:"content_b64"` +} + +// StageKnowledgeFilesArgs are the arguments for stage_knowledge_files operation. +type StageKnowledgeFilesArgs struct { + Files []KnowledgeFile `json:"files"` +} + +// KnowledgeFileStatus is the per-file result entry in the stage ack. +type KnowledgeFileStatus struct { + RelPath string `json:"rel_path"` + Success bool `json:"success"` + Error string `json:"error,omitempty"` +} + +// StageKnowledgeFilesResult is the result of a stage_knowledge_files operation. +type StageKnowledgeFilesResult struct { + Files []KnowledgeFileStatus `json:"files"` +} + +// DeleteKnowledgeFilesArgs are the arguments for delete_knowledge_files operation. +type DeleteKnowledgeFilesArgs struct { + RelPaths []string `json:"rel_paths"` +} + +// DeleteKnowledgeFilesResult is the result of a delete_knowledge_files operation. +type DeleteKnowledgeFilesResult struct { + Success bool `json:"success"` +} diff --git a/ws/client.go b/ws/client.go index a117b2b..c3dbf49 100644 --- a/ws/client.go +++ b/ws/client.go @@ -63,10 +63,10 @@ type Client struct { doneCh chan struct{} sendCh chan *protocol.Message - // Worknode info from welcome message - worknodeID string - name string - labels []string + // Environment info from welcome message + environmentID string + name string + labels []string } // NewClient creates a new WebSocket client. 
@@ -117,13 +117,13 @@ func (c *Client) Connect(ctx context.Context) error { c.conn = conn c.mu.Unlock() - // Read welcome message to get worknode info (name, labels) + // Read welcome message to get environment info (name, labels) if err := c.readWelcomeMessage(); err != nil { slog.Warn("failed to read welcome message", "error", err) } slog.Info("connected to Flashduty", - "worknode_id", c.worknodeID, + "environment_id", c.environmentID, "name", c.name, "labels", c.labels, ) @@ -131,7 +131,7 @@ func (c *Client) Connect(ctx context.Context) error { return nil } -// readWelcomeMessage reads the initial welcome message containing worknode info. +// readWelcomeMessage reads the initial welcome message containing environment info. func (c *Client) readWelcomeMessage() error { c.mu.Lock() conn := c.conn @@ -166,7 +166,7 @@ func (c *Client) readWelcomeMessage() error { return fmt.Errorf("failed to parse welcome payload: %w", err) } - c.worknodeID = welcome.WorknodeID + c.environmentID = welcome.EnvironmentID c.name = welcome.Name c.labels = welcome.Labels @@ -242,7 +242,7 @@ func (c *Client) RunWithReconnect(ctx context.Context) error { // Run (blocking until disconnect) if err := c.Run(ctx); err != nil { slog.Warn("connection lost, will reconnect", - "worknode_id", c.worknodeID, + "environment_id", c.environmentID, "error", err, ) } @@ -301,17 +301,17 @@ func (c *Client) Close() error { return nil } -// WorknodeID returns the worknode ID assigned by Flashduty. -func (c *Client) WorknodeID() string { - return c.worknodeID +// EnvironmentID returns the environment ID assigned by Flashduty. +func (c *Client) EnvironmentID() string { + return c.environmentID } -// Name returns the worknode name from welcome message. +// Name returns the environment name from welcome message. func (c *Client) Name() string { return c.name } -// Labels returns the worknode labels from welcome message. +// Labels returns the environment labels from welcome message. 
func (c *Client) Labels() []string { return c.labels } @@ -437,10 +437,10 @@ func (c *Client) heartbeatLoop(ctx context.Context) { func (c *Client) sendHeartbeat() { payload := protocol.HeartbeatPayload{ - WorknodeID: c.worknodeID, - Name: c.name, - Labels: c.labels, - Version: c.version, + EnvironmentID: c.environmentID, + Name: c.name, + Labels: c.labels, + Version: c.version, } // Only send environment info on first heartbeat after connection diff --git a/ws/handler.go b/ws/handler.go index 95426e5..428ae5f 100644 --- a/ws/handler.go +++ b/ws/handler.go @@ -8,13 +8,13 @@ import ( "sync" "time" + "github.com/flashcatcloud/flashduty-runner/environment" "github.com/flashcatcloud/flashduty-runner/protocol" - "github.com/flashcatcloud/flashduty-runner/workspace" ) // Handler handles incoming WebSocket messages. type Handler struct { - ws *workspace.Workspace + ws *environment.Environment client *Client // Track running tasks for cancellation and graceful shutdown @@ -24,7 +24,7 @@ type Handler struct { } // NewHandler creates a new message handler. 
-func NewHandler(ws *workspace.Workspace) *Handler { +func NewHandler(ws *environment.Environment) *Handler { return &Handler{ ws: ws, runningTask: make(map[string]context.CancelFunc), @@ -235,6 +235,22 @@ func (h *Handler) executeTask(ctx context.Context, req *protocol.TaskRequestPayl logger.Info("syncing skill", "skill", args.SkillName, "dir", args.SkillDir) return h.ws.SyncSkill(ctx, args) + case protocol.TaskOpStageKnowledgeFiles: + args, err := parseArgs[protocol.StageKnowledgeFilesArgs](req.Args) + if err != nil { + return nil, fmt.Errorf("invalid stage_knowledge_files args: %w", err) + } + logger.Info("staging knowledge files", "count", len(args.Files)) + return h.ws.StageKnowledgeFiles(ctx, args) + + case protocol.TaskOpDeleteKnowledgeFiles: + args, err := parseArgs[protocol.DeleteKnowledgeFilesArgs](req.Args) + if err != nil { + return nil, fmt.Errorf("invalid delete_knowledge_files args: %w", err) + } + logger.Info("deleting knowledge files", "count", len(args.RelPaths)) + return h.ws.DeleteKnowledgeFiles(ctx, args) + default: return nil, fmt.Errorf("unknown operation: %s", req.Operation) }