diff --git a/internal/functions/deploy/bundle.go b/internal/functions/deploy/bundle.go index f476bd31c..24eccb773 100644 --- a/internal/functions/deploy/bundle.go +++ b/internal/functions/deploy/bundle.go @@ -122,18 +122,23 @@ func GetBindMounts(cwd, hostFuncDir, hostOutputDir, hostEntrypointPath, hostImpo binds = append(binds, hostOutputDir+":"+dockerOutputDir+":rw") } } + + // Get all paths and their bind mounts // Imports outside of ./supabase/functions will be bound by walking the entrypoint - modules, err := utils.BindHostModules(cwd, hostEntrypointPath, hostImportMapPath, fsys) + functionPaths, err := utils.BindHostModules(cwd, hostEntrypointPath, hostImportMapPath, fsys) if err != nil { return nil, err } + + // Add bind mounts that aren't already covered by the functions directory or output directory // Remove any duplicate mount points - for _, mod := range modules { - hostPath := strings.Split(mod, ":")[0] + for _, bind := range functionPaths.Binds { + hostPath := strings.Split(bind, ":")[0] if !strings.HasPrefix(hostPath, hostFuncDir) && (len(hostOutputDir) == 0 || !strings.HasPrefix(hostPath, hostOutputDir)) { - binds = append(binds, mod) + binds = append(binds, bind) } } + return binds, nil } diff --git a/internal/functions/serve/edge_function_watcher.go b/internal/functions/serve/edge_function_watcher.go new file mode 100644 index 000000000..12680ce62 --- /dev/null +++ b/internal/functions/serve/edge_function_watcher.go @@ -0,0 +1,308 @@ +package serve + +import ( + "context" + "os" + "path/filepath" + "strings" + "time" + + "github.com/fsnotify/fsnotify" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/functions/deploy" + "github.com/supabase/cli/internal/utils" +) + +const ( + // Default debounce duration for edge function file changes + DefaultEdgeFunctionDebounceDuration = 500 * time.Millisecond +) + +var ( + // Directories to ignore for edge functions. 
+ edgeFunctionIgnoredDirNames = []string{ + ".git", + "node_modules", + ".vscode", + ".idea", + ".DS_Store", + "vendor", + } + + // Patterns for ignoring file events in edge functions. + edgeFunctionIgnoredFilePatterns = []struct { + Prefix string // File basename prefix + Suffix string // File basename suffix + ExactMatch string // File basename exact match + Op fsnotify.Op // Specific operation to ignore for this pattern (0 for any op) + }{ + {Suffix: "~"}, // Common backup files (e.g., emacs, gedit) + {Prefix: ".", Suffix: ".swp"}, // Vim swap files + {Prefix: ".", Suffix: ".swx"}, // Vim swap files (extended) + {Prefix: "___", Suffix: "___"}, // Deno deploy/bundle temporary files often look like _________ + {Prefix: "___"}, // Some other editor temp files might start with this + {Suffix: ".tmp"}, // Generic temp files + {Prefix: ".#"}, // Emacs lock files + {Suffix: "___", Op: fsnotify.Chmod}, // Deno specific temp file pattern during write (often involves a chmod) + } +) + +// EdgeFunctionWatcher provides file watching specifically for edge functions +type EdgeFunctionWatcher struct { + genericWatcher *GenericFileWatcher + fsys afero.Fs +} + +// NewEdgeFunctionWatcher creates a new edge function watcher +func NewEdgeFunctionWatcher(fsys afero.Fs) (*EdgeFunctionWatcher, error) { + config := GenericFileWatcherConfig{ + DebounceDuration: DefaultEdgeFunctionDebounceDuration, + IgnoreFunc: isIgnoredEdgeFunctionFileEvent, + DirIgnoreFunc: isIgnoredEdgeFunctionDir, + SignificantEventFunc: isSignificantEdgeFunctionEvent, + } + + genericWatcher, err := NewGenericFileWatcher(config) + if err != nil { + return nil, err + } + + return &EdgeFunctionWatcher{ + genericWatcher: genericWatcher, + fsys: fsys, + }, nil +} + +// Watch starts watching for edge function file changes and returns channels for restart signals and errors +func (efw *EdgeFunctionWatcher) Watch(ctx context.Context) (<-chan struct{}, <-chan error) { + // Calculate and set initial watch targets + if err := 
efw.UpdateWatchTargets(); err != nil { + // Return error channel with the error + errorChan := make(chan error, 1) + errorChan <- err + return nil, errorChan + } + + // Start watching + restartChan, errorChan := efw.genericWatcher.Watch(ctx) + + // Create a new channel to handle refresh logic + wrappedRestartChan := make(chan struct{}) + + go func() { + for { + select { + case <-ctx.Done(): + return + case <-restartChan: + // Refresh watch targets to catch any new dependencies + if err := efw.UpdateWatchTargets(); err != nil { + utils.Warning("could not update watch targets: %v", err) + } + wrappedRestartChan <- struct{}{} + } + } + }() + + return wrappedRestartChan, errorChan +} + +// Close closes the edge function watcher +func (efw *EdgeFunctionWatcher) Close() error { + return efw.genericWatcher.Close() +} + +// UpdateWatchTargets calculates which files and directories should be watched for edge functions +func (efw *EdgeFunctionWatcher) UpdateWatchTargets() error { + var targets []WatchTarget + + // Always try to watch the functions directory if it exists + functionsDir := utils.FunctionsDir + absFunctionsPath := functionsDir + + if filepath.IsAbs(functionsDir) { + absFunctionsPath = functionsDir + } else { + if utils.CurrentDirAbs != "" { + absFunctionsPath = filepath.Join(utils.CurrentDirAbs, functionsDir) + } else { + cwd, err := os.Getwd() + if err != nil { + utils.Warning("could not get current working directory: %v", err) + } else { + absFunctionsPath = filepath.Join(cwd, functionsDir) + } + } + } + absFunctionsPath = filepath.Clean(absFunctionsPath) + + // Add functions directory if it exists - this will recursively watch subdirectories + if _, err := os.Stat(absFunctionsPath); err == nil { + targets = append(targets, WatchTarget{ + Path: absFunctionsPath, + IsFile: false, + }) + utils.Info(1, "Added functions directory to watch targets: %s", absFunctionsPath) + + // Add all subdirectories within the functions directory for recursive watching + err := 
filepath.Walk(absFunctionsPath, func(path string, info os.FileInfo, err error) error { + if err != nil { + return nil // Continue walking even if there's an error + } + + if info.IsDir() && path != absFunctionsPath { + // Check if this directory should be ignored + if !isIgnoredEdgeFunctionDir(filepath.Base(path), absFunctionsPath, path) { + targets = append(targets, WatchTarget{ + Path: path, + IsFile: false, + }) + utils.Info(1, "Added subdirectory to watch targets: %s", path) + } + } + return nil + }) + if err != nil { + utils.Warning("could not walk functions directory: %v", err) + } + } + + // Always try to watch the config.toml file if it exists + configPath := utils.ConfigPath + if !filepath.IsAbs(configPath) { + if utils.CurrentDirAbs != "" { + configPath = filepath.Join(utils.CurrentDirAbs, utils.ConfigPath) + } else { + cwd, err := os.Getwd() + if err != nil { + utils.Warning("could not get current working directory: %v", err) + } else { + configPath = filepath.Join(cwd, utils.ConfigPath) + } + } + } + configPath = filepath.Clean(configPath) + + // Add config file if it exists + if _, err := os.Stat(configPath); err == nil { + targets = append(targets, WatchTarget{ + Path: configPath, + IsFile: true, + }) + } + + // Add import dependencies from function configurations + slugs, err := deploy.GetFunctionSlugs(efw.fsys) + if err != nil { + utils.Warning("could not get function slugs: %v", err) + } else { + functionsConfig, err := deploy.GetFunctionConfig(slugs, "", nil, efw.fsys) + if err != nil { + utils.Warning("could not get function config: %v", err) + } else { + // Add directories from import dependencies + dependencyDirs := make(map[string]bool) + + for _, fc := range functionsConfig { + if !fc.Enabled { + continue + } + + modulePaths, err := utils.BindHostModules(utils.CurrentDirAbs, fc.Entrypoint, fc.ImportMap, efw.fsys) + if err != nil { + utils.Warning("could not get function paths: %v", err) + continue + } + + for _, path := range 
modulePaths.Paths { + // Get the directory containing the path + dir := filepath.Dir(path) + dependencyDirs[dir] = true + } + } + + // Add unique dependency directories + for dir := range dependencyDirs { + // Only add if not already covered by functions directory or its subdirectories + isAlreadyCovered := false + if strings.HasPrefix(dir, absFunctionsPath) { + isAlreadyCovered = true + } + + if !isAlreadyCovered { + targets = append(targets, WatchTarget{ + Path: dir, + IsFile: false, + }) + utils.Info(1, "Added dependency directory to watch targets: %s", dir) + } + } + } + } + + // Set the watch targets + return efw.genericWatcher.SetWatchTargets(targets) +} + +// isIgnoredEdgeFunctionDir checks if a directory should be ignored by the edge function watcher +func isIgnoredEdgeFunctionDir(dirName string, rootWatchedPath string, currentPath string) bool { + // Never ignore the root watched directory itself, even if it's a dot-directory + if filepath.Clean(currentPath) == filepath.Clean(rootWatchedPath) { + return false + } + + for _, ignoredName := range edgeFunctionIgnoredDirNames { + if dirName == ignoredName { + return true + } + } + + // By default, ignore all directories starting with a "." (dot-directories) + // unless it's the root path (already handled) or "." and ".." which are not actual directory names from Walk + if strings.HasPrefix(dirName, ".") && dirName != "." && dirName != ".." 
{ + return true + } + + return false +} + +// isIgnoredEdgeFunctionFileEvent checks if a file event should be ignored based on edge function patterns +func isIgnoredEdgeFunctionFileEvent(eventName string, eventOp fsnotify.Op) bool { + baseName := filepath.Base(eventName) + for _, p := range edgeFunctionIgnoredFilePatterns { + match := false + if p.ExactMatch != "" && baseName == p.ExactMatch { + match = true + } else { + // Check prefix if specified + prefixMatch := p.Prefix == "" || strings.HasPrefix(baseName, p.Prefix) + // Check suffix if specified + suffixMatch := p.Suffix == "" || strings.HasSuffix(baseName, p.Suffix) + + // Both prefix and suffix must match + if p.Prefix != "" && p.Suffix != "" { + match = prefixMatch && suffixMatch + // Only prefix specified + } else if p.Prefix != "" { + match = prefixMatch + // Only suffix specified + } else if p.Suffix != "" { + match = suffixMatch + } + } + + if match { + // If Op is 0, it means the pattern applies to any operation. + // Otherwise, check if the event's operation is relevant to the pattern's Op. 
+ if p.Op == 0 || (eventOp&p.Op) != 0 { + return true + } + } + } + return false +} + +// isSignificantEdgeFunctionEvent determines if an event should trigger a restart for edge functions +func isSignificantEdgeFunctionEvent(event fsnotify.Event) bool { + return event.Has(fsnotify.Write) || event.Has(fsnotify.Create) || event.Has(fsnotify.Remove) || event.Has(fsnotify.Rename) +} diff --git a/internal/functions/serve/edge_function_watcher_test.go b/internal/functions/serve/edge_function_watcher_test.go new file mode 100644 index 000000000..60bc6a7d9 --- /dev/null +++ b/internal/functions/serve/edge_function_watcher_test.go @@ -0,0 +1,792 @@ +package serve + +import ( + "context" + "os" + "path/filepath" + "testing" + "time" + + "github.com/fsnotify/fsnotify" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/supabase/cli/internal/utils" +) + +func TestNewEdgeFunctionWatcher(t *testing.T) { + fsys := afero.NewMemMapFs() + watcher, err := NewEdgeFunctionWatcher(fsys) + require.NoError(t, err) + require.NotNil(t, watcher) + defer watcher.Close() + + assert.NotNil(t, watcher.genericWatcher) + assert.Equal(t, fsys, watcher.fsys) +} + +func TestIsIgnoredEdgeFunctionDir(t *testing.T) { + rootPath := "/home/project/supabase/functions" + + testCases := []struct { + name string + dirName string + currentPath string + expected bool + }{ + {"never ignores root directory", "functions", rootPath, false}, + {"ignores git directory", ".git", filepath.Join(rootPath, ".git"), true}, + {"ignores node_modules", "node_modules", filepath.Join(rootPath, "node_modules"), true}, + {"ignores vscode directory", ".vscode", filepath.Join(rootPath, ".vscode"), true}, + {"allows normal directories", "src", filepath.Join(rootPath, "src"), false}, + {"allows function directories", "my-function", filepath.Join(rootPath, "my-function"), false}, + {"ignores dot directories", ".cache", filepath.Join(rootPath, ".cache"), true}, + } + + 
for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := isIgnoredEdgeFunctionDir(tc.dirName, rootPath, tc.currentPath) + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestIsIgnoredEdgeFunctionFileEvent(t *testing.T) { + testCases := []struct { + name string + filename string + op fsnotify.Op + expected bool + }{ + // Regular files that should not be ignored + {"TypeScript file", "index.ts", fsnotify.Write, false}, + {"JavaScript file", "function.js", fsnotify.Create, false}, + {"JSON config", "config.json", fsnotify.Write, false}, + {"TOML config", "config.toml", fsnotify.Write, false}, + + // Editor files that should be ignored + {"Vim backup", "file.txt~", fsnotify.Write, true}, + {"Vim swap", ".file.swp", fsnotify.Create, true}, + {"Emacs lock", ".#file.txt", fsnotify.Create, true}, + {"Temp file", "file.tmp", fsnotify.Write, true}, + + // Deno temporary files + {"Deno bundle", "___deno_bundle_123___", fsnotify.Create, true}, + {"Deno temp", "___temp_file___", fsnotify.Write, true}, + + // Special operation cases + {"CHMOD on underscore file", "file___", fsnotify.Chmod, true}, + {"Write on underscore file", "file___", fsnotify.Write, false}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := isIgnoredEdgeFunctionFileEvent(tc.filename, tc.op) + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestEdgeFunctionWatcher_UpdateWatchTargets(t *testing.T) { + t.Run("watches functions directory when it exists", func(t *testing.T) { + tempDir := t.TempDir() + functionsDir := filepath.Join(tempDir, "functions") + require.NoError(t, os.MkdirAll(functionsDir, 0755)) + + // Create a test function + testFunc := filepath.Join(functionsDir, "hello", "index.ts") + require.NoError(t, os.MkdirAll(filepath.Dir(testFunc), 0755)) + require.NoError(t, os.WriteFile(testFunc, []byte("export default () => new Response('hello')"), 0644)) + + // Temporarily set utils.FunctionsDir + originalFunctionsDir := 
utils.FunctionsDir + utils.FunctionsDir = functionsDir + defer func() { utils.FunctionsDir = originalFunctionsDir }() + + fsys := afero.NewOsFs() + watcher, err := NewEdgeFunctionWatcher(fsys) + require.NoError(t, err) + defer watcher.Close() + + err = watcher.UpdateWatchTargets() + assert.NoError(t, err) + + // Should have the functions directory as a target + found := false + for _, target := range watcher.genericWatcher.watchTargets { + if target.Path == functionsDir && !target.IsFile { + found = true + break + } + } + assert.True(t, found, "functions directory should be in watch targets") + }) + + t.Run("watches config.toml file when it exists", func(t *testing.T) { + tempDir := t.TempDir() + configFile := filepath.Join(tempDir, "config.toml") + require.NoError(t, os.WriteFile(configFile, []byte("[api]\nport = 54321"), 0644)) + + // Temporarily set utils.ConfigPath + originalConfigPath := utils.ConfigPath + utils.ConfigPath = configFile + defer func() { utils.ConfigPath = originalConfigPath }() + + fsys := afero.NewOsFs() + watcher, err := NewEdgeFunctionWatcher(fsys) + require.NoError(t, err) + defer watcher.Close() + + err = watcher.UpdateWatchTargets() + assert.NoError(t, err) + + // Should have the config file as a target + found := false + for _, target := range watcher.genericWatcher.watchTargets { + if target.Path == configFile && target.IsFile { + found = true + break + } + } + assert.True(t, found, "config.toml file should be in watch targets") + }) + + t.Run("handles missing directories gracefully", func(t *testing.T) { + tempDir := t.TempDir() + nonExistentDir := filepath.Join(tempDir, "nonexistent") + + // Set paths to non-existent locations + originalFunctionsDir := utils.FunctionsDir + originalConfigPath := utils.ConfigPath + utils.FunctionsDir = nonExistentDir + utils.ConfigPath = filepath.Join(nonExistentDir, "config.toml") + defer func() { + utils.FunctionsDir = originalFunctionsDir + utils.ConfigPath = originalConfigPath + }() + + fsys := 
afero.NewOsFs() + watcher, err := NewEdgeFunctionWatcher(fsys) + require.NoError(t, err) + defer watcher.Close() + + err = watcher.UpdateWatchTargets() + assert.NoError(t, err) + + // Should have no targets since directories don't exist + assert.Empty(t, watcher.genericWatcher.watchTargets) + }) +} + +func TestEdgeFunctionWatcher_Watch_FunctionsDirectory(t *testing.T) { + tempDir := t.TempDir() + functionsDir := filepath.Join(tempDir, "functions") + require.NoError(t, os.MkdirAll(functionsDir, 0755)) + + // Temporarily set utils.FunctionsDir + originalFunctionsDir := utils.FunctionsDir + utils.FunctionsDir = functionsDir + defer func() { utils.FunctionsDir = originalFunctionsDir }() + + fsys := afero.NewOsFs() + watcher, err := NewEdgeFunctionWatcher(fsys) + require.NoError(t, err) + defer watcher.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + restartChan, errorChan := watcher.Watch(ctx) + + // Create a new function file + testFunc := filepath.Join(functionsDir, "test", "index.ts") + require.NoError(t, os.MkdirAll(filepath.Dir(testFunc), 0755)) + require.NoError(t, os.WriteFile(testFunc, []byte("export default () => new Response('test')"), 0644)) + + // Should receive a restart signal + select { + case <-restartChan: + // Success + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-time.After(1 * time.Second): + t.Fatal("expected restart signal but didn't receive one") + } +} + +func TestEdgeFunctionWatcher_Watch_ConfigFile(t *testing.T) { + tempDir := t.TempDir() + configFile := filepath.Join(tempDir, "config.toml") + require.NoError(t, os.WriteFile(configFile, []byte("[api]\nport = 54321"), 0644)) + + // Temporarily set utils.ConfigPath + originalConfigPath := utils.ConfigPath + utils.ConfigPath = configFile + defer func() { utils.ConfigPath = originalConfigPath }() + + fsys := afero.NewOsFs() + watcher, err := NewEdgeFunctionWatcher(fsys) + require.NoError(t, err) + defer watcher.Close() + + ctx, 
cancel := context.WithCancel(context.Background()) + defer cancel() + + restartChan, errorChan := watcher.Watch(ctx) + + // Modify the config file + require.NoError(t, os.WriteFile(configFile, []byte("[api]\nport = 55555"), 0644)) + + // Should receive a restart signal + select { + case <-restartChan: + // Success + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-time.After(1 * time.Second): + t.Fatal("expected restart signal but didn't receive one") + } +} + +func TestEdgeFunctionWatcher_Watch_IgnoredFiles(t *testing.T) { + tempDir := t.TempDir() + functionsDir := filepath.Join(tempDir, "functions") + require.NoError(t, os.MkdirAll(functionsDir, 0755)) + + // Temporarily set utils.FunctionsDir + originalFunctionsDir := utils.FunctionsDir + utils.FunctionsDir = functionsDir + defer func() { utils.FunctionsDir = originalFunctionsDir }() + + fsys := afero.NewOsFs() + watcher, err := NewEdgeFunctionWatcher(fsys) + require.NoError(t, err) + defer watcher.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + restartChan, errorChan := watcher.Watch(ctx) + + // Create an ignored file (should not trigger restart) + ignoredFile := filepath.Join(functionsDir, "test.tmp") + require.NoError(t, os.WriteFile(ignoredFile, []byte("ignored"), 0644)) + + // Should NOT receive a restart signal + select { + case <-restartChan: + t.Fatal("unexpected restart signal for ignored file") + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-time.After(200 * time.Millisecond): + // Success - no signal received + } + + // Now create a normal file (should trigger restart) + normalFile := filepath.Join(functionsDir, "test.ts") + require.NoError(t, os.WriteFile(normalFile, []byte("export default () => new Response('test')"), 0644)) + + select { + case <-restartChan: + // Success + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-time.After(1 * time.Second): + t.Fatal("expected restart 
signal but didn't receive one") + } +} + +func TestEdgeFunctionWatcher_Close(t *testing.T) { + fsys := afero.NewMemMapFs() + watcher, err := NewEdgeFunctionWatcher(fsys) + require.NoError(t, err) + + err = watcher.Close() + assert.NoError(t, err) + + // Closing again should not cause an error + err = watcher.Close() + assert.NoError(t, err) +} +func TestEdgeFunctionWatcher_Integration_IgnoreNonValidFiles(t *testing.T) { + tempDir := t.TempDir() + functionsDir := filepath.Join(tempDir, "functions") + require.NoError(t, os.MkdirAll(functionsDir, 0755)) + + // Temporarily set utils.FunctionsDir + originalFunctionsDir := utils.FunctionsDir + utils.FunctionsDir = functionsDir + defer func() { utils.FunctionsDir = originalFunctionsDir }() + + // Create function directory + funcDir := filepath.Join(functionsDir, "test") + require.NoError(t, os.MkdirAll(funcDir, 0755)) + + fsys := afero.NewOsFs() + watcher, err := NewEdgeFunctionWatcher(fsys) + require.NoError(t, err) + defer watcher.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + restartChan, errorChan := watcher.Watch(ctx) + + // Give watcher time to initialize + time.Sleep(100 * time.Millisecond) + + // Create and modify files that should not trigger reloads + ignoredFiles := []string{ + filepath.Join(funcDir, "test.tmp"), // temp file + filepath.Join(funcDir, ".test.swp"), // vim swap + filepath.Join(funcDir, "test~"), // backup + filepath.Join(funcDir, "___deno_temp___"), // deno temp + } + + for _, ignoredFile := range ignoredFiles { + require.NoError(t, os.WriteFile(ignoredFile, []byte("test content"), 0600)) + time.Sleep(50 * time.Millisecond) + } + + // Wait for debounce period to ensure ignored files don't trigger restarts + time.Sleep(600 * time.Millisecond) + + // Should not receive any restart signals from ignored files + select { + case <-restartChan: + t.Error("Received unexpected restart signal from ignored file") + case err := <-errorChan: + 
t.Fatalf("unexpected error: %v", err) + case <-time.After(100 * time.Millisecond): + // Expected - no restart for ignored files + } +} + +func TestEdgeFunctionWatcher_Integration_TransitiveDependencies(t *testing.T) { + tempDir := t.TempDir() + functionsDir := filepath.Join(tempDir, "functions") + require.NoError(t, os.MkdirAll(functionsDir, 0755)) + + // Temporarily set utils.FunctionsDir + originalFunctionsDir := utils.FunctionsDir + utils.FunctionsDir = functionsDir + defer func() { utils.FunctionsDir = originalFunctionsDir }() + + // Create the shared directory structure + sharedDir := filepath.Join(tempDir, "shared") + require.NoError(t, os.MkdirAll(sharedDir, 0755)) + + // Create the deepest dependency: another.ts + anotherFile := filepath.Join(sharedDir, "another.ts") + require.NoError(t, os.WriteFile(anotherFile, []byte(`export const another = "original value";`), 0600)) + + // Create the intermediate dependency: test.ts (imports from another.ts) + testFile := filepath.Join(sharedDir, "test.ts") + require.NoError(t, os.WriteFile(testFile, []byte("import { another } from './another.ts';\nexport const value = `some ${another}`;"), 0600)) + + // Create the function: index.ts (imports from test.ts) + funcDir := filepath.Join(functionsDir, "transitive") + require.NoError(t, os.MkdirAll(funcDir, 0755)) + funcFile := filepath.Join(funcDir, "index.ts") + require.NoError(t, os.WriteFile(funcFile, []byte(`import { value } from '../../shared/test.ts'; + +export default async (): Promise => { + console.log(value); + return new Response(value); +};`), 0600)) + + fsys := afero.NewOsFs() + watcher, err := NewEdgeFunctionWatcher(fsys) + require.NoError(t, err) + defer watcher.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + restartChan, errorChan := watcher.Watch(ctx) + + // Give watcher time to initialize and process the dependency chain + time.Sleep(200 * time.Millisecond) + + // Modify another.ts (the deepest 
dependency) + require.NoError(t, os.WriteFile(anotherFile, []byte(`export const another = "updated value";`), 0600)) + + // Wait for restart signal - this tests true transitive dependency tracking + select { + case <-restartChan: + // Expected - change to transitive dependency should trigger restart + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-time.After(2 * time.Second): + t.Error("Expected restart signal after modifying transitive dependency (another.ts)") + } +} + +func TestEdgeFunctionWatcher_Integration_FunctionLifecycle(t *testing.T) { + tempDir := t.TempDir() + functionsDir := filepath.Join(tempDir, "functions") + require.NoError(t, os.MkdirAll(functionsDir, 0755)) + + // Temporarily set utils.FunctionsDir + originalFunctionsDir := utils.FunctionsDir + utils.FunctionsDir = functionsDir + defer func() { utils.FunctionsDir = originalFunctionsDir }() + + fsys := afero.NewOsFs() + watcher, err := NewEdgeFunctionWatcher(fsys) + require.NoError(t, err) + defer watcher.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + restartChan, errorChan := watcher.Watch(ctx) + + // Give watcher time to initialize + time.Sleep(100 * time.Millisecond) + + // Test 1: Add new function + newFuncDir := filepath.Join(functionsDir, "new-function") + require.NoError(t, os.MkdirAll(newFuncDir, 0755)) + newFuncFile := filepath.Join(newFuncDir, "index.ts") + require.NoError(t, os.WriteFile(newFuncFile, []byte("export default () => new Response('new')"), 0600)) + + // Wait for restart signal + select { + case <-restartChan: + // Expected - new function should trigger restart + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-time.After(1 * time.Second): + t.Error("Expected restart signal after adding new function") + } + + // Test 2: Update existing function + require.NoError(t, os.WriteFile(newFuncFile, []byte("export default () => new Response('updated')"), 0600)) + + // Wait for 
restart signal + select { + case <-restartChan: + // Expected - update should trigger restart + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-time.After(1 * time.Second): + t.Error("Expected restart signal after updating function") + } + + // Test 3: Remove function + require.NoError(t, os.RemoveAll(newFuncDir)) + + // Wait for restart signal + select { + case <-restartChan: + // Expected - removal should trigger restart + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-time.After(1 * time.Second): + t.Error("Expected restart signal after removing function") + } +} + +func TestEdgeFunctionWatcher_Integration_ComplexMultiLevelDependencies(t *testing.T) { + tempDir := t.TempDir() + functionsDir := filepath.Join(tempDir, "functions") + require.NoError(t, os.MkdirAll(functionsDir, 0755)) + + // Temporarily set utils.FunctionsDir + originalFunctionsDir := utils.FunctionsDir + utils.FunctionsDir = functionsDir + defer func() { utils.FunctionsDir = originalFunctionsDir }() + + // Create multiple shared directories + dirs := []string{ + filepath.Join(tempDir, "shared", "types"), + filepath.Join(tempDir, "shared", "constants"), + filepath.Join(tempDir, "lib", "database"), + } + + for _, dir := range dirs { + require.NoError(t, os.MkdirAll(dir, 0755)) + } + + // Create shared files + typesFile := filepath.Join(tempDir, "shared", "types", "index.ts") + require.NoError(t, os.WriteFile(typesFile, []byte(` +export interface User { + id: string; + name: string; +} +`), 0600)) + + constantsFile := filepath.Join(tempDir, "shared", "constants", "api.ts") + require.NoError(t, os.WriteFile(constantsFile, []byte(` +export const API_BASE_URL = "https://api.example.com"; +`), 0600)) + + databaseFile := filepath.Join(tempDir, "lib", "database", "client.ts") + require.NoError(t, os.WriteFile(databaseFile, []byte(` +export function connectDB() { + return "connected"; +} +`), 0600)) + + // Create a function with multiple imports + funcDir 
:= filepath.Join(functionsDir, "complex") + require.NoError(t, os.MkdirAll(funcDir, 0755)) + funcFile := filepath.Join(funcDir, "index.ts") + + require.NoError(t, os.WriteFile(funcFile, []byte(` +import { User } from "../../shared/types/index.ts"; +import { API_BASE_URL } from "../../shared/constants/api.ts"; +import { connectDB } from "../../lib/database/client.ts"; + +export default async (): Promise => { + const db = connectDB(); + const user: User = { id: "1", name: "Test" }; + + return new Response(JSON.stringify({ user, apiUrl: API_BASE_URL, db }), { + headers: { 'Content-Type': 'application/json' } + }); +}; +`), 0600)) + + fsys := afero.NewOsFs() + watcher, err := NewEdgeFunctionWatcher(fsys) + require.NoError(t, err) + defer watcher.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + restartChan, errorChan := watcher.Watch(ctx) + + // Give watcher time to initialize + time.Sleep(200 * time.Millisecond) + + // Test 1: Modify types file + require.NoError(t, os.WriteFile(typesFile, []byte(` +export interface User { + id: string; + name: string; + email?: string; +} +`), 0600)) + + // Wait for restart signal + select { + case <-restartChan: + // Expected - types change should trigger restart + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-time.After(1 * time.Second): + t.Error("Expected restart signal after modifying types file") + } + + // Test 2: Modify constants file + require.NoError(t, os.WriteFile(constantsFile, []byte(` +export const API_BASE_URL = "https://new-api.example.com"; +`), 0600)) + + // Wait for restart signal + select { + case <-restartChan: + // Expected - constants change should trigger restart + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-time.After(1 * time.Second): + t.Error("Expected restart signal after modifying constants file") + } + + // Test 3: Modify database file + require.NoError(t, os.WriteFile(databaseFile, []byte(` 
+export function connectDB() { + return "new connection"; +} +`), 0600)) + + // Wait for restart signal + select { + case <-restartChan: + // Expected - database change should trigger restart + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-time.After(1 * time.Second): + t.Error("Expected restart signal after modifying database file") + } +} + +func TestEdgeFunctionWatcher_Integration_WatcherErrorHandling(t *testing.T) { + tempDir := t.TempDir() + functionsDir := filepath.Join(tempDir, "functions") + require.NoError(t, os.MkdirAll(functionsDir, 0755)) + + // Temporarily set utils.FunctionsDir + originalFunctionsDir := utils.FunctionsDir + utils.FunctionsDir = functionsDir + defer func() { utils.FunctionsDir = originalFunctionsDir }() + + // Create a function with invalid imports + funcDir := filepath.Join(functionsDir, "test") + require.NoError(t, os.MkdirAll(funcDir, 0755)) + funcFile := filepath.Join(funcDir, "index.ts") + require.NoError(t, os.WriteFile(funcFile, []byte(` +import { something } from "/invalid/path"; +export default () => new Response("test"); +`), 0600)) + + fsys := afero.NewOsFs() + watcher, err := NewEdgeFunctionWatcher(fsys) + require.NoError(t, err) + defer watcher.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + restartChan, errorChan := watcher.Watch(ctx) + + // Give watcher time to initialize + time.Sleep(100 * time.Millisecond) + + // Modify the function with invalid import + require.NoError(t, os.WriteFile(funcFile, []byte(` +import { something } from "/another/invalid/path"; +export default () => new Response("test"); +`), 0600)) + + // Wait for restart signal - should still work despite invalid imports + select { + case <-restartChan: + // Expected - file change should still trigger restart + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-time.After(1 * time.Second): + t.Error("Expected restart signal after modifying function 
with invalid imports") + } +} + +func TestEdgeFunctionWatcher_Integration_DependencyRemoval(t *testing.T) { + tempDir := t.TempDir() + functionsDir := filepath.Join(tempDir, "functions") + require.NoError(t, os.MkdirAll(functionsDir, 0755)) + + // Temporarily set utils.FunctionsDir + originalFunctionsDir := utils.FunctionsDir + utils.FunctionsDir = functionsDir + defer func() { utils.FunctionsDir = originalFunctionsDir }() + + // Create dependency chain: function -> utils -> helpers + utilsDir := filepath.Join(tempDir, "lib", "utils") + helpersDir := filepath.Join(tempDir, "lib", "helpers") + require.NoError(t, os.MkdirAll(utilsDir, 0755)) + require.NoError(t, os.MkdirAll(helpersDir, 0755)) + + // Create helper file (deepest dependency) + helperFile := filepath.Join(helpersDir, "common.ts") + require.NoError(t, os.WriteFile(helperFile, []byte(`export const helper = "helper value";`), 0600)) + + // Create utils file that imports from helpers + utilsFile := filepath.Join(utilsDir, "index.ts") + require.NoError(t, os.WriteFile(utilsFile, []byte(` +import { helper } from "../helpers/common.ts"; +export const utilValue = `+"`util: ${helper}`;"+` +`), 0600)) + + // Create function that imports from utils (which transitively imports helpers) + funcDir := filepath.Join(functionsDir, "test") + require.NoError(t, os.MkdirAll(funcDir, 0755)) + funcFile := filepath.Join(funcDir, "index.ts") + require.NoError(t, os.WriteFile(funcFile, []byte(` +import { utilValue } from "../../lib/utils/index.ts"; + +export default async (): Promise => { + return new Response(utilValue); +}; +`), 0600)) + + fsys := afero.NewOsFs() + watcher, err := NewEdgeFunctionWatcher(fsys) + require.NoError(t, err) + defer watcher.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + restartChan, errorChan := watcher.Watch(ctx) + + // Give watcher time to initialize and discover dependencies + time.Sleep(500 * time.Millisecond) + + // Test 1: Verify that 
changes to helper file trigger restart (before removal) + require.NoError(t, os.WriteFile(helperFile, []byte(`export const helper = "initial test";`), 0600)) + + // Should trigger restart because helper is a transitive dependency + select { + case <-restartChan: + // Expected - helper change should trigger restart when it's a dependency + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-time.After(2 * time.Second): + t.Error("Expected restart signal from helper dependency change") + } + + // Clear any remaining restart signals + time.Sleep(100 * time.Millisecond) + for len(restartChan) > 0 { + <-restartChan + } + + // Test 2: Remove dependency chain by modifying function to not import utils + require.NoError(t, os.WriteFile(funcFile, []byte(` +export default async (): Promise => { + return new Response("No more dependencies!"); +}; +`), 0600)) + + // Wait for file change to be processed and dependencies to be updated + select { + case <-restartChan: + // Expected - file change should trigger restart + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-time.After(2 * time.Second): + t.Error("Expected restart signal after removing dependencies") + } + + // Give the watcher time to process the change and clean up unused directories + time.Sleep(800 * time.Millisecond) + + // Clear any remaining restart signals + for len(restartChan) > 0 { + <-restartChan + } + + // Test 3: Verify that changes to helper no longer trigger restarts + require.NoError(t, os.WriteFile(helperFile, []byte(`export const helper = "should not trigger restart";`), 0600)) + + // Should NOT trigger restart because helper is no longer a dependency + select { + case <-restartChan: + t.Error("Should not receive restart signal from unused dependency") + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-time.After(1 * time.Second): + // Expected - no restart for unused dependencies + } + + // Test 4: Verify function changes still 
trigger restarts + require.NoError(t, os.WriteFile(funcFile, []byte(` +export default async (): Promise => { + return new Response("Function still works!"); +}; +`), 0600)) + + // Should trigger restart because function itself changed + select { + case <-restartChan: + // Expected - function change should still trigger restart + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-time.After(2 * time.Second): + t.Error("Expected restart signal from function change") + } +} diff --git a/internal/functions/serve/generic_watcher.go b/internal/functions/serve/generic_watcher.go new file mode 100644 index 000000000..64fb997d4 --- /dev/null +++ b/internal/functions/serve/generic_watcher.go @@ -0,0 +1,205 @@ +package serve + +import ( + "context" + "path/filepath" + "strings" + "time" + + "github.com/fsnotify/fsnotify" + "github.com/go-errors/errors" + "github.com/supabase/cli/internal/utils" +) + +// WatchTarget represents a file or directory to watch +type WatchTarget struct { + Path string + IsFile bool // true for files, false for directories +} + +// IgnoreFunc is a function type for determining if a file event should be ignored +type IgnoreFunc func(eventPath string, eventOp fsnotify.Op) bool + +// DirIgnoreFunc is a function type for determining if a directory should be ignored +type DirIgnoreFunc func(dirName string, rootWatchedPath string, currentPath string) bool + +// SignificantEventFunc is a function type for determining if an event should trigger a restart +type SignificantEventFunc func(event fsnotify.Event) bool + +// GenericFileWatcher provides a configurable file system watcher +type GenericFileWatcher struct { + watcher *fsnotify.Watcher + watchTargets []WatchTarget + watchedPaths map[string]bool + debounceDuration time.Duration + ignoreFunc IgnoreFunc + dirIgnoreFunc DirIgnoreFunc + significantEventFunc SignificantEventFunc + restartChan chan struct{} + errorChan chan error +} + +// GenericFileWatcherConfig holds configuration for 
the generic file watcher +type GenericFileWatcherConfig struct { + DebounceDuration time.Duration + IgnoreFunc IgnoreFunc + DirIgnoreFunc DirIgnoreFunc + SignificantEventFunc SignificantEventFunc +} + +// NewGenericFileWatcher creates a new generic file watcher with the given configuration +func NewGenericFileWatcher(config GenericFileWatcherConfig) (*GenericFileWatcher, error) { + watcher, err := fsnotify.NewWatcher() + if err != nil { + return nil, errors.Errorf("failed to create file watcher: %w", err) + } + + return &GenericFileWatcher{ + watcher: watcher, + watchedPaths: make(map[string]bool), + debounceDuration: config.DebounceDuration, + ignoreFunc: config.IgnoreFunc, + dirIgnoreFunc: config.DirIgnoreFunc, + significantEventFunc: config.SignificantEventFunc, + restartChan: make(chan struct{}), + errorChan: make(chan error), + }, nil +} + +// SetWatchTargets sets the files and directories to watch +func (gfw *GenericFileWatcher) SetWatchTargets(targets []WatchTarget) error { + // Remove existing watches + for path := range gfw.watchedPaths { + if err := gfw.watcher.Remove(path); err != nil { + utils.Warning("could not remove path from watcher %s: %v", path, err) + } + } + gfw.watchedPaths = make(map[string]bool) + gfw.watchTargets = targets + + // Add new watches + for _, target := range targets { + if target.IsFile { + // For files, watch the file itself directly + if !gfw.watchedPaths[target.Path] { + if err := gfw.watcher.Add(target.Path); err != nil { + utils.Warning("could not watch file %s: %v", target.Path, err) + } else { + utils.Info(1, "Added file to watcher: %s", target.Path) + gfw.watchedPaths[target.Path] = true + } + } + } else { + // For directories, watch the directory itself + if !gfw.watchedPaths[target.Path] { + if err := gfw.watcher.Add(target.Path); err != nil { + utils.Warning("could not watch directory %s: %v", target.Path, err) + } else { + utils.Info(1, "Added directory to watcher: %s", target.Path) + gfw.watchedPaths[target.Path] = 
true + } + } + } + } + + return nil +} + +// Watch starts watching for file system events and returns channels for restart signals and errors +func (gfw *GenericFileWatcher) Watch(ctx context.Context) (<-chan struct{}, <-chan error) { + go gfw.runWatcher(ctx) + return gfw.restartChan, gfw.errorChan +} + +// runWatcher listens for events from the watcher, debounces them, and signals for a restart +func (gfw *GenericFileWatcher) runWatcher(ctx context.Context) { + var restartTimer *time.Timer // Timer for debouncing restarts + + for { + select { + case event, ok := <-gfw.watcher.Events: + if !ok { + return + } + + // Check if this event should be ignored + if gfw.ignoreFunc != nil && gfw.ignoreFunc(event.Name, event.Op) { + utils.Debug("Ignoring file event: %s (%s)", event.Name, event.Op.String()) + continue + } + + // Check if this is a path we're watching + shouldProcess := false + eventPath := filepath.Clean(event.Name) + + // Check if this event is for a target we're watching + for _, target := range gfw.watchTargets { + targetPath := filepath.Clean(target.Path) + + if target.IsFile { + // For file targets, event must be for the exact file + if eventPath == targetPath { + shouldProcess = true + break + } + } else { + // For directory targets, check if the event is within this directory + if eventPath == targetPath || strings.HasPrefix(eventPath, targetPath+string(filepath.Separator)) { + shouldProcess = true + break + } + } + } + + if !shouldProcess { + continue + } + + // Handle file change events that should trigger a restart + var isSignificantEventForRestart bool + if gfw.significantEventFunc != nil { + isSignificantEventForRestart = gfw.significantEventFunc(event) + } else { + // Default behavior if no function provided + isSignificantEventForRestart = event.Has(fsnotify.Write) || event.Has(fsnotify.Create) || event.Has(fsnotify.Remove) || event.Has(fsnotify.Rename) + } + + if isSignificantEventForRestart { + utils.Info(2, "File change detected: %s (%s)", 
event.Name, event.Op.String()) + + if restartTimer != nil { + restartTimer.Stop() + } + restartTimer = time.AfterFunc(gfw.debounceDuration, func() { + select { + case gfw.restartChan <- struct{}{}: + case <-ctx.Done(): + } + }) + } + + case err, ok := <-gfw.watcher.Errors: + if !ok { + return + } + select { + case gfw.errorChan <- err: + case <-ctx.Done(): + } + + case <-ctx.Done(): + if restartTimer != nil { + restartTimer.Stop() + } + return + } + } +} + +// Close closes the file watcher +func (gfw *GenericFileWatcher) Close() error { + if gfw.watcher != nil { + return gfw.watcher.Close() + } + return nil +} diff --git a/internal/functions/serve/generic_watcher_test.go b/internal/functions/serve/generic_watcher_test.go new file mode 100644 index 000000000..11240db06 --- /dev/null +++ b/internal/functions/serve/generic_watcher_test.go @@ -0,0 +1,354 @@ +package serve + +import ( + "context" + "os" + "path/filepath" + "testing" + "time" + + "github.com/fsnotify/fsnotify" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewGenericFileWatcher(t *testing.T) { + config := GenericFileWatcherConfig{ + DebounceDuration: 100 * time.Millisecond, + IgnoreFunc: nil, + DirIgnoreFunc: nil, + } + + watcher, err := NewGenericFileWatcher(config) + require.NoError(t, err) + require.NotNil(t, watcher) + defer watcher.Close() + + assert.Equal(t, config.DebounceDuration, watcher.debounceDuration) + assert.NotNil(t, watcher.watcher) + assert.NotNil(t, watcher.watchedPaths) + assert.NotNil(t, watcher.restartChan) + assert.NotNil(t, watcher.errorChan) +} + +func TestGenericFileWatcher_SetWatchTargets(t *testing.T) { + t.Run("can set directory targets", func(t *testing.T) { + tempDir := t.TempDir() + targetDir := filepath.Join(tempDir, "test") + require.NoError(t, os.MkdirAll(targetDir, 0755)) + + config := GenericFileWatcherConfig{ + DebounceDuration: 100 * time.Millisecond, + } + watcher, err := NewGenericFileWatcher(config) + 
require.NoError(t, err) + defer watcher.Close() + + targets := []WatchTarget{ + {Path: targetDir, IsFile: false}, + } + + err = watcher.SetWatchTargets(targets) + assert.NoError(t, err) + assert.True(t, watcher.watchedPaths[targetDir]) + assert.Equal(t, targets, watcher.watchTargets) + }) + + t.Run("can set file targets", func(t *testing.T) { + tempDir := t.TempDir() + targetFile := filepath.Join(tempDir, "test.txt") + require.NoError(t, os.WriteFile(targetFile, []byte("test"), 0644)) + + config := GenericFileWatcherConfig{ + DebounceDuration: 100 * time.Millisecond, + } + watcher, err := NewGenericFileWatcher(config) + require.NoError(t, err) + defer watcher.Close() + + targets := []WatchTarget{ + {Path: targetFile, IsFile: true}, + } + + err = watcher.SetWatchTargets(targets) + assert.NoError(t, err) + // For files, we watch the file itself directly + assert.True(t, watcher.watchedPaths[targetFile]) + assert.Equal(t, targets, watcher.watchTargets) + }) + + t.Run("can replace existing targets", func(t *testing.T) { + tempDir := t.TempDir() + dir1 := filepath.Join(tempDir, "dir1") + dir2 := filepath.Join(tempDir, "dir2") + require.NoError(t, os.MkdirAll(dir1, 0755)) + require.NoError(t, os.MkdirAll(dir2, 0755)) + + config := GenericFileWatcherConfig{ + DebounceDuration: 100 * time.Millisecond, + } + watcher, err := NewGenericFileWatcher(config) + require.NoError(t, err) + defer watcher.Close() + + // Set initial targets + targets1 := []WatchTarget{ + {Path: dir1, IsFile: false}, + } + err = watcher.SetWatchTargets(targets1) + require.NoError(t, err) + assert.True(t, watcher.watchedPaths[dir1]) + + // Replace with new targets + targets2 := []WatchTarget{ + {Path: dir2, IsFile: false}, + } + err = watcher.SetWatchTargets(targets2) + assert.NoError(t, err) + assert.False(t, watcher.watchedPaths[dir1]) // Old target should be removed + assert.True(t, watcher.watchedPaths[dir2]) // New target should be added + }) +} + +func TestGenericFileWatcher_Watch_DirectoryEvents(t 
*testing.T) { + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + + config := GenericFileWatcherConfig{ + DebounceDuration: 50 * time.Millisecond, + } + watcher, err := NewGenericFileWatcher(config) + require.NoError(t, err) + defer watcher.Close() + + targets := []WatchTarget{ + {Path: tempDir, IsFile: false}, + } + err = watcher.SetWatchTargets(targets) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + restartChan, errorChan := watcher.Watch(ctx) + + // Write to a file in the watched directory + require.NoError(t, os.WriteFile(testFile, []byte("test content"), 0644)) + + // Should receive a restart signal + select { + case <-restartChan: + // Success + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-time.After(200 * time.Millisecond): + t.Fatal("expected restart signal but didn't receive one") + } +} + +func TestGenericFileWatcher_Watch_FileEvents(t *testing.T) { + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "config.toml") + require.NoError(t, os.WriteFile(testFile, []byte("test = 'value'"), 0644)) + + config := GenericFileWatcherConfig{ + DebounceDuration: 50 * time.Millisecond, + } + watcher, err := NewGenericFileWatcher(config) + require.NoError(t, err) + defer watcher.Close() + + targets := []WatchTarget{ + {Path: testFile, IsFile: true}, + } + err = watcher.SetWatchTargets(targets) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + restartChan, errorChan := watcher.Watch(ctx) + + // Modify the specific file + require.NoError(t, os.WriteFile(testFile, []byte("test = 'new_value'"), 0644)) + + // Should receive a restart signal + select { + case <-restartChan: + // Success + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-time.After(200 * time.Millisecond): + t.Fatal("expected restart signal but didn't receive one") + } +} + +func 
TestGenericFileWatcher_Watch_IgnoreSpecificFile(t *testing.T) { + tempDir := t.TempDir() + configFile := filepath.Join(tempDir, "config.toml") + otherFile := filepath.Join(tempDir, "other.txt") + require.NoError(t, os.WriteFile(configFile, []byte("test = 'value'"), 0644)) + require.NoError(t, os.WriteFile(otherFile, []byte("other content"), 0644)) + + config := GenericFileWatcherConfig{ + DebounceDuration: 50 * time.Millisecond, + } + watcher, err := NewGenericFileWatcher(config) + require.NoError(t, err) + defer watcher.Close() + + // Only watch the config file specifically + targets := []WatchTarget{ + {Path: configFile, IsFile: true}, + } + err = watcher.SetWatchTargets(targets) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + restartChan, errorChan := watcher.Watch(ctx) + + // Modify the other file (should not trigger restart) + require.NoError(t, os.WriteFile(otherFile, []byte("other content modified"), 0644)) + + // Should NOT receive a restart signal + select { + case <-restartChan: + t.Fatal("unexpected restart signal for file not being watched") + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-time.After(100 * time.Millisecond): + // Success - no signal received + } + + // Now modify the watched file (should trigger restart) + require.NoError(t, os.WriteFile(configFile, []byte("test = 'modified'"), 0644)) + + select { + case <-restartChan: + // Success + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-time.After(200 * time.Millisecond): + t.Fatal("expected restart signal but didn't receive one") + } +} + +func TestGenericFileWatcher_Watch_WithIgnoreFunc(t *testing.T) { + tempDir := t.TempDir() + normalFile := filepath.Join(tempDir, "normal.txt") + ignoreFile := filepath.Join(tempDir, "ignore.tmp") + + // Create ignore function that ignores .tmp files + ignoreFunc := func(eventPath string, eventOp fsnotify.Op) bool { + return 
filepath.Ext(eventPath) == ".tmp" + } + + config := GenericFileWatcherConfig{ + DebounceDuration: 50 * time.Millisecond, + IgnoreFunc: ignoreFunc, + } + watcher, err := NewGenericFileWatcher(config) + require.NoError(t, err) + defer watcher.Close() + + targets := []WatchTarget{ + {Path: tempDir, IsFile: false}, + } + err = watcher.SetWatchTargets(targets) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + restartChan, errorChan := watcher.Watch(ctx) + + // Create ignored file (should not trigger restart) + require.NoError(t, os.WriteFile(ignoreFile, []byte("ignored"), 0644)) + + select { + case <-restartChan: + t.Fatal("unexpected restart signal for ignored file") + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-time.After(100 * time.Millisecond): + // Success - no signal received + } + + // Create normal file (should trigger restart) + require.NoError(t, os.WriteFile(normalFile, []byte("normal"), 0644)) + + select { + case <-restartChan: + // Success + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-time.After(200 * time.Millisecond): + t.Fatal("expected restart signal but didn't receive one") + } +} + +func TestGenericFileWatcher_Watch_Debouncing(t *testing.T) { + tempDir := t.TempDir() + testFile := filepath.Join(tempDir, "test.txt") + + config := GenericFileWatcherConfig{ + DebounceDuration: 100 * time.Millisecond, + } + watcher, err := NewGenericFileWatcher(config) + require.NoError(t, err) + defer watcher.Close() + + targets := []WatchTarget{ + {Path: tempDir, IsFile: false}, + } + err = watcher.SetWatchTargets(targets) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + restartChan, errorChan := watcher.Watch(ctx) + + // Create multiple rapid changes + for i := 0; i < 5; i++ { + require.NoError(t, os.WriteFile(testFile, []byte("content"), 0644)) + time.Sleep(10 * time.Millisecond) // Rapid changes 
+ } + + // Should only receive one restart signal after debounce period + restartCount := 0 + timeout := time.After(200 * time.Millisecond) + + for { + select { + case <-restartChan: + restartCount++ + case err := <-errorChan: + t.Fatalf("unexpected error: %v", err) + case <-timeout: + // Done counting + goto done + } + } + +done: + // Should receive exactly one restart signal due to debouncing + assert.Equal(t, 1, restartCount, "expected exactly one restart signal due to debouncing") +} + +func TestGenericFileWatcher_Close(t *testing.T) { + config := GenericFileWatcherConfig{ + DebounceDuration: 100 * time.Millisecond, + } + watcher, err := NewGenericFileWatcher(config) + require.NoError(t, err) + + err = watcher.Close() + assert.NoError(t, err) + + // Closing again should not cause an error + err = watcher.Close() + assert.NoError(t, err) +} diff --git a/internal/functions/serve/serve.go b/internal/functions/serve/serve.go index 9fba48a76..3d8728515 100644 --- a/internal/functions/serve/serve.go +++ b/internal/functions/serve/serve.go @@ -1,6 +1,7 @@ package serve import ( + "bufio" "context" _ "embed" "encoding/json" @@ -10,6 +11,7 @@ import ( "strconv" "strings" + "github.com/containerd/errdefs" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" "github.com/docker/go-connections/nat" @@ -75,23 +77,54 @@ func Run(ctx context.Context, envFilePath string, noVerifyJWT *bool, importMapPa if err := utils.AssertSupabaseDbIsRunning(); err != nil { return err } - // 2. Remove existing container. - _ = utils.Docker.ContainerRemove(ctx, utils.EdgeRuntimeId, container.RemoveOptions{ - RemoveVolumes: true, - Force: true, - }) - // Use network alias because Deno cannot resolve `_` in hostname - dbUrl := fmt.Sprintf("postgresql://postgres:postgres@%s:5432/postgres", utils.DbAliases[0]) - // 3. 
Serve and log to console - fmt.Fprintln(os.Stderr, "Setting up Edge Functions runtime...") - if err := ServeFunctions(ctx, envFilePath, noVerifyJWT, importMapPath, dbUrl, runtimeOption, fsys); err != nil { - return err + + // Ensure cleanup on exit + defer func() { + _ = utils.Docker.ContainerRemove(context.Background(), utils.EdgeRuntimeId, container.RemoveOptions{ + RemoveVolumes: true, + Force: true, + }) + }() + + watcher := NewSimpleFileWatcher() + go watcher.Start(ctx, fsys) + + var streamer logStreamer + var streamerStarted bool + + // Function to start/restart both runtime and streamer + startServices := func() error { + if err := restartEdgeRuntime(ctx, envFilePath, noVerifyJWT, importMapPath, runtimeOption, fsys); err != nil { + return err + } + + // Start log streamer after container is running + streamer = NewLogStreamer() + go streamer.Start(ctx) + streamerStarted = true + return nil } - if err := utils.DockerStreamLogs(ctx, utils.EdgeRuntimeId, os.Stdout, os.Stderr); err != nil { + + // Initial start + if err := startServices(); err != nil { return err } - fmt.Println("Stopped serving " + utils.Bold(utils.FunctionsDir)) - return nil + + for { + select { + case <-ctx.Done(): + fmt.Println("Stopped serving " + utils.Bold(utils.FunctionsDir)) + return ctx.Err() + case <-watcher.RestartCh: + if err := startServices(); err != nil { + return err + } + case err := <-streamer.ErrCh: + if streamerStarted { + return err + } + } + } } func ServeFunctions(ctx context.Context, envFilePath string, noVerifyJWT *bool, importMapPath string, dbUrl string, runtimeOption RuntimeOption, fsys afero.Fs) error { @@ -196,6 +229,10 @@ func parseEnvFile(envFilePath string, fsys afero.Fs) ([]string, error) { } env := []string{} secrets, err := set.ListSecrets(envFilePath, fsys) + if err != nil { + // If parsing fails, return empty slice and error + return nil, err + } for _, v := range secrets { env = append(env, fmt.Sprintf("%s=%s", v.Name, v.Value)) } @@ -214,7 +251,7 @@ func 
populatePerFunctionConfigs(cwd, importMapPath string, noVerifyJWT *bool, fs binds := []string{} for slug, fc := range functionsConfig { if !fc.Enabled { - fmt.Fprintln(os.Stderr, "Skipped serving Function:", slug) + utils.Warning("Skipped serving Function: %s\n", slug) continue } modules, err := deploy.GetBindMounts(cwd, utils.FunctionsDir, "", fc.Entrypoint, fc.ImportMap, fsys) @@ -235,3 +272,101 @@ func populatePerFunctionConfigs(cwd, importMapPath string, noVerifyJWT *bool, fs } return utils.RemoveDuplicates(binds), string(functionsConfigBytes), nil } + +func restartEdgeRuntime(ctx context.Context, envFilePath string, noVerifyJWT *bool, importMapPath string, runtimeOption RuntimeOption, fsys afero.Fs) error { + // Reload config.toml file in case functions settings has been changed + if err := flags.LoadConfig(fsys); err != nil { + return errors.Errorf("Failed to reload config: %w", err) + } + + // Remove existing container before starting a new one. + if err := utils.Docker.ContainerRemove(context.Background(), utils.EdgeRuntimeId, container.RemoveOptions{ + RemoveVolumes: true, + Force: true, + }); err != nil { + utils.Warning("Failed to remove existing Edge Runtime container before start: %v\n", err) + } + + // Use network alias because Deno cannot resolve `_` in hostname + dbUrl := fmt.Sprintf("postgresql://postgres:postgres@%s:5432/postgres", utils.DbAliases[0]) + + utils.Info(0, "Setting up Edge Functions runtime...\n") + if err := ServeFunctions(ctx, envFilePath, noVerifyJWT, importMapPath, dbUrl, runtimeOption, fsys); err != nil { + return errors.Errorf("Failed to serve functions: %w", err) + } + + utils.Info(0, "Edge Functions runtime is ready.\n") + return nil +} + +type logStreamer struct { + ErrCh chan error +} + +func NewLogStreamer() logStreamer { + return logStreamer{ + ErrCh: make(chan error, 1), + } +} + +func (s *logStreamer) Start(ctx context.Context) { + // Stream logs from the edge runtime container + if err := utils.DockerStreamLogs(ctx, 
utils.EdgeRuntimeId, os.Stdout, os.Stderr); err != nil && + !errdefs.IsNotFound(err) && + !strings.HasSuffix(err.Error(), "exit 137") && + !strings.HasSuffix(err.Error(), "can not get logs from container which is dead or marked for removal") { + s.ErrCh <- err + return + } +} + +type fileWatcher struct { + RestartCh chan struct{} +} + +func NewSimpleFileWatcher() fileWatcher { + return fileWatcher{ + RestartCh: make(chan struct{}, 1), + } +} + +func (w *fileWatcher) Start(ctx context.Context, fsys afero.Fs) { + edgeWatcher, err := NewEdgeFunctionWatcher(fsys) + if err != nil { + utils.Error("Failed to create edge function watcher: %v\n", err) + utils.Warning("Press enter to reload...\n") + scanner := bufio.NewScanner(os.Stdin) + for { + select { + case <-ctx.Done(): + return + default: + if scanner.Scan() { + w.RestartCh <- struct{}{} + } + } + } + } + defer edgeWatcher.Close() + + // Start watching for file changes + restartChan, errChan := edgeWatcher.Watch(ctx) + + for { + select { + case <-ctx.Done(): + return + case <-restartChan: + w.RestartCh <- struct{}{} + case err := <-errChan: + utils.Error("Edge function watcher error: %v\n", err) + // Fall back to manual reload on error + utils.Warning("Press enter to reload...\n") + scanner := bufio.NewScanner(os.Stdin) + for scanner.Scan() { + w.RestartCh <- struct{}{} + } + return + } + } +} diff --git a/internal/functions/serve/serve_test.go b/internal/functions/serve/serve_test.go index 7dbd75377..9dddf74d8 100644 --- a/internal/functions/serve/serve_test.go +++ b/internal/functions/serve/serve_test.go @@ -6,10 +6,14 @@ import ( "os" "path/filepath" "testing" + "time" + + "encoding/json" "github.com/docker/docker/api/types/container" "github.com/h2non/gock" "github.com/spf13/afero" + "github.com/spf13/viper" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/supabase/cli/internal/testing/apitest" @@ -17,6 +21,80 @@ import ( "github.com/supabase/cli/pkg/cast" ) +// Test helper 
functions +type TestSetup struct { + T *testing.T + Fsys afero.Fs + Context context.Context + Cancel context.CancelFunc + ProjectId string + RootPath string +} + +func NewTestSetup(t *testing.T) *TestSetup { + fsys := afero.NewMemMapFs() + ctx, cancel := context.WithCancel(context.Background()) + + setup := &TestSetup{ + T: t, + Fsys: fsys, + Context: ctx, + Cancel: cancel, + ProjectId: "test", + RootPath: "/project", + } + + // Initialize basic config + require.NoError(t, utils.InitConfig(utils.InitParams{ProjectId: setup.ProjectId}, fsys)) + + return setup +} + +func (s *TestSetup) Cleanup() { + s.Cancel() + gock.OffAll() +} + +// SetupFunction creates a test function with given name and content +func (s *TestSetup) SetupFunction(name, content string) { + funcDir := filepath.Join(utils.FunctionsDir, name) + require.NoError(s.T, s.Fsys.MkdirAll(funcDir, 0755)) + require.NoError(s.T, afero.WriteFile(s.Fsys, filepath.Join(funcDir, "index.ts"), []byte(content), 0644)) +} + +// SetupEnvFile creates an environment file with given content +func (s *TestSetup) SetupEnvFile(path, content string) { + if path == "" { + path = utils.FallbackEnvFilePath + } + require.NoError(s.T, afero.WriteFile(s.Fsys, path, []byte(content), 0644)) +} + +// SetupImportMap creates an import map file with given content +func (s *TestSetup) SetupImportMap(path, content string) { + if path == "" { + path = utils.FallbackImportMapPath + } + require.NoError(s.T, afero.WriteFile(s.Fsys, path, []byte(content), 0644)) +} + +// SetupConfigWithFunctions creates a supabase config.toml with function configurations +func (s *TestSetup) SetupConfigWithFunctions() { + configContent := `[functions.hello] +enabled = true +verify_jwt = false + +[functions.protected] +enabled = true +verify_jwt = true + +[functions.goodbye] +enabled = false +verify_jwt = false` + + require.NoError(s.T, afero.WriteFile(s.Fsys, "supabase/config.toml", []byte(configContent), 0644)) +} + func TestServeCommand(t *testing.T) { 
t.Run("serves all functions", func(t *testing.T) { // Setup in-memory fs @@ -37,10 +115,15 @@ func TestServeCommand(t *testing.T) { Reply(http.StatusOK) apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.EdgeRuntime.Image), containerId) require.NoError(t, apitest.MockDockerLogs(utils.Docker, containerId, "success")) - // Run test - err := Run(context.Background(), "", nil, "", RuntimeOption{}, fsys) - // Check error - assert.NoError(t, err) + + // Create a context with timeout to prevent test from hanging + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + // Run test with timeout context + err := Run(ctx, "", nil, "", RuntimeOption{}, fsys) + // Check error - expect context.DeadlineExceeded because the server runs until cancelled + assert.ErrorIs(t, err, context.DeadlineExceeded) assert.Empty(t, apitest.ListUnmatchedRequests()) }) @@ -48,6 +131,7 @@ func TestServeCommand(t *testing.T) { // Setup in-memory fs fsys := afero.NewMemMapFs() require.NoError(t, afero.WriteFile(fsys, utils.ConfigPath, []byte("malformed"), 0644)) + // Run test err := Run(context.Background(), "", nil, "", RuntimeOption{}, fsys) // Check error @@ -64,6 +148,7 @@ func TestServeCommand(t *testing.T) { gock.New(utils.Docker.DaemonHost()). Get("/v" + utils.Docker.ClientVersion() + "/containers/supabase_db_test/json"). Reply(http.StatusNotFound) + // Run test err := Run(context.Background(), "", nil, "", RuntimeOption{}, fsys) // Check error @@ -81,6 +166,7 @@ func TestServeCommand(t *testing.T) { Get("/v" + utils.Docker.ClientVersion() + "/containers/supabase_db_test/json"). Reply(http.StatusOK). JSON(container.InspectResponse{}) + // Run test err := Run(context.Background(), ".env", nil, "", RuntimeOption{}, fsys) // Check error @@ -102,9 +188,176 @@ func TestServeCommand(t *testing.T) { Get("/v" + utils.Docker.ClientVersion() + "/containers/supabase_db_test/json"). Reply(http.StatusOK). 
JSON(container.InspectResponse{}) + // Run test err := Run(context.Background(), ".env", cast.Ptr(true), "import_map.json", RuntimeOption{}, fsys) // Check error assert.ErrorIs(t, err, os.ErrNotExist) }) } + +func TestParseEnvFile(t *testing.T) { + // Save original CurrentDirAbs + originalCurrentDirAbs := utils.CurrentDirAbs + defer func() { + utils.CurrentDirAbs = originalCurrentDirAbs + }() + + t.Run("parses env file successfully", func(t *testing.T) { + setup := NewTestSetup(t) + defer setup.Cleanup() + + envContent := `DATABASE_URL=postgresql://localhost:5432/test +API_KEY=secret123 +DEBUG=true` + envPath := "/project/.env" + setup.SetupEnvFile(envPath, envContent) + + env, err := parseEnvFile(envPath, setup.Fsys) + assert.NoError(t, err) + assert.Contains(t, env, "DATABASE_URL=postgresql://localhost:5432/test") + assert.Contains(t, env, "API_KEY=secret123") + assert.Contains(t, env, "DEBUG=true") + }) + + t.Run("uses fallback env file when path is empty", func(t *testing.T) { + setup := NewTestSetup(t) + defer setup.Cleanup() + + envContent := `FALLBACK_VAR=fallback_value` + setup.SetupEnvFile("", envContent) + + env, err := parseEnvFile("", setup.Fsys) + assert.NoError(t, err) + assert.Contains(t, env, "FALLBACK_VAR=fallback_value") + }) + + t.Run("returns error when file doesn't exist", func(t *testing.T) { + setup := NewTestSetup(t) + defer setup.Cleanup() + + envPath := "/project/nonexistent.env" + env, err := parseEnvFile(envPath, setup.Fsys) + assert.Error(t, err) + assert.Nil(t, env) + assert.Contains(t, err.Error(), "failed to open env file") + }) +} + +func TestPopulatePerFunctionConfigs(t *testing.T) { + // Save original values + originalFunctionsDir := utils.FunctionsDir + defer func() { + utils.FunctionsDir = originalFunctionsDir + }() + + t.Run("populates function configs successfully", func(t *testing.T) { + setup := NewTestSetup(t) + defer setup.Cleanup() + + utils.FunctionsDir = "functions" + setup.SetupFunction("hello", "export default () => 
'hello'") + setup.SetupConfigWithFunctions() + + binds, configString, err := populatePerFunctionConfigs("/project", "", cast.Ptr(false), setup.Fsys) + assert.NoError(t, err) + assert.NotEmpty(t, binds) + assert.NotEmpty(t, configString) + + var config map[string]interface{} + err = json.Unmarshal([]byte(configString), &config) + assert.NoError(t, err) + assert.Contains(t, config, "hello") + }) + + t.Run("handles function config creation", func(t *testing.T) { + setup := NewTestSetup(t) + defer setup.Cleanup() + + utils.FunctionsDir = "functions" + setup.SetupFunction("enabled", "export default () => 'enabled'") + + _, configString, err := populatePerFunctionConfigs("/project", "", nil, setup.Fsys) + assert.NoError(t, err) + + var resultConfig map[string]interface{} + err = json.Unmarshal([]byte(configString), &resultConfig) + assert.NoError(t, err) + assert.Contains(t, resultConfig, "enabled") + + enabledConfig := resultConfig["enabled"].(map[string]interface{}) + assert.Contains(t, enabledConfig, "entrypointPath") + assert.Contains(t, enabledConfig, "verifyJWT") + }) + + t.Run("handles import map path", func(t *testing.T) { + setup := NewTestSetup(t) + defer setup.Cleanup() + + utils.FunctionsDir = "functions" + setup.SetupFunction("hello", "export default () => 'hello'") + setup.SetupImportMap("import_map.json", "{}") + + binds, configString, err := populatePerFunctionConfigs("/project", "import_map.json", nil, setup.Fsys) + assert.NoError(t, err) + assert.NotEmpty(t, binds) + assert.NotEmpty(t, configString) + }) + + t.Run("returns empty config when no functions exist", func(t *testing.T) { + setup := NewTestSetup(t) + defer setup.Cleanup() + + utils.FunctionsDir = "functions" + require.NoError(t, setup.Fsys.MkdirAll("functions", 0755)) + + _, configString, err := populatePerFunctionConfigs("/project", "", nil, setup.Fsys) + assert.NoError(t, err) + + var resultConfig map[string]interface{} + err = json.Unmarshal([]byte(configString), &resultConfig) + 
assert.NoError(t, err) + assert.Empty(t, resultConfig) + }) +} + +func TestServeFunctions(t *testing.T) { + // Save original values + originalConfig := utils.Config + originalDebug := viper.Get("DEBUG") + originalFunctionsDir := utils.FunctionsDir + defer func() { + utils.Config = originalConfig + viper.Set("DEBUG", originalDebug) + utils.FunctionsDir = originalFunctionsDir + }() + + t.Run("returns error on env file parsing failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Test with nonexistent file to trigger error + + // Test function + err := ServeFunctions(context.Background(), "nonexistent.env", nil, "", "postgresql://localhost:5432/test", RuntimeOption{}, fsys) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to open env file") + }) + + t.Run("returns error on function config failure", func(t *testing.T) { + // Setup config + utils.Config.Auth.AnonKey.Value = "anon_key" + utils.Config.Auth.ServiceRoleKey.Value = "service_role_key" + utils.Config.Auth.JwtSecret.Value = "jwt_secret" + utils.Config.Api.Port = 8000 + utils.Config.EdgeRuntime.Policy = "permissive" + utils.KongAliases = []string{"supabase_kong_test"} + + // Setup in-memory fs with invalid functions directory + fsys := afero.NewMemMapFs() + utils.FunctionsDir = "nonexistent" + + // Test function + err := ServeFunctions(context.Background(), "", nil, "", "postgresql://localhost:5432/test", RuntimeOption{}, fsys) + assert.Error(t, err) + }) +} diff --git a/internal/functions/serve/templates/main.ts b/internal/functions/serve/templates/main.ts index 534409a98..568c547f8 100644 --- a/internal/functions/serve/templates/main.ts +++ b/internal/functions/serve/templates/main.ts @@ -48,6 +48,7 @@ const DENO_SB_ERROR_MAP = new Map([ SB_SPECIFIC_ERROR_CODE.WorkerLimit, ], ]); +const GENERIC_FUNCTION_SERVE_MESSAGE = `Serving functions on http://127.0.0.1:${HOST_PORT}/functions/v1/` interface FunctionConfig { entrypointPath: string; @@ -228,9 +229,36 @@ 
Deno.serve({ }, onListen: () => { - console.log( - `Serving functions on http://127.0.0.1:${HOST_PORT}/functions/v1/\nUsing ${Deno.version.deno}`, - ); + try { + const functionsConfigString = Deno.env.get( + "SUPABASE_INTERNAL_FUNCTIONS_CONFIG" + ); + if (functionsConfigString) { + const MAX_FUNCTIONS_URL_EXAMPLES = 5 + const functionsConfig = JSON.parse(functionsConfigString) as Record< + string, + unknown + >; + const functionNames = Object.keys(functionsConfig); + const exampleFunctions = functionNames.slice(0, MAX_FUNCTIONS_URL_EXAMPLES); + const functionsUrls = exampleFunctions.map( + (fname) => ` - http://127.0.0.1:${HOST_PORT}/functions/v1/${fname}` + ); + const functionsExamplesMessages = functionNames.length > 0 + // Show some example function URLs + ? `\n${functionsUrls.join(`\n`)}${functionNames.length > MAX_FUNCTIONS_URL_EXAMPLES + // If we have more than MAX_FUNCTIONS_URL_EXAMPLES (5) functions to serve, show examples for the first 5 + // and a count for the remaining ones + ? `\n... and ${functionNames.length - MAX_FUNCTIONS_URL_EXAMPLES} more functions` + : ''}` + : '' + console.log(`${GENERIC_FUNCTION_SERVE_MESSAGE}${functionsExamplesMessages}\nUsing ${Deno.version.deno}`); + } + } catch (e) { + console.log( + `${GENERIC_FUNCTION_SERVE_MESSAGE}\nUsing ${Deno.version.deno}` + ); + } }, onError: e => { diff --git a/internal/utils/colors.go b/internal/utils/colors.go index ed710c4a2..0a447b188 100644 --- a/internal/utils/colors.go +++ b/internal/utils/colors.go @@ -13,6 +13,11 @@ func Yellow(str string) string { return lipgloss.NewStyle().Foreground(lipgloss.Color("11")).Render(str) } +// For info messages. +func Blue(str string) string { + return lipgloss.NewStyle().Foreground(lipgloss.Color("12")).Render(str) +} + // For errors.
func Red(str string) string { return lipgloss.NewStyle().Foreground(lipgloss.Color("9")).Render(str) diff --git a/internal/utils/container_output.go b/internal/utils/container_output.go index 8f417afd1..452c72a05 100644 --- a/internal/utils/container_output.go +++ b/internal/utils/container_output.go @@ -208,18 +208,13 @@ func ProcessDiffOutput(diffBytes []byte) ([]byte, error) { func ProcessPsqlOutput(out io.Reader, p Program) error { r, w := io.Pipe() - doneCh := make(chan struct{}, 1) + doneCh := make(chan struct{}) go func() { + defer close(doneCh) // closes when goroutine ends scanner := bufio.NewScanner(r) for scanner.Scan() { - select { - case <-doneCh: - return - default: - } - line := scanner.Text() p.Send(PsqlMsg(&line)) } @@ -227,13 +222,18 @@ func ProcessPsqlOutput(out io.Reader, p Program) error { var errBuf bytes.Buffer if _, err := stdcopy.StdCopy(w, &errBuf, out); err != nil { + w.Close() + <-doneCh // Wait for goroutine to finish return err } + w.Close() // Close the writer to signal EOF to the scanner + if errBuf.Len() > 0 { + <-doneCh // Wait for goroutine to finish return errors.New("Error running SQL: " + errBuf.String()) } - doneCh <- struct{}{} + <-doneCh // Wait for goroutine to finish p.Send(PsqlMsg(nil)) return nil diff --git a/internal/utils/deno.go b/internal/utils/deno.go index 5bece3a5a..408a4edcd 100644 --- a/internal/utils/deno.go +++ b/internal/utils/deno.go @@ -209,20 +209,31 @@ func CopyDenoScripts(ctx context.Context, fsys afero.Fs) (*DenoScriptDir, error) return &sd, nil } -func BindHostModules(cwd, relEntrypointPath, relImportMapPath string, fsys afero.Fs) ([]string, error) { - var modules []string +// ModulePaths contains both the raw paths and their Docker bind mount equivalents +type ModulePaths struct { + Paths []string // Raw host paths + Binds []string // Docker bind mount strings +} + +func BindHostModules(cwd, relEntrypointPath, relImportMapPath string, fsys afero.Fs) (*ModulePaths, error) { + var paths []string + var 
binds []string + bindModule := func(srcPath string, r io.Reader) error { hostPath := filepath.Join(cwd, filepath.FromSlash(srcPath)) dockerPath := ToDockerPath(hostPath) - modules = append(modules, hostPath+":"+dockerPath+":ro") + paths = append(paths, hostPath) + binds = append(binds, hostPath+":"+dockerPath+":ro") return nil } + importMap := function.ImportMap{} if imPath := filepath.ToSlash(relImportMapPath); len(imPath) > 0 { if err := importMap.LoadAsDeno(imPath, afero.NewIOFS(fsys), bindModule); err != nil { return nil, err } } + // Resolving all Import Graph addModule := func(unixPath string, w io.Writer) error { hostPath := filepath.FromSlash(unixPath) @@ -240,15 +251,21 @@ func BindHostModules(cwd, relEntrypointPath, relImportMapPath string, fsys afero return errors.Errorf("failed to copy file content: %w", err) } dockerPath := ToDockerPath(hostPath) - modules = append(modules, hostPath+":"+dockerPath+":ro") + paths = append(paths, hostPath) + binds = append(binds, hostPath+":"+dockerPath+":ro") return nil } + unixPath := filepath.ToSlash(relEntrypointPath) if err := importMap.WalkImportPaths(unixPath, addModule); err != nil { return nil, err } + // TODO: support scopes - return modules, nil + return &ModulePaths{ + Paths: paths, + Binds: binds, + }, nil } func ToDockerPath(absHostPath string) string { diff --git a/internal/utils/deno_test.go b/internal/utils/deno_test.go index c5b140a8f..10b47026c 100644 --- a/internal/utils/deno_test.go +++ b/internal/utils/deno_test.go @@ -29,13 +29,20 @@ import "./child/index.ts"` mods, err := BindHostModules("/app", "supabase/functions/hello/index.ts", "", fsys) // Check error assert.NoError(t, err) - assert.ElementsMatch(t, mods, []string{ + assert.ElementsMatch(t, mods.Binds, []string{ "/app/supabase/functions/hello/index.ts:/app/supabase/functions/hello/index.ts:ro", "/tmp/index.ts:/tmp/index.ts:ro", "/app/supabase/functions/common/index.ts:/app/supabase/functions/common/index.ts:ro", 
"/app/supabase/tests/index.ts:/app/supabase/tests/index.ts:ro", "/app/supabase/functions/hello/child/index.ts:/app/supabase/functions/hello/child/index.ts:ro", }) + assert.ElementsMatch(t, mods.Paths, []string{ + "/app/supabase/functions/hello/index.ts", + "/tmp/index.ts", + "/app/supabase/functions/common/index.ts", + "/app/supabase/tests/index.ts", + "/app/supabase/functions/hello/child/index.ts", + }) }) } diff --git a/internal/utils/logger.go b/internal/utils/logger.go index 0cde1df38..cab255791 100644 --- a/internal/utils/logger.go +++ b/internal/utils/logger.go @@ -1,15 +1,108 @@ package utils import ( + "context" + "fmt" "io" + "log/slog" "os" "github.com/spf13/viper" ) +var logger *slog.Logger + +func init() { + // Custom handler for simple colored output + handler := &simpleHandler{output: os.Stderr} + logger = slog.New(handler) +} + +// simpleHandler implements slog.Handler with simple colored output +type simpleHandler struct { + output io.Writer +} + +func (h *simpleHandler) Enabled(ctx context.Context, level slog.Level) bool { + return true +} + +func (h *simpleHandler) Handle(ctx context.Context, record slog.Record) error { + var prefix string + var colorFunc func(string) string + + switch record.Level { + case slog.LevelDebug: + prefix = "DEBUG:" + colorFunc = Aqua + case slog.LevelInfo: + prefix = "INFO:" + colorFunc = Blue + case slog.LevelWarn: + prefix = "WARNING:" + colorFunc = Yellow + case slog.LevelError: + prefix = "ERROR:" + colorFunc = Red + default: + prefix = "LOG:" + colorFunc = func(s string) string { return s } + } + + fmt.Fprintf(h.output, "%s %s\n", colorFunc(prefix), record.Message) + return nil +} + +func (h *simpleHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + return h +} + +func (h *simpleHandler) WithGroup(name string) slog.Handler { + return h +} + +// GetVerbosity returns the current verbosity level (0 = minimal, 1 = normal info, 2+ = more verbose) +func GetVerbosity() int { + return viper.GetInt("VERBOSITY") +} + 
func GetDebugLogger() io.Writer { if viper.GetBool("DEBUG") { return os.Stderr } return io.Discard } + +// Log logs a plain message with no formatting, colors, or prefixes over stdout +func Log(format string, args ...interface{}) { + message := fmt.Sprintf(format, args...) + fmt.Fprintf(os.Stdout, "%s", message) +} + +// Info logs an info message if the current SUPABASE_VERBOSITY level is >= the required level +func Info(level int, format string, args ...interface{}) { + if GetVerbosity() >= level { + logger.Info(fmt.Sprintf(format, args...)) + } +} + +// Debug logs a debug message when SUPABASE_DEBUG=true +func Debug(format string, args ...interface{}) { + if viper.GetBool("DEBUG") { + logger.Debug(fmt.Sprintf(format, args...)) + } +} + +// Warning logs a warning message +func Warning(format string, args ...interface{}) { + logger.Warn(fmt.Sprintf(format, args...)) +} + +// Error logs an error message +func Error(format string, args ...interface{}) { + logger.Error(fmt.Sprintf(format, args...)) +} + +func GetLogger() *slog.Logger { + return logger +}