From a9c3a182e2e6c830778169af26e7ea2e627e4570 Mon Sep 17 00:00:00 2001
From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com>
Date: Sat, 11 Jan 2025 11:24:29 -0500
Subject: [PATCH 01/34] start laying out prefetch

---
 pkg/blobfs.go          | 18 ++++++++++-----
 pkg/blobfs_node.go     |  8 +++++--
 pkg/blobfs_prefetch.go | 50 ++++++++++++++++++++++++++++++++++++++++++
 pkg/types.go           |  1 +
 4 files changed, 69 insertions(+), 8 deletions(-)
 create mode 100644 pkg/blobfs_prefetch.go

diff --git a/pkg/blobfs.go b/pkg/blobfs.go
index 8f726c5..de32b8f 100644
--- a/pkg/blobfs.go
+++ b/pkg/blobfs.go
@@ -67,12 +67,13 @@ type BlobFsSystemOpts struct {
 }
 
 type BlobFs struct {
-	ctx      context.Context
-	root     *FSNode
-	verbose  bool
-	Metadata *BlobCacheMetadata
-	Client   *BlobCacheClient
-	Config   BlobCacheConfig
+	ctx             context.Context
+	root            *FSNode
+	verbose         bool
+	Metadata        *BlobCacheMetadata
+	Client          *BlobCacheClient
+	Config          BlobCacheConfig
+	PrefetchManager *PrefetchManager
 }
 
 func Mount(ctx context.Context, opts BlobFsSystemOpts) (func() error, <-chan error, *fuse.Server, error) {
@@ -170,6 +171,11 @@ func NewFileSystem(ctx context.Context, opts BlobFsSystemOpts) (*BlobFs, error)
 		Metadata: metadata,
 	}
 
+	if opts.Config.BlobFs.Prefetch {
+		bfs.PrefetchManager = NewPrefetchManager(ctx, opts.Config)
+		bfs.PrefetchManager.Start()
+	}
+
 	rootID := GenerateFsID("/")
 	rootPID := "" // Root node has no parent
 	rootPath := "/"
diff --git a/pkg/blobfs_node.go b/pkg/blobfs_node.go
index 663b654..436b0e8 100644
--- a/pkg/blobfs_node.go
+++ b/pkg/blobfs_node.go
@@ -3,6 +3,7 @@ package blobcache
 import (
 	"context"
 	"fmt"
+	"log"
 	"path"
 	"strings"
 	"syscall"
@@ -128,8 +129,11 @@ func (n *FSNode) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (*
 		return nil, syscall.ENOENT
 	}
 
-	// TODO: stream file to a temp file in the container somewhere
-	// /tmp/cache/path/to/file
+	if n.filesystem.Config.BlobFs.Prefetch {
+		log.Printf("Prefetching file: %s", sourcePath)
+		// TODO: stream file to a temp file in the container somewhere
+		// /tmp/cache/path/to/file
+	}
 
 	out.Attr = *attr
 	return node, fs.OK
diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go
new file mode 100644
index 0000000..b7f6a94
--- /dev/null
+++ b/pkg/blobfs_prefetch.go
@@ -0,0 +1,50 @@
+package blobcache
+
+import (
+	"context"
+	"sync"
+	"time"
+)
+
+const (
+	PrefetchEvictionInterval = 30 * time.Second
+	PrefetchIdleTTL          = 60 * time.Second // e.g. remove buffer if no read in the past 60s
+	PrefetchBufferSize       = 0                // if 0, no specific limit, just store all
+)
+
+type PrefetchManager struct {
+	ctx     context.Context
+	config  BlobCacheConfig
+	buffers sync.Map
+}
+
+func NewPrefetchManager(ctx context.Context, config BlobCacheConfig) *PrefetchManager {
+	return &PrefetchManager{
+		ctx:     ctx,
+		config:  config,
+		buffers: sync.Map{},
+	}
+}
+
+func (pm *PrefetchManager) Start() {
+	go pm.evictIdleBuffers()
+}
+
+// GetPrefetchBuffer returns an existing prefetch buffer if it exists, or nil.
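+// Buffers live in a sync.Map keyed by content hash, so concurrent readers
+// can look them up without taking any additional locks.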
+func (pm *PrefetchManager) GetPrefetchBuffer(hash string) *PrefetchBuffer {
+	if val, ok := pm.buffers.Load(hash); ok {
+		return val.(*PrefetchBuffer)
+	}
+
+	return nil
+}
+
+func (pm *PrefetchManager) evictIdleBuffers() {
+
+}
+
+type PrefetchBuffer struct {
+	hash     string
+	buffer   []byte
+	lastRead time.Time
+}
diff --git a/pkg/types.go b/pkg/types.go
index ab8bb30..80bbb8e 100644
--- a/pkg/types.go
+++ b/pkg/types.go
@@ -91,6 +91,7 @@ type RedisConfig struct {
 
 type BlobFsConfig struct {
 	Enabled            bool           `key:"enabled" json:"enabled"`
+	Prefetch           bool           `key:"prefetch" json:"prefetch"`
 	MountPoint         string         `key:"mountPoint" json:"mount_point"`
 	Sources            []SourceConfig `key:"sources" json:"sources"`
 	MaxBackgroundTasks int            `key:"maxBackgroundTasks" json:"max_background_tasks"`

From 0f57f40d6b86cae0514ae37598c4793e2f3fb6b4 Mon Sep 17 00:00:00 2001
From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com>
Date: Sat, 11 Jan 2025 12:29:09 -0500
Subject: [PATCH 02/34] add separate throughput test

---
 .gitignore                             |  5 ++-
 Makefile                               |  5 ++-
 e2e/fs/main.go                         | 52 ++++++++++++++++++++++++++
 e2e/{testclient => throughput}/main.go |  0
 pkg/blobfs.go                          |  2 +-
 pkg/blobfs_node.go                     |  9 +++--
 pkg/config.default.yaml                |  1 +
 7 files changed, 67 insertions(+), 7 deletions(-)
 create mode 100644 e2e/fs/main.go
 rename e2e/{testclient => throughput}/main.go (100%)

diff --git a/.gitignore b/.gitignore
index 9e8f57c..5bd184c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,10 +1,13 @@
 *.tgz
 .DS_Store
 bin/blobcache
+bin/throughput
+bin/fs
 bin/testclient
 build.sh
 tmp/
 config.yaml
-e2e/testclient/testdata/*.bin
+e2e/throughput/testdata/*.bin
+e2e/fs/testdata/*.bin
 daemonset.yaml
 output.bin
\ No newline at end of file
diff --git a/Makefile b/Makefile
index e956356..7728648 100644
--- a/Makefile
+++ b/Makefile
@@ -24,5 +24,6 @@ publish-chart:
 	helm push beam-blobcache-v2-chart-$(chartVersion).tgz oci://public.ecr.aws/n4e0e1y0
 	rm beam-blobcache-v2-chart-$(chartVersion).tgz
 
-testclient:
-	GOOS=linux GOARCH=amd64 go build -o bin/testclient e2e/testclient/main.go
+testclients:
+	GOOS=linux GOARCH=amd64 go build -o bin/throughput e2e/throughput/main.go
+	GOOS=linux GOARCH=amd64 go build -o bin/fs e2e/fs/main.go
diff --git a/e2e/fs/main.go b/e2e/fs/main.go
new file mode 100644
index 0000000..3043080
--- /dev/null
+++ b/e2e/fs/main.go
@@ -0,0 +1,52 @@
+package main
+
+import (
+	"context"
+	"flag"
+	"log"
+	"os"
+	"os/signal"
+	"syscall"
+
+	blobcache "github.com/beam-cloud/blobcache-v2/pkg"
+)
+
+var (
+	totalIterations int
+	checkContent    bool
+)
+
+type TestResult struct {
+	ElapsedTime        float64
+	ContentCheckPassed bool
+}
+
+func main() {
+	flag.IntVar(&totalIterations, "iterations", 3, "Number of iterations to run the tests")
+	flag.BoolVar(&checkContent, "checkcontent", true, "Check the content hash after receiving data")
+	flag.Parse()
+
+	configManager, err := blobcache.NewConfigManager[blobcache.BlobCacheConfig]()
+	if err != nil {
+		log.Fatalf("Failed to load config: %v\n", err)
+	}
+
+	cfg := configManager.GetConfig()
+
+	// Initialize logger
+	blobcache.InitLogger(cfg.DebugMode)
+
+	ctx := context.Background()
+
+	_, err = blobcache.NewBlobCacheClient(ctx, cfg)
+	if err != nil {
+		log.Fatalf("Unable to create client: %v\n", err)
+	}
+
+	// Block until Ctrl+C (SIGINT) or SIGTERM is received
+	sigChan := make(chan os.Signal, 1)
+	signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
+	<-sigChan
+
+	log.Println("Received interrupt or termination signal, exiting.")
+}
diff --git a/e2e/testclient/main.go b/e2e/throughput/main.go
similarity index 100%
rename from e2e/testclient/main.go
rename to e2e/throughput/main.go
diff --git a/pkg/blobfs.go b/pkg/blobfs.go
index de32b8f..bfd01a9 100644
--- a/pkg/blobfs.go
+++ b/pkg/blobfs.go
@@ -182,7 +182,7 @@ func NewFileSystem(ctx context.Context, opts BlobFsSystemOpts) (*BlobFs, error)
 
 	dirMeta, err := metadata.GetFsNode(bfs.ctx, rootID)
 	if err != nil || dirMeta == nil {
-		log.Printf("Root node metadata not found, creating it now...\n")
+		Logger.Infof("Root node metadata not found, creating it now...")
 
 		dirMeta = &BlobFsMetadata{PID: rootPID, ID: rootID, Path: rootPath, Ino: 1, Mode: fuse.S_IFDIR | 0755}
 
diff --git a/pkg/blobfs_node.go b/pkg/blobfs_node.go
index 436b0e8..d15ed68 100644
--- a/pkg/blobfs_node.go
+++ b/pkg/blobfs_node.go
@@ -119,7 +119,7 @@ func (n *FSNode) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (*
 	sourcePath := strings.ReplaceAll(fullPath, "%", "/")
 
 	n.log("Storing content from source with path: %s", sourcePath)
-	_, err := n.filesystem.Client.StoreContentFromSource(sourcePath, 0)
+	hash, err := n.filesystem.Client.StoreContentFromSource(sourcePath, 0)
 	if err != nil {
 		return nil, syscall.ENOENT
 	}
@@ -131,8 +131,7 @@ func (n *FSNode) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (*
 
 	if n.filesystem.Config.BlobFs.Prefetch {
 		log.Printf("Prefetching file: %s", sourcePath)
-		// TODO: stream file to a temp file in the container somewhere
-		// /tmp/cache/path/to/file
+		n.filesystem.PrefetchManager.GetPrefetchBuffer(hash)
 	}
 
 	out.Attr = *attr
@@ -174,6 +173,10 @@ func (n *FSNode) Read(ctx context.Context, f fs.FileHandle, dest []byte, off int
 		return fuse.ReadResultData(dest[:0]), fs.OK
 	}
 
+	if n.filesystem.Config.BlobFs.Prefetch {
+
+	}
+
 	buffer, err := n.filesystem.Client.GetContent(n.bfsNode.Hash, off, int64(len(dest)))
 	if err != nil {
 		return nil, syscall.EIO
diff --git a/pkg/config.default.yaml b/pkg/config.default.yaml
index 98ff4d0..fc6427b 100644
--- a/pkg/config.default.yaml
+++ b/pkg/config.default.yaml
@@ -12,6 +12,7 @@ discoveryMode: metadata
 directIO: false
 options: []
 blobfs:
+  prefetch: false
  enabled: false
  mountPoint: /tmp/test
  maxBackgroundTasks: 512

From 8767978177d75d26503d4a36997950e3645c65e0 Mon Sep 17 00:00:00 2001
From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com>
Date: Sat, 11 Jan 2025 14:35:53 -0500
Subject: [PATCH 03/34] clean up logs

---
 cmd/main.go              | 2 +-
 e2e/fs/main.go           | 4 ++--
 e2e/throughput/main.go   | 8 ++++----
 pkg/blobfs.go            | 5 ++---
 pkg/client.go            | 6 +++---
 pkg/server.go            | 6 +++---
 pkg/source.go            | 6 +++---
 pkg/source_juicefs.go    | 4 ++--
 pkg/source_mountpoint.go | 2 +-
 9 files changed, 21 insertions(+), 22 deletions(-)

diff --git a/cmd/main.go b/cmd/main.go
index 8c4427a..07871dd 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -10,7 +10,7 @@ import (
 func main() {
 	configManager, err := blobcache.NewConfigManager[blobcache.BlobCacheConfig]()
 	if err != nil {
-		log.Fatalf("Failed to load config: %v\n", err)
+		log.Fatalf("Failed to load config: %v", err)
 	}
 
 	ctx := context.Background()
diff --git a/e2e/fs/main.go b/e2e/fs/main.go
index 3043080..1ffd31f 100644
--- a/e2e/fs/main.go
+++ b/e2e/fs/main.go
@@ -28,7 +28,7 @@ func main() {
 
 	configManager, err := blobcache.NewConfigManager[blobcache.BlobCacheConfig]()
 	if err != nil {
-		log.Fatalf("Failed to load config: %v\n", err)
+		log.Fatalf("Failed to load config: %v", err)
 	}
 
 	cfg := configManager.GetConfig()
@@ -40,7 +40,7 @@ func main() {
 
 	_, err = blobcache.NewBlobCacheClient(ctx, cfg)
 	if err != nil {
-		log.Fatalf("Unable to create client: %v\n", err)
+ log.Fatalf("Unable to create client: %v", err) } // Block until Ctrl+C (SIGINT) or SIGTERM is received diff --git a/e2e/throughput/main.go b/e2e/throughput/main.go index d1d9044..e20e778 100644 --- a/e2e/throughput/main.go +++ b/e2e/throughput/main.go @@ -30,7 +30,7 @@ func main() { configManager, err := blobcache.NewConfigManager[blobcache.BlobCacheConfig]() if err != nil { - log.Fatalf("Failed to load config: %v\n", err) + log.Fatalf("Failed to load config: %v", err) } cfg := configManager.GetConfig() @@ -42,7 +42,7 @@ func main() { client, err := blobcache.NewBlobCacheClient(ctx, cfg) if err != nil { - log.Fatalf("Unable to create client: %v\n", err) + log.Fatalf("Unable to create client: %v", err) } filePath := "e2e/testclient/testdata/test3.bin" @@ -101,7 +101,7 @@ func storeFile(client *blobcache.BlobCacheClient, filePath string) (string, erro n, err := file.Read(buf) if err != nil && err != io.EOF { - log.Fatalf("err reading file: %v\n", err) + log.Fatalf("err reading file: %v", err) } if n == 0 { @@ -155,7 +155,7 @@ func TestGetContentStream(client *blobcache.BlobCacheClient, hash string, fileSi // Verify received content's hash if checkContent { - log.Printf("Verifying hash for GetContentStream\n") + log.Printf("Verifying hash for GetContentStream") hashBytes := sha256.Sum256(contentStream) retrievedHash := hex.EncodeToString(hashBytes[:]) diff --git a/pkg/blobfs.go b/pkg/blobfs.go index bfd01a9..c4c2f48 100644 --- a/pkg/blobfs.go +++ b/pkg/blobfs.go @@ -6,7 +6,6 @@ import ( "encoding/binary" "encoding/hex" "fmt" - "log" "os" "os/exec" "strings" @@ -78,7 +77,7 @@ type BlobFs struct { func Mount(ctx context.Context, opts BlobFsSystemOpts) (func() error, <-chan error, *fuse.Server, error) { mountPoint := opts.Config.BlobFs.MountPoint - Logger.Infof("Mounting to %s\n", mountPoint) + Logger.Infof("Mounting to %s", mountPoint) if _, err := os.Stat(mountPoint); os.IsNotExist(err) { err = os.MkdirAll(mountPoint, 0755) @@ -188,7 +187,7 @@ func NewFileSystem(ctx context.Context, opts BlobFsSystemOpts) (*BlobFs, error) err := metadata.SetFsNode(bfs.ctx, rootID, dirMeta) if err != nil { - log.Fatalf("Unable to create blobfs root node dir metdata: %+v\n", err) + Logger.Fatalf("Unable to create blobfs root node dir metdata: %+v", err) } } diff --git a/pkg/client.go b/pkg/client.go index 14a885c..46207b7 100644 --- a/pkg/client.go +++ b/pkg/client.go @@ -382,7 +382,7 @@ func (c *BlobCacheClient) getGRPCClient(ctx context.Context, request *ClientRequ } defer c.metadata.RemoveClientLock(ctx, c.hostname, request.hash) - Logger.Infof("Content not available in any nearby cache - repopulating from: %s\n", entry.SourcePath) + Logger.Infof("Content not available in any nearby cache - repopulating from: %s", entry.SourcePath) host, err = c.hostMap.Closest(closestHostTimeout) if err != nil { return nil, nil, err @@ -402,7 +402,7 @@ func (c *BlobCacheClient) getGRPCClient(ctx context.Context, request *ClientRequ } if resp.Ok { - Logger.Infof("Content repopulated from source: %s\n", entry.SourcePath) + Logger.Infof("Content repopulated from source: %s", entry.SourcePath) c.mu.Lock() c.localHostCache[request.hash] = &localClientCache{ host: host, @@ -494,7 +494,7 @@ func (c *BlobCacheClient) StoreContent(chunks chan []byte) (string, error) { return "", err } - Logger.Debugf("Elapsed time to send content: %v\n", time.Since(start)) + Logger.Debugf("Elapsed time to send content: %v", time.Since(start)) return resp.Hash, nil } diff --git a/pkg/server.go b/pkg/server.go index 32bf0fd..d93cc46 100644 --- 
a/pkg/server.go +++ b/pkg/server.go @@ -81,11 +81,11 @@ func NewCacheService(ctx context.Context, cfg BlobCacheConfig) (*CacheService, e for _, sourceConfig := range cfg.BlobFs.Sources { _, err := NewSource(sourceConfig) if err != nil { - Logger.Errorf("Failed to configure content source: %+v\n", err) + Logger.Errorf("Failed to configure content source: %+v", err) continue } - Logger.Infof("Configured and mounted source: %+v\n", sourceConfig.FilesystemName) + Logger.Infof("Configured and mounted source: %+v", sourceConfig.FilesystemName) } } @@ -162,7 +162,7 @@ func (cs *CacheService) StartServer(port uint) error { ) proto.RegisterBlobCacheServer(s, cs) - Logger.Infof("Running @ %s%s, cfg: %+v\n", cs.hostname, addr, cs.cfg) + Logger.Infof("Running @ %s%s, cfg: %+v", cs.hostname, addr, cs.cfg) go s.Serve(localListener) go s.Serve(tailscaleListener) diff --git a/pkg/source.go b/pkg/source.go index 34f6ae6..822365c 100644 --- a/pkg/source.go +++ b/pkg/source.go @@ -39,13 +39,13 @@ func NewSource(config SourceConfig) (Source, error) { // NOTE: this is a no-op if already formatted err = s.Format(config.FilesystemName) if err != nil { - Logger.Fatalf("Unable to format filesystem: %+v\n", err) + Logger.Fatalf("Unable to format filesystem: %+v", err) } // Mount filesystem err = s.Mount(config.FilesystemPath) if err != nil { - Logger.Fatalf("Unable to mount filesystem: %+v\n", err) + Logger.Fatalf("Unable to mount filesystem: %+v", err) } return s, nil @@ -58,7 +58,7 @@ func NewSource(config SourceConfig) (Source, error) { // Mount filesystem err = s.Mount(config.FilesystemPath) if err != nil { - Logger.Fatalf("Unable to mount filesystem: %+v\n", err) + Logger.Fatalf("Unable to mount filesystem: %+v", err) } return s, nil diff --git a/pkg/source_juicefs.go b/pkg/source_juicefs.go index c5699b7..74452de 100644 --- a/pkg/source_juicefs.go +++ b/pkg/source_juicefs.go @@ -21,7 +21,7 @@ func NewJuiceFsSource(config JuiceFSConfig) (Source, error) { } func (s *JuiceFsSource) Mount(localPath string) error { - Logger.Infof("JuiceFS filesystem mounting to: '%s'\n", localPath) + Logger.Infof("JuiceFS filesystem mounting to: '%s'", localPath) cacheSize := strconv.FormatInt(s.config.CacheSize, 10) @@ -126,6 +126,6 @@ func (s *JuiceFsSource) Unmount(localPath string) error { return fmt.Errorf("error executing juicefs umount: %v, output: %s", err, string(output)) } - Logger.Infof("JuiceFS filesystem unmounted from: '%s'\n", localPath) + Logger.Infof("JuiceFS filesystem unmounted from: '%s'", localPath) return nil } diff --git a/pkg/source_mountpoint.go b/pkg/source_mountpoint.go index 36dc287..02428ff 100644 --- a/pkg/source_mountpoint.go +++ b/pkg/source_mountpoint.go @@ -48,7 +48,7 @@ func (s *MountPointSource) Mount(localPath string) error { } }() - Logger.Infof("Mountpoint filesystem is being mounted to: '%s'\n", localPath) + Logger.Infof("Mountpoint filesystem is being mounted to: '%s'", localPath) return nil } From d01cf8ca28b312f9c7bc09970b8f7a4123d491fd Mon Sep 17 00:00:00 2001 From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com> Date: Sun, 12 Jan 2025 10:03:34 -0500 Subject: [PATCH 04/34] refactor config --- e2e/throughput/main.go | 2 +- pkg/blobfs.go | 2 +- pkg/blobfs_node.go | 11 ++++++++--- pkg/blobfs_prefetch.go | 18 +++++++++++++++++- pkg/config.default.yaml | 5 +++-- pkg/types.go | 25 +++++++++++++++---------- 6 files changed, 45 insertions(+), 18 deletions(-) diff --git a/e2e/throughput/main.go b/e2e/throughput/main.go index e20e778..a4d7320 100644 --- a/e2e/throughput/main.go +++ 
b/e2e/throughput/main.go @@ -45,7 +45,7 @@ func main() { log.Fatalf("Unable to create client: %v", err) } - filePath := "e2e/testclient/testdata/test3.bin" + filePath := "e2e/throughput/testdata/test3.bin" b, err := os.ReadFile(filePath) if err != nil { log.Fatalf("Unable to read input file: %v\n", err) diff --git a/pkg/blobfs.go b/pkg/blobfs.go index c4c2f48..c72c459 100644 --- a/pkg/blobfs.go +++ b/pkg/blobfs.go @@ -170,7 +170,7 @@ func NewFileSystem(ctx context.Context, opts BlobFsSystemOpts) (*BlobFs, error) Metadata: metadata, } - if opts.Config.BlobFs.Prefetch { + if opts.Config.BlobFs.Prefetch.Enabled { bfs.PrefetchManager = NewPrefetchManager(ctx, opts.Config) bfs.PrefetchManager.Start() } diff --git a/pkg/blobfs_node.go b/pkg/blobfs_node.go index d15ed68..9ea3c87 100644 --- a/pkg/blobfs_node.go +++ b/pkg/blobfs_node.go @@ -129,7 +129,7 @@ func (n *FSNode) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (* return nil, syscall.ENOENT } - if n.filesystem.Config.BlobFs.Prefetch { + if n.filesystem.Config.BlobFs.Prefetch.Enabled { log.Printf("Prefetching file: %s", sourcePath) n.filesystem.PrefetchManager.GetPrefetchBuffer(hash) } @@ -173,8 +173,13 @@ func (n *FSNode) Read(ctx context.Context, f fs.FileHandle, dest []byte, off int return fuse.ReadResultData(dest[:0]), fs.OK } - if n.filesystem.Config.BlobFs.Prefetch { - + log.Printf("Reading file: %s, offset: %v, length: %v", n.bfsNode.Path, off, len(dest)) + if n.filesystem.Config.BlobFs.Prefetch.Enabled { + buffer := n.filesystem.PrefetchManager.GetPrefetchBuffer(n.bfsNode.Hash) + if buffer != nil { + log.Printf("Prefetch buffer found for file: %s", n.bfsNode.Path) + return fuse.ReadResultData(buffer.buffer), fs.OK + } } buffer, err := n.filesystem.Client.GetContent(n.bfsNode.Hash, off, int64(len(dest))) diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go index b7f6a94..1015a98 100644 --- a/pkg/blobfs_prefetch.go +++ b/pkg/blobfs_prefetch.go @@ -8,7 +8,7 @@ import ( const ( PrefetchEvictionInterval = 30 * time.Second - PrefetchIdleTTL = 60 * time.Second // e.g. 
remove buffer if no read in the past 60s + PrefetchIdleTTL = 60 * time.Second // remove stale buffers if no read in the past 60s PrefetchBufferSize = 0 // if 0, no specific limit, just store all ) @@ -40,6 +40,22 @@ func (pm *PrefetchManager) GetPrefetchBuffer(hash string) *PrefetchBuffer { } func (pm *PrefetchManager) evictIdleBuffers() { + for { + select { + case <-pm.ctx.Done(): + return + case <-time.After(PrefetchEvictionInterval): + pm.buffers.Range(func(key, value any) bool { + buffer := value.(*PrefetchBuffer) + + if time.Since(buffer.lastRead) > PrefetchIdleTTL { + pm.buffers.Delete(key) + } + + return true + }) + } + } } diff --git a/pkg/config.default.yaml b/pkg/config.default.yaml index fc6427b..6ae8524 100644 --- a/pkg/config.default.yaml +++ b/pkg/config.default.yaml @@ -12,8 +12,9 @@ discoveryMode: metadata directIO: false options: [] blobfs: - prefetch: false - enabled: false + prefetch: + enabled: false + idleTtlS: 60 mountPoint: /tmp/test maxBackgroundTasks: 512 maxReadAheadKB: 128 diff --git a/pkg/types.go b/pkg/types.go index 80bbb8e..cd759d5 100644 --- a/pkg/types.go +++ b/pkg/types.go @@ -90,16 +90,21 @@ type RedisConfig struct { } type BlobFsConfig struct { - Enabled bool `key:"enabled" json:"enabled"` - Prefetch bool `key:"prefetch" json:"prefetch"` - MountPoint string `key:"mountPoint" json:"mount_point"` - Sources []SourceConfig `key:"sources" json:"sources"` - MaxBackgroundTasks int `key:"maxBackgroundTasks" json:"max_background_tasks"` - MaxWriteKB int `key:"maxWriteKB" json:"max_write_kb"` - MaxReadAheadKB int `key:"maxReadAheadKB" json:"max_read_ahead_kb"` - DirectMount bool `key:"directMount" json:"direct_mount"` - DirectIO bool `key:"directIO" json:"direct_io"` - Options []string `key:"options" json:"options"` + Enabled bool `key:"enabled" json:"enabled"` + Prefetch BlobFsPrefetchConfig `key:"prefetch" json:"prefetch"` + MountPoint string `key:"mountPoint" json:"mount_point"` + Sources []SourceConfig `key:"sources" json:"sources"` + MaxBackgroundTasks int `key:"maxBackgroundTasks" json:"max_background_tasks"` + MaxWriteKB int `key:"maxWriteKB" json:"max_write_kb"` + MaxReadAheadKB int `key:"maxReadAheadKB" json:"max_read_ahead_kb"` + DirectMount bool `key:"directMount" json:"direct_mount"` + DirectIO bool `key:"directIO" json:"direct_io"` + Options []string `key:"options" json:"options"` +} + +type BlobFsPrefetchConfig struct { + Enabled bool `key:"enabled" json:"enabled"` + IdleTtlS int `key:"idleTtlS" json:"idle_ttl_s"` } type SourceConfig struct { From 139f49e4cfa5547b2b97535e85ebc67612830313 Mon Sep 17 00:00:00 2001 From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com> Date: Sun, 12 Jan 2025 11:20:41 -0500 Subject: [PATCH 05/34] wip --- pkg/blobfs_node.go | 18 +++++++----------- pkg/blobfs_prefetch.go | 34 +++++++++++++++++++++++++++++++--- pkg/config.default.yaml | 2 ++ pkg/types.go | 6 ++++-- 4 files changed, 44 insertions(+), 16 deletions(-) diff --git a/pkg/blobfs_node.go b/pkg/blobfs_node.go index 9ea3c87..991319f 100644 --- a/pkg/blobfs_node.go +++ b/pkg/blobfs_node.go @@ -119,7 +119,7 @@ func (n *FSNode) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (* sourcePath := strings.ReplaceAll(fullPath, "%", "/") n.log("Storing content from source with path: %s", sourcePath) - hash, err := n.filesystem.Client.StoreContentFromSource(sourcePath, 0) + _, err := n.filesystem.Client.StoreContentFromSource(sourcePath, 0) if err != nil { return nil, syscall.ENOENT } @@ -129,11 +129,6 @@ func (n *FSNode) Lookup(ctx context.Context, 
name string, out *fuse.EntryOut) (* return nil, syscall.ENOENT } - if n.filesystem.Config.BlobFs.Prefetch.Enabled { - log.Printf("Prefetching file: %s", sourcePath) - n.filesystem.PrefetchManager.GetPrefetchBuffer(hash) - } - out.Attr = *attr return node, fs.OK } @@ -173,12 +168,13 @@ func (n *FSNode) Read(ctx context.Context, f fs.FileHandle, dest []byte, off int return fuse.ReadResultData(dest[:0]), fs.OK } - log.Printf("Reading file: %s, offset: %v, length: %v", n.bfsNode.Path, off, len(dest)) - if n.filesystem.Config.BlobFs.Prefetch.Enabled { - buffer := n.filesystem.PrefetchManager.GetPrefetchBuffer(n.bfsNode.Hash) + // If pre-fetch is enabled and the file is large enough, try to prefetch the file using streaming + if n.filesystem.Config.BlobFs.Prefetch.Enabled && n.bfsNode.Attr.Size >= n.filesystem.Config.BlobFs.Prefetch.MinSizeBytes { + log.Printf("Reading file: %s, offset: %v, length: %v", n.bfsNode.Path, off, len(dest)) + + buffer := n.filesystem.PrefetchManager.GetPrefetchBuffer(n.bfsNode.Hash, n.bfsNode.Attr.Size) if buffer != nil { - log.Printf("Prefetch buffer found for file: %s", n.bfsNode.Path) - return fuse.ReadResultData(buffer.buffer), fs.OK + return fuse.ReadResultData(buffer.GetRange(uint64(off), uint64(len(dest)))), fs.OK } } diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go index 1015a98..abc2995 100644 --- a/pkg/blobfs_prefetch.go +++ b/pkg/blobfs_prefetch.go @@ -31,12 +31,14 @@ func (pm *PrefetchManager) Start() { } // GetPrefetchBuffer returns an existing prefetch buffer if it exists, or nil. -func (pm *PrefetchManager) GetPrefetchBuffer(hash string) *PrefetchBuffer { +func (pm *PrefetchManager) GetPrefetchBuffer(hash string, fileSize uint64) *PrefetchBuffer { if val, ok := pm.buffers.Load(hash); ok { return val.(*PrefetchBuffer) } - return nil + newBuffer := NewPrefetchBuffer(hash, fileSize, pm.config.BlobFs.Prefetch.MaxBufferSizeBytes) + pm.buffers.Store(hash, newBuffer) + return newBuffer } func (pm *PrefetchManager) evictIdleBuffers() { @@ -48,7 +50,7 @@ func (pm *PrefetchManager) evictIdleBuffers() { pm.buffers.Range(func(key, value any) bool { buffer := value.(*PrefetchBuffer) - if time.Since(buffer.lastRead) > PrefetchIdleTTL { + if buffer.IsStale() { pm.buffers.Delete(key) } @@ -63,4 +65,30 @@ type PrefetchBuffer struct { hash string buffer []byte lastRead time.Time + fileSize uint64 +} + +func NewPrefetchBuffer(hash string, fileSize uint64, bufferSize uint64) *PrefetchBuffer { + return &PrefetchBuffer{ + hash: hash, + lastRead: time.Now(), + buffer: make([]byte, bufferSize), + fileSize: fileSize, + } +} + +func (pb *PrefetchBuffer) IsStale() bool { + return time.Since(pb.lastRead) > PrefetchIdleTTL +} + +func (pb *PrefetchBuffer) GetRange(offset uint64, length uint64) []byte { + if offset+length > uint64(len(pb.buffer)) { + return nil + } + + go func() { + pb.lastRead = time.Now() + }() + + return pb.buffer[offset : offset+length] } diff --git a/pkg/config.default.yaml b/pkg/config.default.yaml index 6ae8524..1b297f4 100644 --- a/pkg/config.default.yaml +++ b/pkg/config.default.yaml @@ -15,6 +15,8 @@ blobfs: prefetch: enabled: false idleTtlS: 60 + minSizeBytes: 1048576 # 1MB + maxBufferSizeBytes: 134217728 # 128MB mountPoint: /tmp/test maxBackgroundTasks: 512 maxReadAheadKB: 128 diff --git a/pkg/types.go b/pkg/types.go index cd759d5..a18944d 100644 --- a/pkg/types.go +++ b/pkg/types.go @@ -103,8 +103,10 @@ type BlobFsConfig struct { } type BlobFsPrefetchConfig struct { - Enabled bool `key:"enabled" json:"enabled"` - IdleTtlS int `key:"idleTtlS" 
json:"idle_ttl_s"` + Enabled bool `key:"enabled" json:"enabled"` + IdleTtlS int `key:"idleTtlS" json:"idle_ttl_s"` + MinSizeBytes uint64 `key:"minSizeBytes" json:"min_size_bytes"` + MaxBufferSizeBytes uint64 `key:"maxBufferSizeBytes" json:"max_buffer_size_bytes"` } type SourceConfig struct { From 1bb361612b3e18fea7ef26e948fd013b693a02ae Mon Sep 17 00:00:00 2001 From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com> Date: Sun, 12 Jan 2025 12:32:03 -0500 Subject: [PATCH 06/34] somewhat working prefetch --- pkg/blobfs.go | 2 +- pkg/blobfs_prefetch.go | 148 +++++++++++++++++++++++++++++++++++------ pkg/server.go | 2 +- 3 files changed, 130 insertions(+), 22 deletions(-) diff --git a/pkg/blobfs.go b/pkg/blobfs.go index c72c459..b135c55 100644 --- a/pkg/blobfs.go +++ b/pkg/blobfs.go @@ -171,7 +171,7 @@ func NewFileSystem(ctx context.Context, opts BlobFsSystemOpts) (*BlobFs, error) } if opts.Config.BlobFs.Prefetch.Enabled { - bfs.PrefetchManager = NewPrefetchManager(ctx, opts.Config) + bfs.PrefetchManager = NewPrefetchManager(ctx, opts.Config, opts.Client) bfs.PrefetchManager.Start() } diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go index abc2995..686524c 100644 --- a/pkg/blobfs_prefetch.go +++ b/pkg/blobfs_prefetch.go @@ -2,6 +2,7 @@ package blobcache import ( "context" + "log" "sync" "time" ) @@ -16,13 +17,15 @@ type PrefetchManager struct { ctx context.Context config BlobCacheConfig buffers sync.Map + client *BlobCacheClient } -func NewPrefetchManager(ctx context.Context, config BlobCacheConfig) *PrefetchManager { +func NewPrefetchManager(ctx context.Context, config BlobCacheConfig, client *BlobCacheClient) *PrefetchManager { return &PrefetchManager{ ctx: ctx, config: config, buffers: sync.Map{}, + client: client, } } @@ -30,13 +33,22 @@ func (pm *PrefetchManager) Start() { go pm.evictIdleBuffers() } -// GetPrefetchBuffer returns an existing prefetch buffer if it exists, or nil. 
+// GetPrefetchBuffer returns an existing prefetch buffer if it exists, or nil func (pm *PrefetchManager) GetPrefetchBuffer(hash string, fileSize uint64) *PrefetchBuffer { if val, ok := pm.buffers.Load(hash); ok { return val.(*PrefetchBuffer) } - newBuffer := NewPrefetchBuffer(hash, fileSize, pm.config.BlobFs.Prefetch.MaxBufferSizeBytes) + ctx, cancel := context.WithCancel(pm.ctx) + newBuffer := NewPrefetchBuffer(PrefetchOpts{ + Ctx: ctx, + CancelFunc: cancel, + Hash: hash, + FileSize: fileSize, + BufferSize: pm.config.BlobFs.Prefetch.MaxBufferSizeBytes, + Client: pm.client, + }) + pm.buffers.Store(hash, newBuffer) return newBuffer } @@ -51,6 +63,7 @@ func (pm *PrefetchManager) evictIdleBuffers() { buffer := value.(*PrefetchBuffer) if buffer.IsStale() { + buffer.Stop() pm.buffers.Delete(key) } @@ -62,33 +75,128 @@ func (pm *PrefetchManager) evictIdleBuffers() { } type PrefetchBuffer struct { - hash string - buffer []byte - lastRead time.Time - fileSize uint64 + ctx context.Context + cancelFunc context.CancelFunc + hash string + buffers map[uint64]*internalBuffer + lastRead time.Time + fileSize uint64 + client *BlobCacheClient + mu sync.Mutex + cond *sync.Cond + bufferSize uint64 +} + +type internalBuffer struct { + data []byte + readLength uint64 + fetching bool } -func NewPrefetchBuffer(hash string, fileSize uint64, bufferSize uint64) *PrefetchBuffer { - return &PrefetchBuffer{ - hash: hash, - lastRead: time.Now(), - buffer: make([]byte, bufferSize), - fileSize: fileSize, +type PrefetchOpts struct { + Ctx context.Context + CancelFunc context.CancelFunc + Hash string + FileSize uint64 + BufferSize uint64 + Offset uint64 + Client *BlobCacheClient +} + +func NewPrefetchBuffer(opts PrefetchOpts) *PrefetchBuffer { + pb := &PrefetchBuffer{ + ctx: opts.Ctx, + cancelFunc: opts.CancelFunc, + hash: opts.Hash, + lastRead: time.Now(), + buffers: make(map[uint64]*internalBuffer), + fileSize: opts.FileSize, + client: opts.Client, + bufferSize: opts.BufferSize, } + pb.cond = sync.NewCond(&pb.mu) + return pb } func (pb *PrefetchBuffer) IsStale() bool { return time.Since(pb.lastRead) > PrefetchIdleTTL } -func (pb *PrefetchBuffer) GetRange(offset uint64, length uint64) []byte { - if offset+length > uint64(len(pb.buffer)) { - return nil +func (pb *PrefetchBuffer) fetch(offset uint64, bufferSize uint64) { + contentChan, err := pb.client.GetContentStream(pb.hash, int64(offset), int64(bufferSize)) + if err != nil { + // TODO: do something with this error + return + } + + defer log.Printf("Prefetch buffer fetched for: %s at offset %d", pb.hash, offset) + + bufferIndex := offset / bufferSize + + // Initialize internal buffer for this chunk of the content + pb.mu.Lock() + state, exists := pb.buffers[bufferIndex] + if !exists { + state = &internalBuffer{ + data: make([]byte, 0, bufferSize), + fetching: true, + readLength: 0, + } + pb.buffers[bufferIndex] = state + } + pb.mu.Unlock() + + for { + select { + case <-pb.ctx.Done(): + return + case chunk, ok := <-contentChan: + if !ok { + pb.mu.Lock() + state.fetching = false + state.readLength = uint64(len(state.data)) + pb.cond.Broadcast() + pb.mu.Unlock() + return + } + + pb.mu.Lock() + state.data = append(state.data, chunk...) 
+			state.readLength = uint64(len(state.data))
+			pb.cond.Broadcast()
+			pb.mu.Unlock()
+		}
+	}
+}
+
+func (pb *PrefetchBuffer) Stop() {
+	pb.cancelFunc()
+}
+
+func (pb *PrefetchBuffer) GetRange(offset uint64, length uint64) []byte {
+	pb.mu.Lock()
+	defer pb.mu.Unlock()
+
+	bufferSize := pb.bufferSize
+	bufferIndex := offset / bufferSize
+	bufferOffset := offset % bufferSize
+
+	for {
+		state, exists := pb.buffers[bufferIndex]
+
+		// Initiate a fetch operation if the buffer does not exist
+		if !exists {
+			go pb.fetch(bufferIndex*bufferSize, bufferSize)
+		} else if state.readLength >= bufferOffset+length {
+			go func() {
+				pb.lastRead = time.Now()
+			}()
+
+			// Calculate the relative offset within the buffer
+			relativeOffset := offset - (bufferIndex * bufferSize)
+			return state.data[relativeOffset : relativeOffset+length]
+		}
+
+		pb.cond.Wait() // Wait for more data to be available
+	}
 }
diff --git a/pkg/server.go b/pkg/server.go
index d93cc46..6f34ea7 100644
--- a/pkg/server.go
+++ b/pkg/server.go
@@ -222,7 +222,7 @@ func (cs *CacheService) GetContentStream(req *proto.GetContentRequest, stream pr
 	const chunkSize = getContentStreamChunkSize
 	offset := req.Offset
 	remainingLength := req.Length
-	Logger.Infof("GetContentStream[ACK] - [%s] - %d bytes", req.Hash, remainingLength)
+	Logger.Infof("GetContentStream[ACK] - [%s] - offset=%d, length=%d, %d bytes", req.Hash, offset, req.Length, remainingLength)
 
 	for remainingLength > 0 {
 		currentChunkSize := chunkSize

From 1bbe9177fcf5d8eb67d8338a2d1e8568c59d6914 Mon Sep 17 00:00:00 2001
From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com>
Date: Sun, 12 Jan 2025 14:00:20 -0500
Subject: [PATCH 07/34] fetch fetch fetch

---
 pkg/blobfs_node.go     |  4 +---
 pkg/blobfs_prefetch.go | 48 +++++++++++++++++++++++++-----------------
 2 files changed, 30 insertions(+), 22 deletions(-)

diff --git a/pkg/blobfs_node.go b/pkg/blobfs_node.go
index 991319f..34c5a69 100644
--- a/pkg/blobfs_node.go
+++ b/pkg/blobfs_node.go
@@ -3,7 +3,6 @@ package blobcache
 import (
 	"context"
 	"fmt"
-	"log"
 	"path"
 	"strings"
 	"syscall"
@@ -170,8 +169,7 @@ func (n *FSNode) Read(ctx context.Context, f fs.FileHandle, dest []byte, off int
 
 	// If pre-fetch is enabled and the file is large enough, try to prefetch the file using streaming
 	if n.filesystem.Config.BlobFs.Prefetch.Enabled && n.bfsNode.Attr.Size >= n.filesystem.Config.BlobFs.Prefetch.MinSizeBytes {
-		log.Printf("Reading file: %s, offset: %v, length: %v", n.bfsNode.Path, off, len(dest))
-
+		// log.Printf("Reading file: %s, offset: %v, length: %v", n.bfsNode.Path, off, len(dest))
 		buffer := n.filesystem.PrefetchManager.GetPrefetchBuffer(n.bfsNode.Hash, n.bfsNode.Attr.Size)
 		if buffer != nil {
 			return fuse.ReadResultData(buffer.GetRange(uint64(off), uint64(len(dest)))), fs.OK
diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go
index 686524c..84a3ef7 100644
--- a/pkg/blobfs_prefetch.go
+++ b/pkg/blobfs_prefetch.go
@@ -2,7 +2,6 @@ package blobcache
 
 import (
 	"context"
-	"log"
 	"sync"
 	"time"
 )
@@ -11,6 +10,7 @@ const (
 	PrefetchEvictionInterval = 30 * time.Second
 	PrefetchIdleTTL          = 60 * time.Second // remove stale buffers if no read in the past 60s
 	PrefetchBufferSize       = 0                // if 0, no specific limit, just store all
+	PreemptiveFetchThreshold = 32 * 1024 * 1024 // 32MB
 )
 
 type PrefetchManager struct {
@@ -123,27 +123,30 @@ func (pb *PrefetchBuffer) IsStale() bool {
 }
 
 func (pb *PrefetchBuffer) fetch(offset uint64, bufferSize uint64) {
+	bufferIndex := offset / bufferSize
+
+	// Initialize internal buffer for this chunk of the content
+	pb.mu.Lock()
+	_, exists := pb.buffers[bufferIndex]
+	if exists {
+		pb.mu.Unlock()
+		return
+	}
+
+	state := &internalBuffer{
+		data:       make([]byte, 0, bufferSize),
+		fetching:   true,
+		readLength: 0,
+	}
+	pb.buffers[bufferIndex] = state
+
 	contentChan, err := pb.client.GetContentStream(pb.hash, int64(offset), int64(bufferSize))
 	if err != nil {
+		pb.mu.Unlock()
 		// TODO: do something with this error
 		return
 	}
 
-	defer log.Printf("Prefetch buffer fetched for: %s at offset %d", pb.hash, offset)
-
-	bufferIndex := offset / bufferSize
-
-	// Initialize internal buffer for this chunk of the content
-	pb.mu.Lock()
-	state, exists := pb.buffers[bufferIndex]
-	if !exists {
-		state = &internalBuffer{
-			data:       make([]byte, 0, bufferSize),
-			fetching:   true,
-			readLength: 0,
-		}
-		pb.buffers[bufferIndex] = state
-	}
 	pb.mu.Unlock()
 
 	for {
@@ -188,12 +191,19 @@ func (pb *PrefetchBuffer) GetRange(offset uint64, length uint64) []byte {
 		if !exists {
 			go pb.fetch(bufferIndex*bufferSize, bufferSize)
 		} else if state.readLength >= bufferOffset+length {
-			go func() {
-				pb.lastRead = time.Now()
-			}()
+			pb.lastRead = time.Now()
 
 			// Calculate the relative offset within the buffer
 			relativeOffset := offset - (bufferIndex * bufferSize)
+
+			// Pre-emptively start fetching the next buffer if within the threshold
+			if state.readLength-relativeOffset <= PreemptiveFetchThreshold {
+				nextBufferIndex := bufferIndex + 1
+				if _, nextExists := pb.buffers[nextBufferIndex]; !nextExists {
+					go pb.fetch(nextBufferIndex*bufferSize, bufferSize)
+				}
+			}
+
 			return state.data[relativeOffset : relativeOffset+length]
 		}
 

From d6081dd3f9517d70b73a4843d126e42abfc8005d Mon Sep 17 00:00:00 2001
From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com>
Date: Sun, 12 Jan 2025 14:06:32 -0500
Subject: [PATCH 08/34] embed the lock inside the for loop

---
 pkg/blobfs_prefetch.go | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go
index 84a3ef7..f96dbf0 100644
--- a/pkg/blobfs_prefetch.go
+++ b/pkg/blobfs_prefetch.go
@@ -177,19 +177,20 @@ func (pb *PrefetchBuffer) Stop() {
 }
 
 func (pb *PrefetchBuffer) GetRange(offset uint64, length uint64) []byte {
-	pb.mu.Lock()
-	defer pb.mu.Unlock()
-
 	bufferSize := pb.bufferSize
 	bufferIndex := offset / bufferSize
 	bufferOffset := offset % bufferSize
 
-	for {
+	tryGetDataRange := func() ([]byte, bool) {
+		pb.mu.Lock()
+		defer pb.mu.Unlock()
+
 		state, exists := pb.buffers[bufferIndex]
 
 		// Initiate a fetch operation if the buffer does not exist
 		if !exists {
 			go pb.fetch(bufferIndex*bufferSize, bufferSize)
+			return nil, false
 		} else if state.readLength >= bufferOffset+length {
 			pb.lastRead = time.Now()
 
@@ -204,9 +205,16 @@ func (pb *PrefetchBuffer) GetRange(offset uint64, length uint64) []byte {
 			}
 
-			return state.data[relativeOffset : relativeOffset+length]
+			return state.data[relativeOffset : relativeOffset+length], true
 		}
 
-		pb.cond.Wait() // Wait for more data to be available
+		pb.cond.Wait() // Wait for more data to be available
+		return nil, false
+	}
+
+	for {
+		if data, ready := tryGetDataRange(); ready {
+			return data
+		}
 	}
 }

From c2f8904097c2ed718791d464169b819ca13369c3 Mon Sep 17 00:00:00 2001
From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com>
Date: Sun, 12 Jan 2025 14:10:03 -0500
Subject: [PATCH 09/34] wip

---
 pkg/blobfs_node.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/pkg/blobfs_node.go b/pkg/blobfs_node.go
index 34c5a69..aada834 100644
--- a/pkg/blobfs_node.go
+++ b/pkg/blobfs_node.go
@@ -169,7 +169,6 @@ func (n *FSNode) Read(ctx context.Context, f fs.FileHandle, dest []byte, off int
 
 	// If pre-fetch is enabled and the file is large enough, try to prefetch the file using streaming
 	if n.filesystem.Config.BlobFs.Prefetch.Enabled && n.bfsNode.Attr.Size >= n.filesystem.Config.BlobFs.Prefetch.MinSizeBytes {
-		// log.Printf("Reading file: %s, offset: %v, length: %v", n.bfsNode.Path, off, len(dest))
 		buffer := n.filesystem.PrefetchManager.GetPrefetchBuffer(n.bfsNode.Hash, n.bfsNode.Attr.Size)
 		if buffer != nil {
 			return fuse.ReadResultData(buffer.GetRange(uint64(off), uint64(len(dest)))), fs.OK

From 5cc303a5b49952fcc7b7d81e9a4c90390c7710af Mon Sep 17 00:00:00 2001
From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com>
Date: Sun, 12 Jan 2025 20:42:22 -0500
Subject: [PATCH 10/34] fix bugs, verify checksums

---
 pkg/blobfs_prefetch.go | 94 ++++++++++++++++++++++++------------------
 pkg/server.go          |  6 ++-
 2 files changed, 58 insertions(+), 42 deletions(-)

diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go
index f96dbf0..2c202c4 100644
--- a/pkg/blobfs_prefetch.go
+++ b/pkg/blobfs_prefetch.go
@@ -10,7 +10,7 @@ const (
 	PrefetchEvictionInterval = 30 * time.Second
 	PrefetchIdleTTL          = 60 * time.Second // remove stale buffers if no read in the past 60s
 	PrefetchBufferSize       = 0                // if 0, no specific limit, just store all
-	PreemptiveFetchThreshold = 32 * 1024 * 1024 // 32MB
+	PreemptiveFetchThreshold = 16 * 1024 * 1024 // 16MB
 )
 
@@ -78,7 +78,7 @@ type PrefetchBuffer struct {
 	ctx        context.Context
 	cancelFunc context.CancelFunc
 	hash       string
-	buffers    map[uint64]*internalBuffer
+	segments   map[uint64]*segment
 	lastRead   time.Time
 	fileSize   uint64
 	client     *BlobCacheClient
@@ -87,10 +87,9 @@ type PrefetchBuffer struct {
 	bufferSize uint64
 }
 
-type internalBuffer struct {
+type segment struct {
 	data       []byte
 	readLength uint64
-	fetching   bool
 }
 
 type PrefetchOpts struct {
@@ -108,7 +107,7 @@ func NewPrefetchBuffer(opts PrefetchOpts) *PrefetchBuffer {
 		cancelFunc: opts.CancelFunc,
 		hash:       opts.Hash,
 		lastRead:   time.Now(),
-		buffers:    make(map[uint64]*internalBuffer),
+		segments:   make(map[uint64]*segment),
 		fileSize:   opts.FileSize,
 		client:     opts.Client,
 		bufferSize: opts.BufferSize,
@@ -127,23 +126,22 @@ func (pb *PrefetchBuffer) fetch(offset uint64, bufferSize uint64) {
 
 	// Initialize internal buffer for this chunk of the content
 	pb.mu.Lock()
-	_, exists := pb.buffers[bufferIndex]
+	_, exists := pb.segments[bufferIndex]
 	if exists {
 		pb.mu.Unlock()
 		return
 	}
 
-	state := &internalBuffer{
+	s := &segment{
 		data:       make([]byte, 0, bufferSize),
-		fetching:   true,
 		readLength: 0,
 	}
-	pb.buffers[bufferIndex] = state
+	pb.segments[bufferIndex] = s
 
 	contentChan, err := pb.client.GetContentStream(pb.hash, int64(offset), int64(bufferSize))
 	if err != nil {
 		pb.mu.Unlock()
-		// TODO: do something with this error
+		// TODO: handle this error appropriately
 		return
 	}
 
 	pb.mu.Unlock()
 
 	for {
 		select {
 		case <-pb.ctx.Done():
 			return
 		case chunk, ok := <-contentChan:
 			if !ok {
 				pb.mu.Lock()
-				state.fetching = false
-				state.readLength = uint64(len(state.data))
 				pb.cond.Broadcast()
 				pb.mu.Unlock()
 				return
 			}
 
 			pb.mu.Lock()
-			state.data = append(state.data, chunk...)
+			s.data = append(s.data, chunk...)
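+			// advance readLength under the lock so waiting readers can safely consume these bytes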
+			s.readLength += uint64(len(chunk))
 			pb.cond.Broadcast()
 			pb.mu.Unlock()
 		}
 	}
@@ -181,40 +177,56 @@ func (pb *PrefetchBuffer) GetRange(offset uint64, length uint64) []byte {
 	bufferIndex := offset / bufferSize
 	bufferOffset := offset % bufferSize
 
-	tryGetDataRange := func() ([]byte, bool) {
-		pb.mu.Lock()
-		defer pb.mu.Unlock()
+	var result []byte
+
+	for length > 0 {
+		data, ready := pb.tryGetDataRange(bufferIndex, bufferOffset, offset, length)
+		if ready {
+			result = append(result, data...)
+			dataLen := uint64(len(data))
+			length -= dataLen
+			offset += dataLen
+			bufferIndex = offset / bufferSize
+			bufferOffset = offset % bufferSize
+		} else {
+			// If data is not ready, wait for more data to be available
+			pb.mu.Lock()
+			pb.cond.Wait()
+			pb.mu.Unlock()
+		}
+	}
 
-		state, exists := pb.buffers[bufferIndex]
+	return result
+}
 
-		// Initiate a fetch operation if the buffer does not exist
-		if !exists {
-			go pb.fetch(bufferIndex*bufferSize, bufferSize)
-			return nil, false
-		} else if state.readLength >= bufferOffset+length {
-			pb.lastRead = time.Now()
+func (pb *PrefetchBuffer) tryGetDataRange(bufferIndex, bufferOffset, offset, length uint64) ([]byte, bool) {
+	pb.mu.Lock()
+	defer pb.mu.Unlock()
 
-			// Calculate the relative offset within the buffer
-			relativeOffset := offset - (bufferIndex * bufferSize)
+	segment, exists := pb.segments[bufferIndex]
 
-			// Pre-emptively start fetching the next buffer if within the threshold
-			if state.readLength-relativeOffset <= PreemptiveFetchThreshold {
-				nextBufferIndex := bufferIndex + 1
-				if _, nextExists := pb.buffers[nextBufferIndex]; !nextExists {
-					go pb.fetch(nextBufferIndex*bufferSize, bufferSize)
-				}
+	// Initiate a fetch operation if the buffer does not exist
+	if !exists {
+		go pb.fetch(bufferIndex*pb.bufferSize, pb.bufferSize)
+		return nil, false
+	} else if segment.readLength > bufferOffset {
+		pb.lastRead = time.Now()
+
+		// Calculate the relative offset within the buffer
+		relativeOffset := offset - (bufferIndex * pb.bufferSize)
+		availableLength := segment.readLength - relativeOffset
+		readLength := min(int64(length), int64(availableLength))
+
+		// Pre-emptively start fetching the next buffer if within the threshold
+		if segment.readLength-relativeOffset <= PreemptiveFetchThreshold {
+			nextBufferIndex := bufferIndex + 1
+			if _, nextExists := pb.segments[nextBufferIndex]; !nextExists {
+				go pb.fetch(nextBufferIndex*pb.bufferSize, pb.bufferSize)
 			}
-
-			return state.data[relativeOffset : relativeOffset+length], true
 		}
 
-		pb.cond.Wait() // Wait for more data to be available
-		return nil, false
+		return segment.data[relativeOffset : int64(relativeOffset)+int64(readLength)], true
 	}
 
-	for {
-		if data, ready := tryGetDataRange(); ready {
-			return data
-		}
-	}
+	return nil, false
 }
diff --git a/pkg/server.go b/pkg/server.go
index 6f34ea7..7cd6170 100644
--- a/pkg/server.go
+++ b/pkg/server.go
@@ -28,7 +28,7 @@ const (
 	writeBufferSizeBytes      int   = 128 * 1024
 	getContentBufferPoolSize  int   = 128
 	getContentBufferSize      int64 = 256 * 1024
-	getContentStreamChunkSize int64 = 32 * 1024 * 1024 // 32MB
+	getContentStreamChunkSize int64 = 16 * 1024 * 1024 // 16MB
 )
 
 type CacheServiceOpts struct {
@@ -237,6 +237,10 @@ func (cs *CacheService) GetContentStream(req *proto.GetContentRequest, stream pr
 			return status.Errorf(codes.NotFound, "Content not found: %v", err)
 		}
 
+		if n == 0 {
+			break
+		}
+
 		Logger.Infof("GetContentStream[TX] - [%s] - %d bytes", req.Hash, n)
 		if err := stream.Send(&proto.GetContentResponse{
 			Ok: true,

From fe8b65d4ae599e54953c2de67997191964218650 Mon Sep 17 00:00:00 2001
From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com>
Date: Sun, 12 Jan 2025 20:46:10 -0500
Subject: [PATCH 11/34] rename vars

---
 pkg/blobfs_prefetch.go | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go
index 2c202c4..eeb6e4a 100644
--- a/pkg/blobfs_prefetch.go
+++ b/pkg/blobfs_prefetch.go
@@ -7,10 +7,9 @@ import (
 )
 
 const (
-	PrefetchEvictionInterval = 30 * time.Second
-	PrefetchIdleTTL          = 60 * time.Second // remove stale buffers if no read in the past 60s
-	PrefetchBufferSize       = 0                // if 0, no specific limit, just store all
-	PreemptiveFetchThreshold = 16 * 1024 * 1024 // 16MB
+	prefetchEvictionInterval = 30 * time.Second
+	prefetchIdleTTL          = 60 * time.Second // remove stale buffers if no read in the past 60s
+	preemptiveFetchThreshold = 16 * 1024 * 1024 // if the next segment is within 16MB of where we are reading, start fetching it
 )
 
 type PrefetchManager struct {
@@ -58,7 +57,7 @@ func (pm *PrefetchManager) evictIdleBuffers() {
 		select {
 		case <-pm.ctx.Done():
 			return
-		case <-time.After(PrefetchEvictionInterval):
+		case <-time.After(prefetchEvictionInterval):
 			pm.buffers.Range(func(key, value any) bool {
 				buffer := value.(*PrefetchBuffer)
 
@@ -118,7 +117,7 @@ func NewPrefetchBuffer(opts PrefetchOpts) *PrefetchBuffer {
 }
 
 func (pb *PrefetchBuffer) IsStale() bool {
-	return time.Since(pb.lastRead) > PrefetchIdleTTL
+	return time.Since(pb.lastRead) > prefetchIdleTTL
 }
 
@@ -218,7 +217,7 @@ func (pb *PrefetchBuffer) tryGetDataRange(bufferIndex, bufferOffset, offset, len
 		readLength := min(int64(length), int64(availableLength))
 
 		// Pre-emptively start fetching the next buffer if within the threshold
-		if segment.readLength-relativeOffset <= PreemptiveFetchThreshold {
+		if segment.readLength-relativeOffset <= preemptiveFetchThreshold {
 			nextBufferIndex := bufferIndex + 1
 			if _, nextExists := pb.segments[nextBufferIndex]; !nextExists {
 				go pb.fetch(nextBufferIndex*pb.bufferSize, pb.bufferSize)

From 237a30202588b87b176d6cde1271e7df27947a53 Mon Sep 17 00:00:00 2001
From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com>
Date: Sun, 12 Jan 2025 20:48:37 -0500
Subject: [PATCH 12/34] more cleanup

---
 pkg/blobfs_prefetch.go | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go
index eeb6e4a..dfb6f28 100644
--- a/pkg/blobfs_prefetch.go
+++ b/pkg/blobfs_prefetch.go
@@ -177,9 +177,8 @@ func (pb *PrefetchBuffer) GetRange(offset uint64, length uint64) []byte {
 	bufferOffset := offset % bufferSize
 
 	var result []byte
-
 	for length > 0 {
-		data, ready := pb.tryGetDataRange(bufferIndex, bufferOffset, offset, length)
+		data, ready := pb.tryGetRange(bufferIndex, bufferOffset, offset, length)
 		if ready {
 			result = append(result, data...)
 			dataLen := uint64(len(data))
 			length -= dataLen
 			offset += dataLen
 			bufferIndex = offset / bufferSize
 			bufferOffset = offset % bufferSize
@@ -198,7 +197,7 @@ func (pb *PrefetchBuffer) GetRange(offset uint64, length uint64) []byte {
 	return result
 }
 
-func (pb *PrefetchBuffer) tryGetDataRange(bufferIndex, bufferOffset, offset, length uint64) ([]byte, bool) {
+func (pb *PrefetchBuffer) tryGetRange(bufferIndex, bufferOffset, offset, length uint64) ([]byte, bool) {
 	pb.mu.Lock()
 	defer pb.mu.Unlock()
 

From f6a629a0e4a87ebf23fa300f394ea5deb7a4acef Mon Sep 17 00:00:00 2001
From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com>
Date: Sun, 12 Jan 2025 20:49:13 -0500
Subject: [PATCH 13/34] handle error

---
 pkg/blobfs_prefetch.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go
index dfb6f28..14156dd 100644
--- a/pkg/blobfs_prefetch.go
+++ b/pkg/blobfs_prefetch.go
@@ -139,8 +139,8 @@ func (pb *PrefetchBuffer) fetch(offset uint64, bufferSize uint64) {
 
 	contentChan, err := pb.client.GetContentStream(pb.hash, int64(offset), int64(bufferSize))
 	if err != nil {
+		delete(pb.segments, bufferIndex)
 		pb.mu.Unlock()
-		// TODO: handle this error appropriately
 		return
 	}
 

From 4535bb398ef317d24ce3fa728a9febdd628aefda Mon Sep 17 00:00:00 2001
From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com>
Date: Sun, 12 Jan 2025 20:51:33 -0500
Subject: [PATCH 14/34] rename configs

---
 pkg/blobfs_node.go      |  2 +-
 pkg/blobfs_prefetch.go  | 70 ++++++++++++++++++++---------------------
 pkg/config.default.yaml |  4 +--
 pkg/types.go            |  8 ++---
 4 files changed, 42 insertions(+), 42 deletions(-)

diff --git a/pkg/blobfs_node.go b/pkg/blobfs_node.go
index aada834..49c464d 100644
--- a/pkg/blobfs_node.go
+++ b/pkg/blobfs_node.go
@@ -168,7 +168,7 @@ func (n *FSNode) Read(ctx context.Context, f fs.FileHandle, dest []byte, off int
 	}
 
 	// If pre-fetch is enabled and the file is large enough, try to prefetch the file using streaming
-	if n.filesystem.Config.BlobFs.Prefetch.Enabled && n.bfsNode.Attr.Size >= n.filesystem.Config.BlobFs.Prefetch.MinSizeBytes {
+	if n.filesystem.Config.BlobFs.Prefetch.Enabled && n.bfsNode.Attr.Size >= n.filesystem.Config.BlobFs.Prefetch.MinFileSizeBytes {
 		buffer := n.filesystem.PrefetchManager.GetPrefetchBuffer(n.bfsNode.Hash, n.bfsNode.Attr.Size)
 		if buffer != nil {
 			return fuse.ReadResultData(buffer.GetRange(uint64(off), uint64(len(dest)))), fs.OK
diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go
index 14156dd..fcf5ba5 100644
--- a/pkg/blobfs_prefetch.go
+++ b/pkg/blobfs_prefetch.go
@@ -40,12 +40,12 @@ func (pm *PrefetchManager) GetPrefetchBuffer(hash string, fileSize uint64) *Pref
 
 	ctx, cancel := context.WithCancel(pm.ctx)
 	newBuffer := NewPrefetchBuffer(PrefetchOpts{
-		Ctx:        ctx,
-		CancelFunc: cancel,
-		Hash:       hash,
-		FileSize:   fileSize,
-		BufferSize: pm.config.BlobFs.Prefetch.MaxBufferSizeBytes,
-		Client:     pm.client,
+		Ctx:         ctx,
+		CancelFunc:  cancel,
+		Hash:        hash,
+		FileSize:    fileSize,
+		SegmentSize: pm.config.BlobFs.Prefetch.SegmentSizeBytes,
+		Client:      pm.client,
 	})
 
 	pm.buffers.Store(hash, newBuffer)
@@ -74,16 +74,16 @@ func (pm *PrefetchManager) evictIdleBuffers() {
 }
 
 type PrefetchBuffer struct {
-	ctx        context.Context
-	cancelFunc context.CancelFunc
-	hash       string
-	segments   map[uint64]*segment
-	lastRead   time.Time
-	fileSize   uint64
-	client     *BlobCacheClient
-	mu         sync.Mutex
-	cond       *sync.Cond
-	bufferSize uint64
+	ctx         context.Context
+	cancelFunc  context.CancelFunc
+	hash        string
+	segments    map[uint64]*segment
+	segmentSize uint64
+	lastRead    time.Time
+	fileSize    uint64
+	client      *BlobCacheClient
+	mu          sync.Mutex
+	cond        *sync.Cond
 }
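 // segment holds one fixed-size window of the file; its data fills in as chunks stream from the cache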
 type segment struct {
@@ -92,25 +92,25 @@ type segment struct {
 }
 
 type PrefetchOpts struct {
-	Ctx        context.Context
-	CancelFunc context.CancelFunc
-	Hash       string
-	FileSize   uint64
-	BufferSize uint64
-	Offset     uint64
-	Client     *BlobCacheClient
+	Ctx         context.Context
+	CancelFunc  context.CancelFunc
+	Hash        string
+	FileSize    uint64
+	SegmentSize uint64
+	Offset      uint64
+	Client      *BlobCacheClient
 }
 
 func NewPrefetchBuffer(opts PrefetchOpts) *PrefetchBuffer {
 	pb := &PrefetchBuffer{
-		ctx:        opts.Ctx,
-		cancelFunc: opts.CancelFunc,
-		hash:       opts.Hash,
-		lastRead:   time.Now(),
-		segments:   make(map[uint64]*segment),
-		fileSize:   opts.FileSize,
-		client:     opts.Client,
-		bufferSize: opts.BufferSize,
+		ctx:         opts.Ctx,
+		cancelFunc:  opts.CancelFunc,
+		hash:        opts.Hash,
+		lastRead:    time.Now(),
+		segments:    make(map[uint64]*segment),
+		fileSize:    opts.FileSize,
+		client:      opts.Client,
+		segmentSize: opts.SegmentSize,
 	}
 	pb.cond = sync.NewCond(&pb.mu)
 	return pb
@@ -172,7 +172,7 @@ func (pb *PrefetchBuffer) Stop() {
 }
 
 func (pb *PrefetchBuffer) GetRange(offset uint64, length uint64) []byte {
-	bufferSize := pb.bufferSize
+	bufferSize := pb.segmentSize
 	bufferIndex := offset / bufferSize
 	bufferOffset := offset % bufferSize
 
@@ -205,13 +205,13 @@ func (pb *PrefetchBuffer) tryGetRange(bufferIndex, bufferOffset, offset, length
 
 	// Initiate a fetch operation if the buffer does not exist
 	if !exists {
-		go pb.fetch(bufferIndex*pb.bufferSize, pb.bufferSize)
+		go pb.fetch(bufferIndex*pb.segmentSize, pb.segmentSize)
 		return nil, false
 	} else if segment.readLength > bufferOffset {
 		pb.lastRead = time.Now()
 
 		// Calculate the relative offset within the buffer
-		relativeOffset := offset - (bufferIndex * pb.bufferSize)
+		relativeOffset := offset - (bufferIndex * pb.segmentSize)
 		availableLength := segment.readLength - relativeOffset
 		readLength := min(int64(length), int64(availableLength))
 
@@ -219,7 +219,7 @@ func (pb *PrefetchBuffer) tryGetRange(bufferIndex, bufferOffset, offset, length
 		if segment.readLength-relativeOffset <= preemptiveFetchThreshold {
 			nextBufferIndex := bufferIndex + 1
 			if _, nextExists := pb.segments[nextBufferIndex]; !nextExists {
-				go pb.fetch(nextBufferIndex*pb.bufferSize, pb.bufferSize)
+				go pb.fetch(nextBufferIndex*pb.segmentSize, pb.segmentSize)
 			}
 		}
 
diff --git a/pkg/config.default.yaml b/pkg/config.default.yaml
index 1b297f4..a4196e9 100644
--- a/pkg/config.default.yaml
+++ b/pkg/config.default.yaml
@@ -15,8 +15,8 @@ blobfs:
  prefetch:
    enabled: false
    idleTtlS: 60
-    minSizeBytes: 1048576 # 1MB
-    maxBufferSizeBytes: 134217728 # 128MB
+    minFileSizeBytes: 1048576 # 1MB
+    segmentSizeBytes: 134217728 # 128MB
  mountPoint: /tmp/test
  maxBackgroundTasks: 512
  maxReadAheadKB: 128
diff --git a/pkg/types.go b/pkg/types.go
index a18944d..1ad8f66 100644
--- a/pkg/types.go
+++ b/pkg/types.go
@@ -103,10 +103,10 @@ type BlobFsConfig struct {
 }
 
 type BlobFsPrefetchConfig struct {
-	Enabled            bool   `key:"enabled" json:"enabled"`
-	IdleTtlS           int    `key:"idleTtlS" json:"idle_ttl_s"`
-	MinSizeBytes       uint64 `key:"minSizeBytes" json:"min_size_bytes"`
-	MaxBufferSizeBytes uint64 `key:"maxBufferSizeBytes" json:"max_buffer_size_bytes"`
+	Enabled          bool   `key:"enabled" json:"enabled"`
+	MinFileSizeBytes uint64 `key:"minFileSizeBytes" json:"min_file_size_bytes"`
+	IdleTtlS         int    `key:"idleTtlS" json:"idle_ttl_s"`
+	SegmentSizeBytes uint64 `key:"segmentSizeBytes" json:"segment_size_bytes"`
 }
 
 type SourceConfig struct {

From 792deee960e58cd7cd3fc90a6636fc8a3b5085d6 Mon Sep 17 00:00:00 2001
From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com>
Date: Sun, 12 Jan 2025 20:54:34 -0500
Subject: [PATCH 15/34] more renames

---
 pkg/blobfs_prefetch.go | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go
index fcf5ba5..948d051 100644
--- a/pkg/blobfs_prefetch.go
+++ b/pkg/blobfs_prefetch.go
@@ -7,9 +7,9 @@ import (
 )
 
 const (
-	prefetchEvictionInterval = 30 * time.Second
-	prefetchIdleTTL          = 60 * time.Second // remove stale buffers if no read in the past 60s
-	preemptiveFetchThreshold = 16 * 1024 * 1024 // if the next segment is within 16MB of where we are reading, start fetching it
+	prefetchEvictionInterval      = 30 * time.Second
+	prefetchIdleTTL               = 60 * time.Second // remove stale buffers if no read in the past 60s
+	preemptiveFetchThresholdBytes = 16 * 1024 * 1024 // if the next segment is within 16MB of where we are reading, start fetching it
 )
 
@@ -216,7 +216,7 @@ func (pb *PrefetchBuffer) tryGetRange(bufferIndex, bufferOffset, offset, length
 		readLength := min(int64(length), int64(availableLength))
 
 		// Pre-emptively start fetching the next buffer if within the threshold
-		if segment.readLength-relativeOffset <= preemptiveFetchThreshold {
+		if segment.readLength-relativeOffset <= preemptiveFetchThresholdBytes {
 			nextBufferIndex := bufferIndex + 1
 			if _, nextExists := pb.segments[nextBufferIndex]; !nextExists {
 				go pb.fetch(nextBufferIndex*pb.segmentSize, pb.segmentSize)

From 366dca5f4cacc64c682b638ef2def791e427bb46 Mon Sep 17 00:00:00 2001
From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com>
Date: Sun, 12 Jan 2025 21:20:31 -0500
Subject: [PATCH 16/34] clear segments

---
 pkg/blobfs_prefetch.go | 52 +++++++++++++++++++++++++++++++++---------
 1 file changed, 41 insertions(+), 11 deletions(-)

diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go
index 948d051..5f6dc33 100644
--- a/pkg/blobfs_prefetch.go
+++ b/pkg/blobfs_prefetch.go
@@ -8,7 +8,7 @@ import (
 
 const (
 	prefetchEvictionInterval      = 30 * time.Second
-	prefetchIdleTTL               = 60 * time.Second // remove stale buffers if no read in the past 60s
+	prefetchIdleTTL               = 10 * time.Second // remove stale segments if they haven't been read recently
 	preemptiveFetchThresholdBytes = 16 * 1024 * 1024 // if the next segment is within 16MB of where we are reading, start fetching it
 )
 
@@ -61,8 +61,12 @@ func (pm *PrefetchManager) evictIdleBuffers() {
 			pm.buffers.Range(func(key, value any) bool {
 				buffer := value.(*PrefetchBuffer)
 
-				if buffer.IsStale() {
-					buffer.Stop()
+				// If no reads have happened in any segments in the buffer
+				// stop any fetch operations and clear the buffer so it can
+				// be garbage collected
+				unused := buffer.evictIdle()
+				if unused {
+					buffer.Clear()
 					pm.buffers.Delete(key)
 				}
 
@@ -87,8 +91,10 @@ type PrefetchBuffer struct {
 }
 
 type segment struct {
+	index      uint64
 	data       []byte
 	readLength uint64
+	lastRead   time.Time
 }
 
 type PrefetchOpts struct {
@@ -107,19 +113,15 @@ func NewPrefetchBuffer(opts PrefetchOpts) *PrefetchBuffer {
 		cancelFunc:  opts.CancelFunc,
 		hash:        opts.Hash,
 		lastRead:    time.Now(),
-		segments:    make(map[uint64]*segment),
 		fileSize:    opts.FileSize,
 		client:      opts.Client,
+		segments:    make(map[uint64]*segment),
 		segmentSize: opts.SegmentSize,
 	}
 	pb.cond = sync.NewCond(&pb.mu)
 	return pb
 }
 
-func (pb *PrefetchBuffer) IsStale() bool {
-	return time.Since(pb.lastRead) > prefetchIdleTTL
-}
-
 func (pb *PrefetchBuffer) fetch(offset uint64, bufferSize uint64) {
 	bufferIndex := offset / bufferSize
 
@@ -132,6 +134,7 @@ func (pb *PrefetchBuffer) fetch(offset uint64, bufferSize uint64) {
 	}
 
 	s := &segment{
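 		// a segment starts empty and fills as the content stream arrives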
index: bufferIndex, data: make([]byte, 0, bufferSize), readLength: 0, } @@ -167,8 +170,35 @@ func (pb *PrefetchBuffer) fetch(offset uint64, bufferSize uint64) { } } -func (pb *PrefetchBuffer) Stop() { - pb.cancelFunc() +func (pb *PrefetchBuffer) evictIdle() bool { + unused := true + var indicesToDelete []uint64 + + pb.mu.Lock() + for index, segment := range pb.segments { + if time.Since(segment.lastRead) > prefetchIdleTTL { + indicesToDelete = append(indicesToDelete, index) + } else { + unused = false + } + } + pb.mu.Unlock() + + for _, index := range indicesToDelete { + pb.mu.Lock() + delete(pb.segments, index) + pb.mu.Unlock() + } + + return unused +} + +func (pb *PrefetchBuffer) Clear() { + pb.cancelFunc() // Stop any fetch operations + + pb.mu.Lock() + pb.segments = make(map[uint64]*segment) // Reinitialize the map to clear all entries + pb.mu.Unlock() } func (pb *PrefetchBuffer) GetRange(offset uint64, length uint64) []byte { @@ -208,7 +238,7 @@ func (pb *PrefetchBuffer) tryGetRange(bufferIndex, bufferOffset, offset, length go pb.fetch(bufferIndex*pb.segmentSize, pb.segmentSize) return nil, false } else if segment.readLength > bufferOffset { - pb.lastRead = time.Now() + segment.lastRead = time.Now() // Calculate the relative offset within the buffer relativeOffset := offset - (bufferIndex * pb.segmentSize) From 25dd64fb3297593148bc0c4a0e2b82cc8eb7e0e8 Mon Sep 17 00:00:00 2001 From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com> Date: Sun, 12 Jan 2025 21:21:04 -0500 Subject: [PATCH 17/34] lower eviction interval --- pkg/blobfs_prefetch.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go index 5f6dc33..3661be2 100644 --- a/pkg/blobfs_prefetch.go +++ b/pkg/blobfs_prefetch.go @@ -7,7 +7,7 @@ import ( ) const ( - prefetchEvictionInterval = 30 * time.Second + prefetchEvictionInterval = 10 * time.Second prefetchIdleTTL = 10 * time.Second // remove stale buffers if no read in the past 60s preemptiveFetchThresholdBytes = 16 * 1024 * 1024 // if the next segment is within 16MB of where we are reading, start fetching it ) From 4d7fb6002db83ecbeb9338f3dde90656cc1b7ed6 Mon Sep 17 00:00:00 2001 From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com> Date: Sun, 12 Jan 2025 21:41:28 -0500 Subject: [PATCH 18/34] fix lock --- pkg/blobfs_prefetch.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go index 3661be2..071a41e 100644 --- a/pkg/blobfs_prefetch.go +++ b/pkg/blobfs_prefetch.go @@ -187,6 +187,7 @@ func (pb *PrefetchBuffer) evictIdle() bool { for _, index := range indicesToDelete { pb.mu.Lock() delete(pb.segments, index) + pb.cond.Broadcast() pb.mu.Unlock() } From 45d8e6e9e786f2cf7f6af8628271647a1b422f18 Mon Sep 17 00:00:00 2001 From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com> Date: Sun, 12 Jan 2025 22:03:18 -0500 Subject: [PATCH 19/34] fix race --- pkg/blobfs_prefetch.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go index 071a41e..e674357 100644 --- a/pkg/blobfs_prefetch.go +++ b/pkg/blobfs_prefetch.go @@ -8,7 +8,7 @@ import ( const ( prefetchEvictionInterval = 10 * time.Second - prefetchIdleTTL = 10 * time.Second // remove stale buffers if no read in the past 60s + prefetchSegmentIdleTTL = 30 * time.Second // remove stale buffers if no read in the past 60s preemptiveFetchThresholdBytes = 16 * 1024 * 1024 // if the next segment is within 16MB of where we are 
reading, start fetching it ) @@ -95,6 +95,7 @@ type segment struct { data []byte readLength uint64 lastRead time.Time + fetching bool } type PrefetchOpts struct { @@ -137,6 +138,8 @@ func (pb *PrefetchBuffer) fetch(offset uint64, bufferSize uint64) { index: bufferIndex, data: make([]byte, 0, bufferSize), readLength: 0, + lastRead: time.Now(), + fetching: true, } pb.segments[bufferIndex] = s @@ -156,6 +159,8 @@ func (pb *PrefetchBuffer) fetch(offset uint64, bufferSize uint64) { case chunk, ok := <-contentChan: if !ok { pb.mu.Lock() + s.fetching = false + s.lastRead = time.Now() pb.cond.Broadcast() pb.mu.Unlock() return @@ -176,7 +181,7 @@ func (pb *PrefetchBuffer) evictIdle() bool { pb.mu.Lock() for index, segment := range pb.segments { - if time.Since(segment.lastRead) > prefetchIdleTTL { + if time.Since(segment.lastRead) > prefetchSegmentIdleTTL && !segment.fetching { indicesToDelete = append(indicesToDelete, index) } else { unused = false @@ -186,6 +191,7 @@ func (pb *PrefetchBuffer) evictIdle() bool { for _, index := range indicesToDelete { pb.mu.Lock() + Logger.Debugf("Evicting segment %s-%d", pb.hash, index) delete(pb.segments, index) pb.cond.Broadcast() pb.mu.Unlock() From 51bf4ae4fe3fc666218d0cfbd5299930f7593c52 Mon Sep 17 00:00:00 2001 From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com> Date: Sun, 12 Jan 2025 22:08:22 -0500 Subject: [PATCH 20/34] fix comment --- pkg/blobfs_prefetch.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go index e674357..71239e3 100644 --- a/pkg/blobfs_prefetch.go +++ b/pkg/blobfs_prefetch.go @@ -8,7 +8,7 @@ import ( const ( prefetchEvictionInterval = 10 * time.Second - prefetchSegmentIdleTTL = 30 * time.Second // remove stale buffers if no read in the past 60s + prefetchSegmentIdleTTL = 30 * time.Second // remove stale segments if no reads in the past 30s preemptiveFetchThresholdBytes = 16 * 1024 * 1024 // if the next segment is within 16MB of where we are reading, start fetching it ) From b9a30fd4555f3784a57ec20342c87d9e62c0aba6 Mon Sep 17 00:00:00 2001 From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com> Date: Sun, 12 Jan 2025 23:31:49 -0500 Subject: [PATCH 21/34] explicitly clear buffer --- pkg/blobfs_prefetch.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go index 71239e3..9d04038 100644 --- a/pkg/blobfs_prefetch.go +++ b/pkg/blobfs_prefetch.go @@ -192,6 +192,8 @@ func (pb *PrefetchBuffer) evictIdle() bool { for _, index := range indicesToDelete { pb.mu.Lock() Logger.Debugf("Evicting segment %s-%d", pb.hash, index) + segment := pb.segments[index] + segment.data = nil delete(pb.segments, index) pb.cond.Broadcast() pb.mu.Unlock() From 368cf3345aea269b4e84c77975b6c075db0008ef Mon Sep 17 00:00:00 2001 From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com> Date: Mon, 13 Jan 2025 09:25:36 -0500 Subject: [PATCH 22/34] more aggressive gc --- pkg/blobfs_prefetch.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go index 9d04038..ea8f356 100644 --- a/pkg/blobfs_prefetch.go +++ b/pkg/blobfs_prefetch.go @@ -206,8 +206,14 @@ func (pb *PrefetchBuffer) Clear() { pb.cancelFunc() // Stop any fetch operations pb.mu.Lock() - pb.segments = make(map[uint64]*segment) // Reinitialize the map to clear all entries - pb.mu.Unlock() + defer pb.mu.Unlock() + + // Clear all segment data + for _, segment := range pb.segments { + segment.data 
= nil + } + // Reinitialize the map to clear all entries + pb.segments = make(map[uint64]*segment) } func (pb *PrefetchBuffer) GetRange(offset uint64, length uint64) []byte { From c6cfaa22eadf6dfe27e57108770da6ab3bab802b Mon Sep 17 00:00:00 2001 From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com> Date: Mon, 13 Jan 2025 09:59:49 -0500 Subject: [PATCH 23/34] force gc on evict --- pkg/blobfs_prefetch.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go index ea8f356..43cf179 100644 --- a/pkg/blobfs_prefetch.go +++ b/pkg/blobfs_prefetch.go @@ -2,6 +2,7 @@ package blobcache import ( "context" + "runtime" "sync" "time" ) @@ -72,6 +73,8 @@ func (pm *PrefetchManager) evictIdleBuffers() { return true }) + + runtime.GC() } } From 90b665e4dfda97cd45ac4efa5930990f9f8403df Mon Sep 17 00:00:00 2001 From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com> Date: Mon, 13 Jan 2025 10:09:12 -0500 Subject: [PATCH 24/34] remove idle --- pkg/blobfs_prefetch.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go index 43cf179..59ccb14 100644 --- a/pkg/blobfs_prefetch.go +++ b/pkg/blobfs_prefetch.go @@ -2,13 +2,12 @@ package blobcache import ( "context" - "runtime" "sync" "time" ) const ( - prefetchEvictionInterval = 10 * time.Second + prefetchEvictionInterval = 5 * time.Second prefetchSegmentIdleTTL = 30 * time.Second // remove stale segments if no reads in the past 30s preemptiveFetchThresholdBytes = 16 * 1024 * 1024 // if the next segment is within 16MB of where we are reading, start fetching it ) @@ -73,8 +72,6 @@ func (pm *PrefetchManager) evictIdleBuffers() { return true }) - - runtime.GC() } } From c06d3d49d69cad10b6d841e959f480bd25445b62 Mon Sep 17 00:00:00 2001 From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com> Date: Mon, 13 Jan 2025 10:38:23 -0500 Subject: [PATCH 25/34] cache prefetch and support ignore file exts --- pkg/blobfs_node.go | 58 +++++++++++++++++++++++++++++------------ pkg/config.default.yaml | 2 ++ pkg/types.go | 9 ++++--- 3 files changed, 49 insertions(+), 20 deletions(-) diff --git a/pkg/blobfs_node.go b/pkg/blobfs_node.go index 49c464d..0816ad3 100644 --- a/pkg/blobfs_node.go +++ b/pkg/blobfs_node.go @@ -12,13 +12,14 @@ import ( ) type BlobFsNode struct { - Path string - ID string - PID string - Name string - Target string - Hash string - Attr fuse.Attr + Path string + ID string + PID string + Name string + Target string + Hash string + Attr fuse.Attr + Prefetch *bool } type FSNode struct { fs.Inode @@ -94,13 +95,14 @@ func (n *FSNode) inodeFromFsId(ctx context.Context, fsId string) (*fs.Inode, *fu // Create a new Inode on lookup node := n.NewInode(ctx, &FSNode{filesystem: n.filesystem, bfsNode: &BlobFsNode{ - Path: metadata.Path, - ID: metadata.ID, - PID: metadata.PID, - Name: metadata.Name, - Hash: metadata.Hash, - Attr: attr, - Target: "", + Path: metadata.Path, + ID: metadata.ID, + PID: metadata.PID, + Name: metadata.Name, + Hash: metadata.Hash, + Attr: attr, + Target: "", + Prefetch: nil, }, attr: attr}, fs.StableAttr{Mode: metadata.Mode, Ino: metadata.Ino, Gen: metadata.Gen}, ) @@ -159,6 +161,30 @@ func (n *FSNode) Open(ctx context.Context, flags uint32) (fh fs.FileHandle, fuse return nil, 0, fs.OK } +func (n *FSNode) shouldPrefetch(node *BlobFsNode) bool { + if node.Prefetch != nil { + return *node.Prefetch + } + + if !n.filesystem.Config.BlobFs.Prefetch.Enabled { + return false + } + + if n.bfsNode.Attr.Size < 
n.filesystem.Config.BlobFs.Prefetch.MinFileSizeBytes { + return false + } + + for _, ext := range n.filesystem.Config.BlobFs.Prefetch.IgnoreFileExt { + if strings.HasSuffix(node.Name, ext) { + return false + } + } + + prefetch := true + node.Prefetch = &prefetch + return true +} + func (n *FSNode) Read(ctx context.Context, f fs.FileHandle, dest []byte, off int64) (fuse.ReadResult, syscall.Errno) { n.log("Read called with offset: %v", off) @@ -167,8 +193,8 @@ func (n *FSNode) Read(ctx context.Context, f fs.FileHandle, dest []byte, off int return fuse.ReadResultData(dest[:0]), fs.OK } - // If pre-fetch is enabled and the file is large enough, try to prefetch the file using streaming - if n.filesystem.Config.BlobFs.Prefetch.Enabled && n.bfsNode.Attr.Size >= n.filesystem.Config.BlobFs.Prefetch.MinFileSizeBytes { + // Attempt to prefetch the file + if n.shouldPrefetch(n.bfsNode) { buffer := n.filesystem.PrefetchManager.GetPrefetchBuffer(n.bfsNode.Hash, n.bfsNode.Attr.Size) if buffer != nil { return fuse.ReadResultData(buffer.GetRange(uint64(off), uint64(len(dest)))), fs.OK diff --git a/pkg/config.default.yaml b/pkg/config.default.yaml index a4196e9..6705cb5 100644 --- a/pkg/config.default.yaml +++ b/pkg/config.default.yaml @@ -17,6 +17,8 @@ blobfs: idleTtlS: 60 minFileSizeBytes: 1048576 # 1MB segmentSizeBytes: 134217728 # 128MB + ignoreFileExt: + - .clip mountPoint: /tmp/test maxBackgroundTasks: 512 maxReadAheadKB: 128 diff --git a/pkg/types.go b/pkg/types.go index 1ad8f66..4cd52ff 100644 --- a/pkg/types.go +++ b/pkg/types.go @@ -103,10 +103,11 @@ type BlobFsConfig struct { } type BlobFsPrefetchConfig struct { - Enabled bool `key:"enabled" json:"enabled"` - MinFileSizeBytes uint64 `key:"minFileSizeBytes" json:"min_file_size_bytes"` - IdleTtlS int `key:"idleTtlS" json:"idle_ttl_s"` - SegmentSizeBytes uint64 `key:"segmentSizeBytes" json:"segment_size_bytes"` + Enabled bool `key:"enabled" json:"enabled"` + MinFileSizeBytes uint64 `key:"minFileSizeBytes" json:"min_file_size_bytes"` + IdleTtlS int `key:"idleTtlS" json:"idle_ttl_s"` + SegmentSizeBytes uint64 `key:"segmentSizeBytes" json:"segment_size_bytes"` + IgnoreFileExt []string `key:"ignoreFileExt" json:"ignore_file_ext"` } type SourceConfig struct { From ac62cbbc1f882dc3448c9077437979ea9e1e543c Mon Sep 17 00:00:00 2001 From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com> Date: Mon, 13 Jan 2025 12:03:06 -0500 Subject: [PATCH 26/34] add data timeout --- pkg/blobfs_node.go | 5 ++++- pkg/blobfs_prefetch.go | 27 ++++++++++++++++++++------- pkg/config.default.yaml | 1 + pkg/types.go | 1 + 4 files changed, 26 insertions(+), 8 deletions(-) diff --git a/pkg/blobfs_node.go b/pkg/blobfs_node.go index 0816ad3..76c969f 100644 --- a/pkg/blobfs_node.go +++ b/pkg/blobfs_node.go @@ -197,7 +197,10 @@ func (n *FSNode) Read(ctx context.Context, f fs.FileHandle, dest []byte, off int if n.shouldPrefetch(n.bfsNode) { buffer := n.filesystem.PrefetchManager.GetPrefetchBuffer(n.bfsNode.Hash, n.bfsNode.Attr.Size) if buffer != nil { - return fuse.ReadResultData(buffer.GetRange(uint64(off), uint64(len(dest)))), fs.OK + data, err := buffer.GetRange(uint64(off), uint64(len(dest))) + if err == nil { + return fuse.ReadResultData(data), fs.OK + } } } diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go index 59ccb14..4992971 100644 --- a/pkg/blobfs_prefetch.go +++ b/pkg/blobfs_prefetch.go @@ -2,13 +2,14 @@ package blobcache import ( "context" + "fmt" "sync" "time" ) const ( prefetchEvictionInterval = 5 * time.Second - prefetchSegmentIdleTTL = 30 * 
time.Second // remove stale segments if no reads in the past 30s + prefetchSegmentIdleTTL = 10 * time.Second // remove stale segments if no reads in the past 10s preemptiveFetchThresholdBytes = 16 * 1024 * 1024 // if the next segment is within 16MB of where we are reading, start fetching it ) @@ -45,6 +46,7 @@ func (pm *PrefetchManager) GetPrefetchBuffer(hash string, fileSize uint64) *Pref Hash: hash, FileSize: fileSize, SegmentSize: pm.config.BlobFs.Prefetch.SegmentSizeBytes, + DataTimeout: time.Second * time.Duration(pm.config.BlobFs.Prefetch.DataTimeoutS), Client: pm.client, }) @@ -88,6 +90,7 @@ type PrefetchBuffer struct { client *BlobCacheClient mu sync.Mutex cond *sync.Cond + dataTimeout time.Duration } type segment struct { @@ -106,6 +109,7 @@ type PrefetchOpts struct { SegmentSize uint64 Offset uint64 Client *BlobCacheClient + DataTimeout time.Duration } func NewPrefetchBuffer(opts PrefetchOpts) *PrefetchBuffer { @@ -118,6 +122,7 @@ func NewPrefetchBuffer(opts PrefetchOpts) *PrefetchBuffer { client: opts.Client, segments: make(map[uint64]*segment), segmentSize: opts.SegmentSize, + dataTimeout: opts.DataTimeout, } pb.cond = sync.NewCond(&pb.mu) return pb @@ -212,16 +217,19 @@ func (pb *PrefetchBuffer) Clear() { for _, segment := range pb.segments { segment.data = nil } + // Reinitialize the map to clear all entries pb.segments = make(map[uint64]*segment) } -func (pb *PrefetchBuffer) GetRange(offset uint64, length uint64) []byte { +func (pb *PrefetchBuffer) GetRange(offset uint64, length uint64) ([]byte, error) { bufferSize := pb.segmentSize bufferIndex := offset / bufferSize bufferOffset := offset % bufferSize var result []byte + timeoutChan := time.After(pb.dataTimeout) + for length > 0 { data, ready := pb.tryGetRange(bufferIndex, bufferOffset, offset, length) if ready { @@ -232,14 +240,19 @@ func (pb *PrefetchBuffer) GetRange(offset uint64, length uint64) []byte { bufferIndex = offset / bufferSize bufferOffset = offset % bufferSize } else { - // If data is not ready, wait for more data to be available - pb.mu.Lock() - pb.cond.Wait() - pb.mu.Unlock() + select { + case <-timeoutChan: + return nil, fmt.Errorf("timeout occurred waiting for prefetch data") + default: + // If data is not ready, wait for more data to be available + pb.mu.Lock() + pb.cond.Wait() + pb.mu.Unlock() + } } } - return result + return result, nil } func (pb *PrefetchBuffer) tryGetRange(bufferIndex, bufferOffset, offset, length uint64) ([]byte, bool) { diff --git a/pkg/config.default.yaml b/pkg/config.default.yaml index 6705cb5..7111320 100644 --- a/pkg/config.default.yaml +++ b/pkg/config.default.yaml @@ -17,6 +17,7 @@ blobfs: idleTtlS: 60 minFileSizeBytes: 1048576 # 1MB segmentSizeBytes: 134217728 # 128MB + dataTimeoutS: 30 ignoreFileExt: - .clip mountPoint: /tmp/test diff --git a/pkg/types.go b/pkg/types.go index 4cd52ff..8d5723d 100644 --- a/pkg/types.go +++ b/pkg/types.go @@ -108,6 +108,7 @@ type BlobFsPrefetchConfig struct { IdleTtlS int `key:"idleTtlS" json:"idle_ttl_s"` SegmentSizeBytes uint64 `key:"segmentSizeBytes" json:"segment_size_bytes"` IgnoreFileExt []string `key:"ignoreFileExt" json:"ignore_file_ext"` + DataTimeoutS int `key:"dataTimeoutS" json:"data_timeout_s"` } type SourceConfig struct { From 4630b9f3da2681ecea0c20edf266433a7cea1758 Mon Sep 17 00:00:00 2001 From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com> Date: Mon, 13 Jan 2025 12:57:23 -0500 Subject: [PATCH 27/34] add total prefetch size --- pkg/blobfs_prefetch.go | 61 +++++++++++++++++++++--------------------
pkg/config.default.yaml | 1 + pkg/types.go | 13 +++++---- 3 files changed, 40 insertions(+), 35 deletions(-) diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go index 4992971..e07c41f 100644 --- a/pkg/blobfs_prefetch.go +++ b/pkg/blobfs_prefetch.go @@ -80,17 +80,18 @@ func (pm *PrefetchManager) evictIdleBuffers() { } type PrefetchBuffer struct { - ctx context.Context - cancelFunc context.CancelFunc - hash string - segments map[uint64]*segment - segmentSize uint64 - lastRead time.Time - fileSize uint64 - client *BlobCacheClient - mu sync.Mutex - cond *sync.Cond - dataTimeout time.Duration + ctx context.Context + cancelFunc context.CancelFunc + hash string + segments map[uint64]*segment + segmentSize uint64 + lastRead time.Time + fileSize uint64 + client *BlobCacheClient + mu sync.Mutex + cond *sync.Cond + dataTimeout time.Duration + totalPrefetchSize uint64 } type segment struct { @@ -102,27 +103,29 @@ type segment struct { } type PrefetchOpts struct { - Ctx context.Context - CancelFunc context.CancelFunc - Hash string - FileSize uint64 - SegmentSize uint64 - Offset uint64 - Client *BlobCacheClient - DataTimeout time.Duration + Ctx context.Context + CancelFunc context.CancelFunc + Hash string + FileSize uint64 + SegmentSize uint64 + Offset uint64 + Client *BlobCacheClient + DataTimeout time.Duration + TotalPrefetchSize uint64 } func NewPrefetchBuffer(opts PrefetchOpts) *PrefetchBuffer { pb := &PrefetchBuffer{ - ctx: opts.Ctx, - cancelFunc: opts.CancelFunc, - hash: opts.Hash, - lastRead: time.Now(), - fileSize: opts.FileSize, - client: opts.Client, - segments: make(map[uint64]*segment), - segmentSize: opts.SegmentSize, - dataTimeout: opts.DataTimeout, + ctx: opts.Ctx, + cancelFunc: opts.CancelFunc, + hash: opts.Hash, + lastRead: time.Now(), + fileSize: opts.FileSize, + client: opts.Client, + segments: make(map[uint64]*segment), + segmentSize: opts.SegmentSize, + totalPrefetchSize: opts.TotalPrefetchSize, + dataTimeout: opts.DataTimeout, } pb.cond = sync.NewCond(&pb.mu) return pb @@ -196,7 +199,7 @@ func (pb *PrefetchBuffer) evictIdle() bool { for _, index := range indicesToDelete { pb.mu.Lock() - Logger.Debugf("Evicting segment %s-%d", pb.hash, index) + Logger.Infof("Evicting segment %s-%d", pb.hash, index) segment := pb.segments[index] segment.data = nil delete(pb.segments, index) diff --git a/pkg/config.default.yaml b/pkg/config.default.yaml index 7111320..b144513 100644 --- a/pkg/config.default.yaml +++ b/pkg/config.default.yaml @@ -17,6 +17,7 @@ blobfs: idleTtlS: 60 minFileSizeBytes: 1048576 # 1MB segmentSizeBytes: 134217728 # 128MB + totalPrefetchSizeBytes: 1073741824 # 1GB dataTimeoutS: 30 ignoreFileExt: - .clip diff --git a/pkg/types.go b/pkg/types.go index 8d5723d..b8ed6a2 100644 --- a/pkg/types.go +++ b/pkg/types.go @@ -103,12 +103,13 @@ type BlobFsConfig struct { } type BlobFsPrefetchConfig struct { - Enabled bool `key:"enabled" json:"enabled"` - MinFileSizeBytes uint64 `key:"minFileSizeBytes" json:"min_file_size_bytes"` - IdleTtlS int `key:"idleTtlS" json:"idle_ttl_s"` - SegmentSizeBytes uint64 `key:"segmentSizeBytes" json:"segment_size_bytes"` - IgnoreFileExt []string `key:"ignoreFileExt" json:"ignore_file_ext"` - DataTimeoutS int `key:"dataTimeoutS" json:"data_timeout_s"` + Enabled bool `key:"enabled" json:"enabled"` + MinFileSizeBytes uint64 `key:"minFileSizeBytes" json:"min_file_size_bytes"` + TotalPrefetchSizeBytes uint64 `key:"totalPrefetchSizeBytes" json:"total_prefetch_size_bytes"` + IdleTtlS int `key:"idleTtlS" json:"idle_ttl_s"` + SegmentSizeBytes uint64 
`key:"segmentSizeBytes" json:"segment_size_bytes"` + IgnoreFileExt []string `key:"ignoreFileExt" json:"ignore_file_ext"` + DataTimeoutS int `key:"dataTimeoutS" json:"data_timeout_s"` } type SourceConfig struct { From a79c70997f2c85ebd061fd5bcfdc737894f620b1 Mon Sep 17 00:00:00 2001 From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com> Date: Mon, 13 Jan 2025 13:32:06 -0500 Subject: [PATCH 28/34] wip --- pkg/blobfs_prefetch.go | 113 +++++++++++++++++++++++++---------------- 1 file changed, 69 insertions(+), 44 deletions(-) diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go index e07c41f..012245b 100644 --- a/pkg/blobfs_prefetch.go +++ b/pkg/blobfs_prefetch.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "sync" + "sync/atomic" "time" ) @@ -14,18 +15,22 @@ const ( ) type PrefetchManager struct { - ctx context.Context - config BlobCacheConfig - buffers sync.Map - client *BlobCacheClient + ctx context.Context + config BlobCacheConfig + buffers sync.Map + client *BlobCacheClient + currentPrefetchSizeBytes uint64 + totalPrefetchSizeBytes uint64 } func NewPrefetchManager(ctx context.Context, config BlobCacheConfig, client *BlobCacheClient) *PrefetchManager { return &PrefetchManager{ - ctx: ctx, - config: config, - buffers: sync.Map{}, - client: client, + ctx: ctx, + config: config, + buffers: sync.Map{}, + client: client, + currentPrefetchSizeBytes: 0, + totalPrefetchSizeBytes: config.BlobFs.Prefetch.TotalPrefetchSizeBytes, } } @@ -76,22 +81,34 @@ func (pm *PrefetchManager) evictIdleBuffers() { }) } } +} + +func (pm *PrefetchManager) incrementPrefetchSize(size uint64) bool { + newTotal := atomic.AddUint64(&pm.currentPrefetchSizeBytes, size) + if newTotal > pm.totalPrefetchSizeBytes { + atomic.AddUint64(&pm.currentPrefetchSizeBytes, ^uint64(size-1)) + return false + } + return true +} +func (pm *PrefetchManager) decrementPrefetchSize(size uint64) { + atomic.AddUint64(&pm.currentPrefetchSizeBytes, ^uint64(size-1)) } type PrefetchBuffer struct { - ctx context.Context - cancelFunc context.CancelFunc - hash string - segments map[uint64]*segment - segmentSize uint64 - lastRead time.Time - fileSize uint64 - client *BlobCacheClient - mu sync.Mutex - cond *sync.Cond - dataTimeout time.Duration - totalPrefetchSize uint64 + ctx context.Context + cancelFunc context.CancelFunc + manager *PrefetchManager + hash string + segments map[uint64]*segment + segmentSize uint64 + lastRead time.Time + fileSize uint64 + client *BlobCacheClient + mu sync.Mutex + cond *sync.Cond + dataTimeout time.Duration } type segment struct { @@ -103,29 +120,29 @@ type segment struct { } type PrefetchOpts struct { - Ctx context.Context - CancelFunc context.CancelFunc - Hash string - FileSize uint64 - SegmentSize uint64 - Offset uint64 - Client *BlobCacheClient - DataTimeout time.Duration - TotalPrefetchSize uint64 + Ctx context.Context + CancelFunc context.CancelFunc + Hash string + FileSize uint64 + SegmentSize uint64 + Offset uint64 + Client *BlobCacheClient + DataTimeout time.Duration + Manager *PrefetchManager } func NewPrefetchBuffer(opts PrefetchOpts) *PrefetchBuffer { pb := &PrefetchBuffer{ - ctx: opts.Ctx, - cancelFunc: opts.CancelFunc, - hash: opts.Hash, - lastRead: time.Now(), - fileSize: opts.FileSize, - client: opts.Client, - segments: make(map[uint64]*segment), - segmentSize: opts.SegmentSize, - totalPrefetchSize: opts.TotalPrefetchSize, - dataTimeout: opts.DataTimeout, + ctx: opts.Ctx, + cancelFunc: opts.CancelFunc, + hash: opts.Hash, + manager: opts.Manager, + lastRead: time.Now(), + fileSize: 
opts.FileSize, + client: opts.Client, + segments: make(map[uint64]*segment), + segmentSize: opts.SegmentSize, + dataTimeout: opts.DataTimeout, } pb.cond = sync.NewCond(&pb.mu) return pb @@ -134,14 +151,18 @@ func NewPrefetchBuffer(opts PrefetchOpts) *PrefetchBuffer { func (pb *PrefetchBuffer) fetch(offset uint64, bufferSize uint64) { bufferIndex := offset / bufferSize - // Initialize internal buffer for this chunk of the content pb.mu.Lock() - _, exists := pb.segments[bufferIndex] - if exists { + if !pb.manager.incrementPrefetchSize(bufferSize) { pb.mu.Unlock() return } + if _, exists := pb.segments[bufferIndex]; exists { + pb.mu.Unlock() + pb.manager.decrementPrefetchSize(bufferSize) + return + } + s := &segment{ index: bufferIndex, data: make([]byte, 0, bufferSize), @@ -150,16 +171,17 @@ func (pb *PrefetchBuffer) fetch(offset uint64, bufferSize uint64) { fetching: true, } pb.segments[bufferIndex] = s + pb.mu.Unlock() contentChan, err := pb.client.GetContentStream(pb.hash, int64(offset), int64(bufferSize)) if err != nil { + pb.mu.Lock() delete(pb.segments, bufferIndex) pb.mu.Unlock() + pb.manager.decrementPrefetchSize(bufferSize) return } - pb.mu.Unlock() - for { select { case <-pb.ctx.Done(): @@ -201,10 +223,13 @@ func (pb *PrefetchBuffer) evictIdle() bool { pb.mu.Lock() Logger.Infof("Evicting segment %s-%d", pb.hash, index) segment := pb.segments[index] + segmentSize := uint64(len(segment.data)) segment.data = nil delete(pb.segments, index) pb.cond.Broadcast() pb.mu.Unlock() + + pb.manager.decrementPrefetchSize(segmentSize) } return unused From e106b463a507fbf28657bbd2b32b03da69553b84 Mon Sep 17 00:00:00 2001 From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com> Date: Mon, 13 Jan 2025 13:53:11 -0500 Subject: [PATCH 29/34] remove logs --- pkg/blobfs_prefetch.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go index 012245b..4cad67d 100644 --- a/pkg/blobfs_prefetch.go +++ b/pkg/blobfs_prefetch.go @@ -10,7 +10,7 @@ import ( const ( prefetchEvictionInterval = 5 * time.Second - prefetchSegmentIdleTTL = 10 * time.Second // remove stale segments if no reads in the past 10s + prefetchSegmentIdleTTL = 5 * time.Second // remove stale segments if no reads in the past 5s preemptiveFetchThresholdBytes = 16 * 1024 * 1024 // if the next segment is within 16MB of where we are reading, start fetching it ) @@ -53,6 +53,7 @@ func (pm *PrefetchManager) GetPrefetchBuffer(hash string, fileSize uint64) *Pref SegmentSize: pm.config.BlobFs.Prefetch.SegmentSizeBytes, DataTimeout: time.Second * time.Duration(pm.config.BlobFs.Prefetch.DataTimeoutS), Client: pm.client, + Manager: pm, }) pm.buffers.Store(hash, newBuffer) @@ -89,6 +90,7 @@ func (pm *PrefetchManager) incrementPrefetchSize(size uint64) bool { atomic.AddUint64(&pm.currentPrefetchSizeBytes, ^uint64(size-1)) return false } + return true } @@ -158,8 +160,8 @@ func (pb *PrefetchBuffer) fetch(offset uint64, bufferSize uint64) { } if _, exists := pb.segments[bufferIndex]; exists { - pb.mu.Unlock() pb.manager.decrementPrefetchSize(bufferSize) + pb.mu.Unlock() return } @@ -177,8 +179,8 @@ func (pb *PrefetchBuffer) fetch(offset uint64, bufferSize uint64) { if err != nil { pb.mu.Lock() delete(pb.segments, bufferIndex) - pb.mu.Unlock() pb.manager.decrementPrefetchSize(bufferSize) + pb.mu.Unlock() return } @@ -227,9 +229,9 @@ func (pb *PrefetchBuffer) evictIdle() bool { segment.data = nil delete(pb.segments, index) pb.cond.Broadcast() + pb.manager.decrementPrefetchSize(segmentSize) pb.mu.Unlock() - pb.manager.decrementPrefetchSize(segmentSize) } return unused
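A note on the arithmetic that patch 28 introduced and patch 29 reorders above: sync/atomic has no subtract operation for unsigned counters, so the code adds the two's complement instead. atomic.AddUint64(&x, ^uint64(n-1)) subtracts n, because ^(n-1) == -n modulo 2^64; this is the idiom the sync/atomic documentation itself suggests. A minimal standalone sketch of the same reserve/undo pattern follows; reserve, release, and the 1 GiB limit are illustrative names for this note, not identifiers from the series:

package main

import (
	"fmt"
	"sync/atomic"
)

// reserve optimistically adds n to *cur and backs the addition out if the
// limit would be exceeded, like incrementPrefetchSize above. Adding
// ^uint64(n-1) subtracts n in two's-complement arithmetic.
func reserve(cur *uint64, limit, n uint64) bool {
	if atomic.AddUint64(cur, n) > limit {
		atomic.AddUint64(cur, ^uint64(n-1)) // undo the reservation
		return false
	}
	return true
}

// release gives a reservation back, like decrementPrefetchSize above.
func release(cur *uint64, n uint64) {
	atomic.AddUint64(cur, ^uint64(n-1))
}

func main() {
	var used uint64
	limit := uint64(1 << 30) // e.g. a 1 GiB cap, as in totalPrefetchSizeBytes

	fmt.Println(reserve(&used, limit, 700<<20)) // true: 700 MiB fits under the cap
	fmt.Println(reserve(&used, limit, 700<<20)) // false: 1400 MiB would exceed it
	release(&used, 700<<20)
	fmt.Println(used) // 0
}

Two concurrent reserves can transiently push the counter past the limit before the loser backs out; the pattern trades that brief overshoot for a lock-free hot path.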
From 13312f97b34824492da8919acd055e729f9c05ef Mon Sep 17 00:00:00 2001 From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com> Date: Mon, 13 Jan 2025 13:54:38 -0500 Subject: [PATCH 30/34] more cleanup --- pkg/blobfs_prefetch.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go index 4cad67d..ff3351d 100644 --- a/pkg/blobfs_prefetch.go +++ b/pkg/blobfs_prefetch.go @@ -228,10 +228,9 @@ func (pb *PrefetchBuffer) evictIdle() bool { segmentSize := uint64(len(segment.data)) segment.data = nil delete(pb.segments, index) - pb.cond.Broadcast() pb.manager.decrementPrefetchSize(segmentSize) + pb.cond.Broadcast() pb.mu.Unlock() - } return unused From 075ddd6154fb1a0fef940a48586ef0c1c591b919 Mon Sep 17 00:00:00 2001 From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com> Date: Mon, 13 Jan 2025 16:00:25 -0500 Subject: [PATCH 31/34] remove prefetch limit --- logs.txt | 1149 +++++++++++++++++++++++++++++++++++++++ pkg/blobfs_prefetch.go | 150 +++-- pkg/config.default.yaml | 3 +- pkg/types.go | 13 +- 4 files changed, 1230 insertions(+), 85 deletions(-) create mode 100644 logs.txt diff --git a/logs.txt b/logs.txt new file mode 100644 index 0000000..3518461 --- /dev/null +++ b/logs.txt @@ -0,0 +1,1149 @@ +{"level":"info","ts":"2025-01-13T20:09:32.103Z","caller":"pkg/logger.go:70","msg":"Mounting to /cache"} +{"level":"info","ts":"2025-01-13T20:09:32.166Z","caller":"pkg/logger.go:70","msg":"Added new host @ blobcache-host-0af2b0.tailc480d.ts.net:2049 (PrivateAddr=172.18.0.3:2049, RTT=0s)"} +{"level":"info","ts":"2025-01-13T20:09:33.246Z","caller":"pkg/logger.go:70","msg":"Fetching segment a4426282975b6f142c5425ad49f569d80198cc80ccda7adeb6a11f084e991e7b-0"} +{"level":"info","ts":"2025-01-13T20:09:33.246Z","caller":"pkg/logger.go:70","msg":"Waiting for prefetch signal"} +fatal error: sync: unlock of unlocked mutex + +goroutine 126 [running]: +sync.fatal({0x1b09702?, 0x100000000000000?}) + /usr/local/go/src/runtime/panic.go:1031 +0x18 +sync.(*Mutex).unlockSlow(0xc000a20680, 0xffffffff) + /usr/local/go/src/sync/mutex.go:231 +0x35 +sync.(*Mutex).Unlock(0x0?) + /usr/local/go/src/sync/mutex.go:225 +0x25 +sync.(*Cond).Wait(0xc00021c4e0?) + /usr/local/go/src/sync/cond.go:70 +0x77 +github.com/beam-cloud/blobcache-v2/pkg.waitForCondition.func1() + /workspace/pkg/blobfs_prefetch.go:353 +0x25 +created by github.com/beam-cloud/blobcache-v2/pkg.waitForCondition in goroutine 200 + /workspace/pkg/blobfs_prefetch.go:352 +0x78 + +goroutine 1 [chan receive]: +main.main() + /workspace/e2e/fs/main.go:49 +0x296 [... several hundred lines of the committed goroutine dump trimmed here: idle tailscale/wireguard-go worker goroutines (logtail, tstun, RoutineEncryption/RoutineDecryption/RoutineHandshake, magicsock and netlink readers) with no further blobcache-v2 frames ...]
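The fatal error preserved in logs.txt above is the canonical sync.Cond misuse: Cond.Wait must be called with the associated lock held, since Wait unlocks it on entry (hence sync.(*Mutex).Unlock sitting directly under sync.(*Cond).Wait in the trace) and reacquires it before returning. The goroutine spawned at blobfs_prefetch.go:352 calls pb.cond.Wait() without first taking pb.mu, so the runtime aborts with "sync: unlock of unlocked mutex". A sketch of the required pairing, assuming cond is built over mu with sync.NewCond; waitForSegment and ready are illustrative names, not the series' waitForCondition:

package main

import "sync"

// waitForSegment blocks until ready() reports true. The lock is held at
// every call to Wait; Wait atomically releases it while parked and holds
// it again by the time it returns.
func waitForSegment(mu *sync.Mutex, cond *sync.Cond, ready func() bool) {
	mu.Lock()
	defer mu.Unlock()
	for !ready() { // re-check: a Broadcast does not guarantee the predicate
		cond.Wait()
	}
}

func main() {
	var mu sync.Mutex
	cond := sync.NewCond(&mu)
	done := false

	go func() {
		mu.Lock()
		done = true // publish state under the lock...
		mu.Unlock()
		cond.Broadcast() // ...then wake any waiter
	}()

	waitForSegment(&mu, cond, func() bool { return done })
}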
+ /go/pkg/mod/github.com/mdlayher/netlink@v1.7.2/conn.go:231 +0x87 +tailscale.com/net/netmon.(*nlConn).Receive(0xc000223ce0) + /go/pkg/mod/tailscale.com@v1.72.1/net/netmon/netmon_linux.go:72 +0x38 +tailscale.com/net/netmon.(*Monitor).pump(0xc00032c380) + /go/pkg/mod/tailscale.com@v1.72.1/net/netmon/netmon.go:346 +0x7b +created by tailscale.com/net/netmon.(*Monitor).Start in goroutine 1 + /go/pkg/mod/tailscale.com@v1.72.1/net/netmon/netmon.go:265 +0x185 + +goroutine 115 [select]: +tailscale.com/net/netmon.(*Monitor).debounce(0xc00032c380) + /go/pkg/mod/tailscale.com@v1.72.1/net/netmon/netmon.go:392 +0xc8 +created by tailscale.com/net/netmon.(*Monitor).Start in goroutine 1 + /go/pkg/mod/tailscale.com@v1.72.1/net/netmon/netmon.go:266 +0x1c5 + +goroutine 130 [select]: +github.com/mdlayher/socket.rwT[...].func2() + /go/pkg/mod/github.com/mdlayher/socket@v0.5.0/conn.go:778 +0xb5 +created by github.com/mdlayher/socket.rwT[...] in goroutine 114 + /go/pkg/mod/github.com/mdlayher/socket@v0.5.0/conn.go:775 +0x532 + +goroutine 146 [select]: +gvisor.dev/gvisor/pkg/sync.Gopark(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sync/runtime_unsafe.go:33 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).nextWaker(0xc00028caa0, 0x1, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:210 +0x79 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).fetch(0xc00028caa0, 0x1, 0x0) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:257 +0x2b +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).Fetch(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:280 +gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*processor).start(0xc00028ca88, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:292 +0xa9 +created by gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*dispatcher).init in goroutine 1 + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:398 +0x136 + +goroutine 147 [select]: +gvisor.dev/gvisor/pkg/sync.Gopark(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sync/runtime_unsafe.go:33 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).nextWaker(0xc00028cb30, 0x1, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:210 +0x79 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).fetch(0xc00028cb30, 0x1, 0x0) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:257 +0x2b +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).Fetch(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:280 +gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*processor).start(0xc00028cb18, 0xc00043c3f0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:292 +0xa9 +created by gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*dispatcher).init in goroutine 1 + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:398 +0x136 + +goroutine 148 [select]: +gvisor.dev/gvisor/pkg/sync.Gopark(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sync/runtime_unsafe.go:33 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).nextWaker(0xc00028cbc0, 0x1, 0x0?) 
+ /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:210 +0x79 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).fetch(0xc00028cbc0, 0x1, 0x0) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:257 +0x2b +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).Fetch(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:280 +gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*processor).start(0xc00028cba8, 0xc00032c380?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:292 +0xa9 +created by gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*dispatcher).init in goroutine 1 + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:398 +0x136 + +goroutine 149 [select]: +gvisor.dev/gvisor/pkg/sync.Gopark(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sync/runtime_unsafe.go:33 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).nextWaker(0xc00028cc50, 0x1, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:210 +0x79 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).fetch(0xc00028cc50, 0x1, 0x0) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:257 +0x2b +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).Fetch(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:280 +gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*processor).start(0xc00028cc38, 0x1?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:292 +0xa9 +created by gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*dispatcher).init in goroutine 1 + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:398 +0x136 + +goroutine 150 [select]: +gvisor.dev/gvisor/pkg/sync.Gopark(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sync/runtime_unsafe.go:33 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).nextWaker(0xc00028cce0, 0x1, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:210 +0x79 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).fetch(0xc00028cce0, 0x1, 0x0) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:257 +0x2b +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).Fetch(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:280 +gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*processor).start(0xc00028ccc8, 0x1?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:292 +0xa9 +created by gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*dispatcher).init in goroutine 1 + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:398 +0x136 + +goroutine 151 [select]: +gvisor.dev/gvisor/pkg/sync.Gopark(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sync/runtime_unsafe.go:33 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).nextWaker(0xc00028cd70, 0x1, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:210 +0x79 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).fetch(0xc00028cd70, 0x1, 0x0) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:257 +0x2b +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).Fetch(...) 
+ /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:280 +gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*processor).start(0xc00028cd58, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:292 +0xa9 +created by gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*dispatcher).init in goroutine 1 + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:398 +0x136 + +goroutine 152 [select]: +gvisor.dev/gvisor/pkg/sync.Gopark(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sync/runtime_unsafe.go:33 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).nextWaker(0xc00028ce00, 0x1, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:210 +0x79 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).fetch(0xc00028ce00, 0x1, 0x0) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:257 +0x2b +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).Fetch(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:280 +gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*processor).start(0xc00028cde8, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:292 +0xa9 +created by gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*dispatcher).init in goroutine 1 + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:398 +0x136 + +goroutine 153 [select]: +gvisor.dev/gvisor/pkg/sync.Gopark(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sync/runtime_unsafe.go:33 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).nextWaker(0xc00028ce90, 0x1, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:210 +0x79 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).fetch(0xc00028ce90, 0x1, 0x0) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:257 +0x2b +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).Fetch(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:280 +gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*processor).start(0xc00028ce78, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:292 +0xa9 +created by gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*dispatcher).init in goroutine 1 + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:398 +0x136 + +goroutine 154 [select]: +gvisor.dev/gvisor/pkg/sync.Gopark(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sync/runtime_unsafe.go:33 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).nextWaker(0xc00028cf20, 0x1, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:210 +0x79 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).fetch(0xc00028cf20, 0x1, 0x0) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:257 +0x2b +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).Fetch(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:280 +gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*processor).start(0xc00028cf08, 0x0?) 
+ /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:292 +0xa9 +created by gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*dispatcher).init in goroutine 1 + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:398 +0x136 + +goroutine 155 [select]: +gvisor.dev/gvisor/pkg/sync.Gopark(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sync/runtime_unsafe.go:33 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).nextWaker(0xc00028cfb0, 0x1, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:210 +0x79 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).fetch(0xc00028cfb0, 0x1, 0x0) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:257 +0x2b +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).Fetch(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:280 +gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*processor).start(0xc00028cf98, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:292 +0xa9 +created by gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*dispatcher).init in goroutine 1 + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:398 +0x136 + +goroutine 156 [select]: +gvisor.dev/gvisor/pkg/sync.Gopark(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sync/runtime_unsafe.go:33 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).nextWaker(0xc00028d040, 0x1, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:210 +0x79 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).fetch(0xc00028d040, 0x1, 0x0) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:257 +0x2b +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).Fetch(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:280 +gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*processor).start(0xc00028d028, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:292 +0xa9 +created by gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*dispatcher).init in goroutine 1 + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:398 +0x136 + +goroutine 157 [select]: +gvisor.dev/gvisor/pkg/sync.Gopark(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sync/runtime_unsafe.go:33 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).nextWaker(0xc00028d0d0, 0x1, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:210 +0x79 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).fetch(0xc00028d0d0, 0x1, 0x0) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:257 +0x2b +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).Fetch(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:280 +gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*processor).start(0xc00028d0b8, 0x0?) 
+ /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:292 +0xa9 +created by gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*dispatcher).init in goroutine 1 + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:398 +0x136 + +goroutine 158 [select]: +gvisor.dev/gvisor/pkg/sync.Gopark(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sync/runtime_unsafe.go:33 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).nextWaker(0xc00028d160, 0x1, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:210 +0x79 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).fetch(0xc00028d160, 0x1, 0x0) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:257 +0x2b +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).Fetch(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:280 +gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*processor).start(0xc00028d148, 0xc0001a1cf0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:292 +0xa9 +created by gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*dispatcher).init in goroutine 1 + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:398 +0x136 + +goroutine 159 [select]: +gvisor.dev/gvisor/pkg/sync.Gopark(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sync/runtime_unsafe.go:33 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).nextWaker(0xc00028d1f0, 0x1, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:210 +0x79 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).fetch(0xc00028d1f0, 0x1, 0x0) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:257 +0x2b +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).Fetch(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:280 +gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*processor).start(0xc00028d1d8, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:292 +0xa9 +created by gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*dispatcher).init in goroutine 1 + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:398 +0x136 + +goroutine 160 [select]: +gvisor.dev/gvisor/pkg/sync.Gopark(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sync/runtime_unsafe.go:33 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).nextWaker(0xc00028d280, 0x1, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:210 +0x79 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).fetch(0xc00028d280, 0x1, 0x0) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:257 +0x2b +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).Fetch(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:280 +gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*processor).start(0xc00028d268, 0x0?) 
+ /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:292 +0xa9 +created by gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*dispatcher).init in goroutine 1 + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:398 +0x136 + +goroutine 161 [select]: +gvisor.dev/gvisor/pkg/sync.Gopark(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sync/runtime_unsafe.go:33 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).nextWaker(0xc00028d310, 0x1, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:210 +0x79 +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).fetch(0xc00028d310, 0x1, 0x0) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:257 +0x2b +gvisor.dev/gvisor/pkg/sleep.(*Sleeper).Fetch(...) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/sleep/sleep_unsafe.go:280 +gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*processor).start(0xc00028d2f8, 0x0?) + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:292 +0xa9 +created by gvisor.dev/gvisor/pkg/tcpip/transport/tcp.(*dispatcher).init in goroutine 1 + /go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20240722211153-64c016c92987/pkg/tcpip/transport/tcp/dispatcher.go:398 +0x136 + +goroutine 162 [select]: +tailscale.com/wgengine/netstack.(*queue).ReadContext(...) + /go/pkg/mod/tailscale.com@v1.72.1/wgengine/netstack/link_endpoint.go:48 +tailscale.com/wgengine/netstack.(*linkEndpoint).ReadContext(0x100000001?, {0x1d901f0?, 0xc00015a320?}) + /go/pkg/mod/tailscale.com@v1.72.1/wgengine/netstack/link_endpoint.go:147 +0x88 +tailscale.com/wgengine/netstack.(*Impl).inject(0xc00032c0e0) + /go/pkg/mod/tailscale.com@v1.72.1/wgengine/netstack/netstack.go:828 +0x3e +created by tailscale.com/wgengine/netstack.(*Impl).Start in goroutine 1 + /go/pkg/mod/tailscale.com@v1.72.1/wgengine/netstack/netstack.go:565 +0x336 + +goroutine 163 [select]: +tailscale.com/ipn/ipnlocal.(*LocalBackend).readPoller(0xc000810488) + /go/pkg/mod/tailscale.com@v1.72.1/ipn/ipnlocal/local.go:2470 +0x14c +created by tailscale.com/ipn/ipnlocal.(*LocalBackend).Start.func2 in goroutine 1 + /go/pkg/mod/tailscale.com@v1.72.1/ipn/ipnlocal/local.go:1937 +0x5f + +goroutine 164 [select]: +golang.org/x/net/http2.(*ClientConn).roundTrip(0xc0011e4000, 0xc0015ec8c0, 0x0) + /go/pkg/mod/golang.org/x/net@v0.27.0/http2/transport.go:1379 +0x4f8 +golang.org/x/net/http2.(*ClientConn).RoundTrip(...) + /go/pkg/mod/golang.org/x/net@v0.27.0/http2/transport.go:1276 +tailscale.com/internal/noiseconn.(*Conn).RoundTrip(0xc001678000?, 0x1d901f0?) + /go/pkg/mod/tailscale.com@v1.72.1/internal/noiseconn/conn.go:69 +0x19 +tailscale.com/control/controlclient.(*NoiseClient).RoundTrip(0xa271c8?, 0xc0015ec8c0) + /go/pkg/mod/tailscale.com@v1.72.1/control/controlclient/noise.go:252 +0x5c +net/http.send(0xc0015ec8c0, {0x1d77aa0, 0xc001678000}, {0x1?, 0x4175cb?, 0x0?}) + /usr/local/go/src/net/http/client.go:259 +0x5e4 +net/http.(*Client).send(0xc0015f7dd0, 0xc0015ec8c0, {0x17f29c0?, 0x48?, 0x0?}) + /usr/local/go/src/net/http/client.go:180 +0x98 +net/http.(*Client).do(0xc0015f7dd0, 0xc0015ec8c0) + /usr/local/go/src/net/http/client.go:725 +0x8bc +net/http.(*Client).Do(...) 
+ /usr/local/go/src/net/http/client.go:590 +tailscale.com/control/controlclient.(*Direct).doLogin(0xc0001e3a00, {0x1d901f0, 0xc00015ab90}, {0xe, 0x0, {0x0, 0x0}, 0x0, 0x0, {0x0, ...}}) + /go/pkg/mod/tailscale.com@v1.72.1/control/controlclient/direct.go:673 +0x1ee5 +tailscale.com/control/controlclient.(*Direct).doLoginOrRegen(0xc0001e3a00, {0x1d901f0, 0xc00015ab90}, {0xe, 0x0, {0x0, 0x0}, 0x0, 0x0, {0x0, ...}}) + /go/pkg/mod/tailscale.com@v1.72.1/control/controlclient/direct.go:430 +0x69 +tailscale.com/control/controlclient.(*Direct).TryLogin(0xc0001e3a00, {0x1d901f0, 0xc00015ab90}, 0xe) + /go/pkg/mod/tailscale.com@v1.72.1/control/controlclient/direct.go:418 +0x139 +tailscale.com/control/controlclient.(*Auto).authRoutine(0xc000436b40) + /go/pkg/mod/tailscale.com@v1.72.1/control/controlclient/auto.go:340 +0x47e +created by tailscale.com/control/controlclient.(*Auto).Start in goroutine 1 + /go/pkg/mod/tailscale.com@v1.72.1/control/controlclient/auto.go:227 +0x56 + +goroutine 165 [chan receive]: +tailscale.com/control/controlclient.(*Auto).mapRoutine(0xc000436b40) + /go/pkg/mod/tailscale.com@v1.72.1/control/controlclient/auto.go:505 +0x2c5 +created by tailscale.com/control/controlclient.(*Auto).Start in goroutine 1 + /go/pkg/mod/tailscale.com@v1.72.1/control/controlclient/auto.go:228 +0x96 + +goroutine 166 [select]: +tailscale.com/control/controlclient.(*Auto).updateRoutine(0xc000436b40) + /go/pkg/mod/tailscale.com@v1.72.1/control/controlclient/auto.go:74 +0x492 +created by tailscale.com/control/controlclient.(*Auto).Start in goroutine 1 + /go/pkg/mod/tailscale.com@v1.72.1/control/controlclient/auto.go:229 +0xd6 + +goroutine 119 [select]: +golang.org/x/net/http2.(*clientStream).writeRequest(0xc0011e4180, 0xc0015ec8c0, 0x0) + /go/pkg/mod/golang.org/x/net@v0.27.0/http2/transport.go:1532 +0xa65 +golang.org/x/net/http2.(*clientStream).doRequest(0xc0011e4180, 0x897d65?, 0xc00166ea20?) + /go/pkg/mod/golang.org/x/net@v0.27.0/http2/transport.go:1410 +0x56 +created by golang.org/x/net/http2.(*ClientConn).roundTrip in goroutine 164 + /go/pkg/mod/golang.org/x/net@v0.27.0/http2/transport.go:1315 +0x3d8 + +goroutine 137 [select]: +google.golang.org/grpc/internal/grpcsync.(*CallbackSerializer).run(0xc0001a07b0, {0x1d901f0, 0xc00023cb90}) + /go/pkg/mod/google.golang.org/grpc@v1.62.0/internal/grpcsync/callback_serializer.go:76 +0x10c +created by google.golang.org/grpc/internal/grpcsync.NewCallbackSerializer in goroutine 5 + /go/pkg/mod/google.golang.org/grpc@v1.62.0/internal/grpcsync/callback_serializer.go:52 +0x11a + +goroutine 171 [select]: +tailscale.com/tsnet.(*Server).printAuthURLLoop(0xc000298680) + /go/pkg/mod/tailscale.com@v1.72.1/tsnet/tsnet.go:733 +0x19b +created by tailscale.com/tsnet.(*Server).start in goroutine 1 + /go/pkg/mod/tailscale.com@v1.72.1/tsnet/tsnet.go:633 +0x134b + +goroutine 172 [select]: +tailscale.com/net/memnet.(*Listener).Accept(0x2a43ea0?) 
+goroutine 5 [select]:
+github.com/beam-cloud/blobcache-v2/pkg.(*DiscoveryClient).StartInBackground(0xc000a801e0, {0x1d90180, 0x2ab0480})
+	/workspace/pkg/discovery.go:70 +0x1ee
+created by github.com/beam-cloud/blobcache-v2/pkg.NewBlobCacheClient in goroutine 1
+	/workspace/pkg/client.go:108 +0x81f
+
+goroutine 275 [runnable]:
+github.com/beam-cloud/blobcache-v2/pkg.(*BlobCacheClient).GetContentStream(0xc0000e6008, {0xc00023a3c0, 0x40}, 0x0, 0x8000000)
+	/workspace/pkg/client.go:313 +0x112
+github.com/beam-cloud/blobcache-v2/pkg.(*PrefetchBuffer).fetch(0xc0005a4680, 0x0, 0x8000000)
+	/workspace/pkg/blobfs_prefetch.go:186 +0x2a6
+created by github.com/beam-cloud/blobcache-v2/pkg.(*PrefetchBuffer).tryGetRange in goroutine 200
+	/workspace/pkg/blobfs_prefetch.go:326 +0x3e5
+
+[... noiseconn/controlbase reader (118) and gRPC callback-serializer/transport goroutines (210, 135, 136, 177) elided ...]
+
+goroutine 194 [select]:
+github.com/beam-cloud/blobcache-v2/pkg.(*BlobCacheClient).manageLocalClientCache.func1()
+	/workspace/pkg/client.go:322 +0xea
+created by github.com/beam-cloud/blobcache-v2/pkg.(*BlobCacheClient).manageLocalClientCache in goroutine 6
+	/workspace/pkg/client.go:317 +0x6b
+
+goroutine 139 [select]:
+github.com/beam-cloud/blobcache-v2/pkg.(*BlobCacheClient).monitorHost(0xc0000e6008, 0xc000594450)
+	/workspace/pkg/client.go:193 +0xe5
+created by github.com/beam-cloud/blobcache-v2/pkg.(*BlobCacheClient).addHost in goroutine 5
+	/workspace/pkg/client.go:184 +0x7cc
+
+goroutine 196 [select]:
+github.com/beam-cloud/blobcache-v2/pkg.(*PrefetchManager).evictIdleBuffers(0xc000a20488)
+	/workspace/pkg/blobfs_prefetch.go:71 +0xb2
+created by github.com/beam-cloud/blobcache-v2/pkg.(*PrefetchManager).Start in goroutine 1
+	/workspace/pkg/blobfs_prefetch.go:44 +0x4f
+
+goroutine 197 [semacquire]:
+sync.runtime_Semacquire(0xc00037ec10?)
+	/usr/local/go/src/runtime/sema.go:71 +0x25
+sync.(*WaitGroup).Wait(0xc00021c4e0?)
+	/usr/local/go/src/sync/waitgroup.go:118 +0x48
+github.com/hanwen/go-fuse/v2/fuse.(*Server).Wait(...)
+	/go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:450
+github.com/beam-cloud/blobcache-v2/pkg.Mount.func1.1()
+	/workspace/pkg/blobfs.go:151 +0xa6
+created by github.com/beam-cloud/blobcache-v2/pkg.Mount.func1 in goroutine 1
+	/workspace/pkg/blobfs.go:143 +0x6e
+
+goroutine 200 [select]:
+github.com/beam-cloud/blobcache-v2/pkg.(*PrefetchBuffer).waitForSignal(0xc0005a4680)
+	/workspace/pkg/blobfs_prefetch.go:301 +0x166
+github.com/beam-cloud/blobcache-v2/pkg.(*PrefetchBuffer).GetRange(0xc0005a4680, 0xc00023a3c0?, 0x40?)
+	/workspace/pkg/blobfs_prefetch.go:286 +0x192
+github.com/beam-cloud/blobcache-v2/pkg.(*FSNode).Read(0xc000a40100, {0x1a9b500?, 0x0?}, {0x5b36ca?, 0xc0005950b0?}, {0xc001800000, 0x20000, 0x20000}, 0x0)
+	/workspace/pkg/blobfs_node.go:200 +0x13f
+github.com/hanwen/go-fuse/v2/fs.(*rawBridge).Read(0x0?, 0xc0002bf6c0, 0xc00099e3e0, {0xc001800000, 0x20000, 0x20000})
+	/go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fs/bridge.go:770 +0x136
+github.com/hanwen/go-fuse/v2/fuse.doRead(0xc00021c4e0, 0xc00099e248)
+	/go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/opcode.go:398 +0x7c
+github.com/hanwen/go-fuse/v2/fuse.(*Server).handleRequest(0xc00021c4e0, 0xc00099e248)
+	/go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:527 +0x2d6
+github.com/hanwen/go-fuse/v2/fuse.(*Server).loop(0xc00021c4e0, 0x0)
+	/go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:500 +0x110
+github.com/hanwen/go-fuse/v2/fuse.(*Server).Serve(0xc00021c4e0)
+	/go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:422 +0x29
+created by github.com/beam-cloud/blobcache-v2/pkg.Mount.func1.1 in goroutine 197
+	/workspace/pkg/blobfs.go:144 +0x65
+
+[... six identical go-fuse Server.loop readers (141-145, 226) blocked in syscall.Read, the os/signal loop (199), and net/http persistConn read/write loops (124, 125) elided ...]
+
+goroutine 127 [sync.Cond.Wait]:
+sync.runtime_notifyListWait(0xc000350550, 0x0)
+	/usr/local/go/src/runtime/sema.go:587 +0x159
+sync.(*Cond).Wait(0xc00021c4e0?)
+	/usr/local/go/src/sync/cond.go:71 +0x85
+github.com/beam-cloud/blobcache-v2/pkg.waitForCondition.func1()
+	/workspace/pkg/blobfs_prefetch.go:353 +0x25
+created by github.com/beam-cloud/blobcache-v2/pkg.waitForCondition in goroutine 200
+	/workspace/pkg/blobfs_prefetch.go:352 +0x78
+
+goroutine 99 [runnable]:
+github.com/beam-cloud/blobcache-v2/pkg.(*BlobCacheClient).GetContentStream.func1()
+	/workspace/pkg/client.go:272
+created by github.com/beam-cloud/blobcache-v2/pkg.(*BlobCacheClient).GetContentStream in goroutine 275
+	/workspace/pkg/client.go:272 +0x112
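One number in the trace above is worth decoding before the patch that follows. Goroutine 275's frame fetch(0xc0005a4680, 0x0, 0x8000000) shows the receiver pointer, a byte offset of 0, and a fetch size of 0x8000000 bytes, which is 134217728, i.e. the 128 MiB default that the diff below carries over as windowSizeBytes. The offset-to-window arithmetic that GetRange and tryGetRange use is plain integer division and modulo; the following is a minimal, self-contained sketch of it (the window size is the config default from the diff below; the sample offsets are invented for illustration):

package main

import "fmt"

// Sketch of the window indexing used by PrefetchBuffer.GetRange/tryGetRange:
// a read at byte offset off falls into window off/windowSize, at relative
// offset off%windowSize within that window's data slice.
func main() {
	const windowSize uint64 = 134217728 // 128 MiB, the windowSizeBytes default

	for _, off := range []uint64{0, 134217727, 134217728, 314572800} {
		fmt.Printf("offset %11d -> window %d, offset-in-window %9d\n",
			off, off/windowSize, off%windowSize)
	}
}

The preemptive fetch in tryGetRange is the same arithmetic one window ahead: once the unread remainder of the current window drops below preemptiveFetchThresholdBytes, it starts fetch((bufferIndex+1)*windowSize, windowSize) in the background.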
diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go
index ff3351d..fa036aa 100644
--- a/pkg/blobfs_prefetch.go
+++ b/pkg/blobfs_prefetch.go
@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 	"sync"
-	"sync/atomic"
 	"time"
 )
 
@@ -20,7 +19,6 @@ type PrefetchManager struct {
 	buffers                  sync.Map
 	client                   *BlobCacheClient
 	currentPrefetchSizeBytes uint64
-	totalPrefetchSizeBytes   uint64
 }
 
 func NewPrefetchManager(ctx context.Context, config BlobCacheConfig, client *BlobCacheClient) *PrefetchManager {
@@ -30,7 +28,6 @@
 		buffers:                  sync.Map{},
 		client:                   client,
 		currentPrefetchSizeBytes: 0,
-		totalPrefetchSizeBytes:   config.BlobFs.Prefetch.TotalPrefetchSizeBytes,
 	}
 }
 
@@ -50,7 +47,7 @@ func (pm *PrefetchManager) GetPrefetchBuffer(hash string, fileSize uint64) *Pref
 		CancelFunc:  cancel,
 		Hash:        hash,
 		FileSize:    fileSize,
-		SegmentSize: pm.config.BlobFs.Prefetch.SegmentSizeBytes,
+		WindowSize:  pm.config.BlobFs.Prefetch.WindowSizeBytes,
 		DataTimeout: time.Second * time.Duration(pm.config.BlobFs.Prefetch.DataTimeoutS),
 		Client:      pm.client,
 		Manager:     pm,
@@ -84,36 +81,22 @@ func (pm *PrefetchManager) evictIdleBuffers() {
 	}
 }
 
-func (pm *PrefetchManager) incrementPrefetchSize(size uint64) bool {
-	newTotal := atomic.AddUint64(&pm.currentPrefetchSizeBytes, size)
-	if newTotal > pm.totalPrefetchSizeBytes {
-		atomic.AddUint64(&pm.currentPrefetchSizeBytes, ^uint64(size-1))
-		return false
-	}
-
-	return true
-}
-
-func (pm *PrefetchManager) decrementPrefetchSize(size uint64) {
-	atomic.AddUint64(&pm.currentPrefetchSizeBytes, ^uint64(size-1))
-}
-
 type PrefetchBuffer struct {
 	ctx         context.Context
 	cancelFunc  context.CancelFunc
 	manager     *PrefetchManager
 	hash        string
-	segments    map[uint64]*segment
-	segmentSize uint64
+	windows     map[uint64]*window
+	windowSize  uint64
 	lastRead    time.Time
 	fileSize    uint64
 	client      *BlobCacheClient
 	mu          sync.Mutex
-	cond        *sync.Cond
+	dataCond    *sync.Cond
 	dataTimeout time.Duration
 }
 
-type segment struct {
+type window struct {
 	index      uint64
 	data       []byte
 	readLength uint64
@@ -126,7 +109,7 @@ type PrefetchOpts struct {
 	CancelFunc  context.CancelFunc
 	Hash        string
 	FileSize    uint64
-	SegmentSize uint64
+	WindowSize  uint64
 	Offset      uint64
 	Client      *BlobCacheClient
 	DataTimeout time.Duration
@@ -142,11 +125,12 @@ func NewPrefetchBuffer(opts PrefetchOpts) *PrefetchBuffer {
 		lastRead:    time.Now(),
 		fileSize:    opts.FileSize,
 		client:      opts.Client,
-		segments:    make(map[uint64]*segment),
-		segmentSize: opts.SegmentSize,
+		windows:     make(map[uint64]*window),
+		windowSize:  opts.WindowSize,
 		dataTimeout: opts.DataTimeout,
+		mu:          sync.Mutex{},
 	}
-	pb.cond = sync.NewCond(&pb.mu)
+	pb.dataCond = sync.NewCond(&pb.mu)
 	return pb
 }
 
@@ -154,32 +138,25 @@ func (pb *PrefetchBuffer) fetch(offset uint64, bufferSize uint64) {
 	bufferIndex := offset / bufferSize
 
 	pb.mu.Lock()
-	if !pb.manager.incrementPrefetchSize(bufferSize) {
+	if _, exists := pb.windows[bufferIndex]; exists {
 		pb.mu.Unlock()
 		return
 	}
 
-	if _, exists := pb.segments[bufferIndex]; exists {
-		pb.manager.decrementPrefetchSize(bufferSize)
-		pb.mu.Unlock()
-		return
-	}
-
-	s := &segment{
+	w := &window{
 		index:      bufferIndex,
 		data:       make([]byte, 0, bufferSize),
 		readLength: 0,
 		lastRead:   time.Now(),
 		fetching:   true,
 	}
-	pb.segments[bufferIndex] = s
+	pb.windows[bufferIndex] = w
 	pb.mu.Unlock()
 
 	contentChan, err := pb.client.GetContentStream(pb.hash, int64(offset), int64(bufferSize))
 	if err != nil {
 		pb.mu.Lock()
-		delete(pb.segments, bufferIndex)
-		pb.manager.decrementPrefetchSize(bufferSize)
+		delete(pb.windows, bufferIndex)
 		pb.mu.Unlock()
 		return
 	}
@@ -191,17 +168,18 @@ func (pb *PrefetchBuffer) fetch(offset uint64, bufferSize uint64) {
 		case chunk, ok := <-contentChan:
 			if !ok {
 				pb.mu.Lock()
-				s.fetching = false
-				s.lastRead = time.Now()
-				pb.cond.Broadcast()
+				w.fetching = false
+				w.lastRead = time.Now()
+				pb.dataCond.Broadcast()
 				pb.mu.Unlock()
 				return
 			}
 
 			pb.mu.Lock()
-			s.data = append(s.data, chunk...)
-			s.readLength += uint64(len(chunk))
-			pb.cond.Broadcast()
+			w.data = append(w.data, chunk...)
+			w.readLength += uint64(len(chunk))
+			w.lastRead = time.Now()
+			pb.dataCond.Broadcast()
 			pb.mu.Unlock()
 		}
 	}
 }
@@ -212,8 +190,8 @@ func (pb *PrefetchBuffer) evictIdle() bool {
 	var indicesToDelete []uint64
 
 	pb.mu.Lock()
-	for index, segment := range pb.segments {
-		if time.Since(segment.lastRead) > prefetchSegmentIdleTTL && !segment.fetching {
+	for index, window := range pb.windows {
+		if time.Since(window.lastRead) > prefetchSegmentIdleTTL && !window.fetching {
 			indicesToDelete = append(indicesToDelete, index)
 		} else {
 			unused = false
@@ -224,12 +202,9 @@
 	for _, index := range indicesToDelete {
 		pb.mu.Lock()
 		Logger.Infof("Evicting segment %s-%d", pb.hash, index)
-		segment := pb.segments[index]
-		segmentSize := uint64(len(segment.data))
-		segment.data = nil
-		delete(pb.segments, index)
-		pb.manager.decrementPrefetchSize(segmentSize)
-		pb.cond.Broadcast()
+		window := pb.windows[index]
+		window.data = nil
+		delete(pb.windows, index)
 		pb.mu.Unlock()
 	}
 
@@ -242,22 +217,21 @@ func (pb *PrefetchBuffer) Clear() {
 	pb.mu.Lock()
 	defer pb.mu.Unlock()
 
-	// Clear all segment data
-	for _, segment := range pb.segments {
-		segment.data = nil
+	// Clear all window data
+	for _, window := range pb.windows {
+		window.data = nil
 	}
 
 	// Reinitialize the map to clear all entries
-	pb.segments = make(map[uint64]*segment)
+	pb.windows = make(map[uint64]*window)
 }
 
-func (pb *PrefetchBuffer) GetRange(offset uint64, length uint64) ([]byte, error) {
-	bufferSize := pb.segmentSize
+func (pb *PrefetchBuffer) GetRange(offset, length uint64) ([]byte, error) {
+	bufferSize := pb.windowSize
 	bufferIndex := offset / bufferSize
 	bufferOffset := offset % bufferSize
 
 	var result []byte
-	timeoutChan := time.After(pb.dataTimeout)
 
 	for length > 0 {
 		data, ready := pb.tryGetRange(bufferIndex, bufferOffset, offset, length)
@@ -269,14 +243,9 @@
 			bufferIndex = offset / bufferSize
 			bufferOffset = offset % bufferSize
 		} else {
-			select {
-			case <-timeoutChan:
-				return nil, fmt.Errorf("timeout occurred waiting for prefetch data")
-			default:
-				// If data is not ready, wait for more data to be available
-				pb.mu.Lock()
-				pb.cond.Wait()
-				pb.mu.Unlock()
-			}
+			Logger.Infof("Waiting for prefetch signal")
+			if err := pb.waitForSignal(); err != nil {
+				return nil, err
+			}
 		}
 	}
 
@@ -284,34 +253,63 @@
 	return result, nil
 }
 
+func (pb *PrefetchBuffer) waitForSignal() error {
+	timeoutChan := time.After(pb.dataTimeout)
+
+	for {
+		select {
+		case <-waitForCondition(pb.dataCond):
+			Logger.Infof("Prefetch data ready")
+			return nil
+		case <-timeoutChan:
+			Logger.Infof("Timeout occurred waiting for prefetch data")
+			return fmt.Errorf("timeout occurred waiting for prefetch data")
+		case <-pb.ctx.Done():
+			return fmt.Errorf("context canceled")
+		}
+	}
+}
+
 func (pb *PrefetchBuffer) tryGetRange(bufferIndex, bufferOffset, offset, length uint64) ([]byte, bool) {
 	pb.mu.Lock()
 	defer pb.mu.Unlock()
-	segment, exists := pb.segments[bufferIndex]
+	window, exists := pb.windows[bufferIndex]
 
 	// Initiate a fetch operation if the buffer does not exist
 	if !exists {
+		Logger.Infof("Fetching segment %s-%d", pb.hash, bufferIndex)
-		go pb.fetch(bufferIndex*pb.segmentSize, pb.segmentSize)
+		go pb.fetch(bufferIndex*pb.windowSize, pb.windowSize)
 		return nil, false
-	} else if segment.readLength > bufferOffset {
-		segment.lastRead = time.Now()
+	} else if window.readLength > bufferOffset {
+		window.lastRead = time.Now()
 
 		// Calculate the relative offset within the buffer
-		relativeOffset := offset - (bufferIndex * pb.segmentSize)
-		availableLength := segment.readLength - relativeOffset
+		relativeOffset := offset - (bufferIndex * pb.windowSize)
+		availableLength := window.readLength - relativeOffset
 		readLength := min(int64(length), int64(availableLength))
 
 		// Pre-emptively start fetching the next buffer if within the threshold
-		if segment.readLength-relativeOffset <= preemptiveFetchThresholdBytes {
+		if window.readLength-relativeOffset <= preemptiveFetchThresholdBytes {
 			nextBufferIndex := bufferIndex + 1
-			if _, nextExists := pb.segments[nextBufferIndex]; !nextExists {
-				go pb.fetch(nextBufferIndex*pb.segmentSize, pb.segmentSize)
+			if _, nextExists := pb.windows[nextBufferIndex]; !nextExists {
+				go pb.fetch(nextBufferIndex*pb.windowSize, pb.windowSize)
			}
 		}
 
-		return segment.data[relativeOffset : int64(relativeOffset)+int64(readLength)], true
+		return window.data[relativeOffset : int64(relativeOffset)+int64(readLength)], true
 	}
 
 	return nil, false
 }
+
+func waitForCondition(cond *sync.Cond) <-chan struct{} {
+	ch := make(chan struct{})
+	go func() {
+		cond.L.Lock()
+		cond.Wait()
+		cond.L.Unlock()
+		close(ch)
+	}()
+	return ch
+}
diff --git a/pkg/config.default.yaml b/pkg/config.default.yaml
index b144513..5aacb66 100644
--- a/pkg/config.default.yaml
+++ b/pkg/config.default.yaml
@@ -16,8 +16,7 @@ blobfs:
     enabled: false
     idleTtlS: 60
     minFileSizeBytes: 1048576 # 1MB
-    segmentSizeBytes: 134217728 # 128MB
-    totalPrefetchSizeBytes: 1073741824 # 1GB
+    windowSizeBytes: 134217728 # 128MB
     dataTimeoutS: 30
     ignoreFileExt:
       - .clip
diff --git a/pkg/types.go b/pkg/types.go
index b8ed6a2..0121bac 100644
--- a/pkg/types.go
+++ b/pkg/types.go
@@ -103,13 +103,12 @@ type BlobFsConfig struct {
 }
 
 type BlobFsPrefetchConfig struct {
-	Enabled                bool     `key:"enabled" json:"enabled"`
-	MinFileSizeBytes       uint64   `key:"minFileSizeBytes" json:"min_file_size_bytes"`
-	TotalPrefetchSizeBytes uint64   `key:"totalPrefetchSizeBytes" json:"total_prefetch_size_bytes"`
-	IdleTtlS               int      `key:"idleTtlS" json:"idle_ttl_s"`
-	SegmentSizeBytes       uint64   `key:"segmentSizeBytes" json:"segment_size_bytes"`
-	IgnoreFileExt          []string `key:"ignoreFileExt" json:"ignore_file_ext"`
-	DataTimeoutS           int      `key:"dataTimeoutS" json:"data_timeout_s"`
+	Enabled          bool     `key:"enabled" json:"enabled"`
+	MinFileSizeBytes uint64   `key:"minFileSizeBytes" json:"min_file_size_bytes"`
+	IdleTtlS         int      `key:"idleTtlS" json:"idle_ttl_s"`
+	WindowSizeBytes  uint64   `key:"windowSizeBytes" json:"window_size_bytes"`
+	IgnoreFileExt    []string `key:"ignoreFileExt" json:"ignore_file_ext"`
+	DataTimeoutS     int      `key:"dataTimeoutS" json:"data_timeout_s"`
 }
 
 type SourceConfig struct {
json:"ignore_file_ext"` - DataTimeoutS int `key:"dataTimeoutS" json:"data_timeout_s"` + Enabled bool `key:"enabled" json:"enabled"` + MinFileSizeBytes uint64 `key:"minFileSizeBytes" json:"min_file_size_bytes"` + IdleTtlS int `key:"idleTtlS" json:"idle_ttl_s"` + WindowSizeBytes uint64 `key:"windowSizeBytes" json:"window_size_bytes"` + IgnoreFileExt []string `key:"ignoreFileExt" json:"ignore_file_ext"` + DataTimeoutS int `key:"dataTimeoutS" json:"data_timeout_s"` } type SourceConfig struct { From b6a1d93119258dcd1d351aac5dfc88378aa94eb7 Mon Sep 17 00:00:00 2001 From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com> Date: Mon, 13 Jan 2025 16:00:35 -0500 Subject: [PATCH 32/34] remove logs.txt --- logs.txt | 1149 ------------------------------------------------------ 1 file changed, 1149 deletions(-) delete mode 100644 logs.txt diff --git a/logs.txt b/logs.txt deleted file mode 100644 index 3518461..0000000 --- a/logs.txt +++ /dev/null @@ -1,1149 +0,0 @@ -{"level":"info","ts":"2025-01-13T20:09:32.103Z","caller":"pkg/logger.go:70","msg":"Mounting to /cache"} -{"level":"info","ts":"2025-01-13T20:09:32.166Z","caller":"pkg/logger.go:70","msg":"Added new host @ blobcache-host-0af2b0.tailc480d.ts.net:2049 (PrivateAddr=172.18.0.3:2049, RTT=0s)"} -{"level":"info","ts":"2025-01-13T20:09:33.246Z","caller":"pkg/logger.go:70","msg":"Fetching segment a4426282975b6f142c5425ad49f569d80198cc80ccda7adeb6a11f084e991e7b-0"} -{"level":"info","ts":"2025-01-13T20:09:33.246Z","caller":"pkg/logger.go:70","msg":"Waiting for prefetch signal"} -fatal error: sync: unlock of unlocked mutex - -goroutine 126 [running]: -sync.fatal({0x1b09702?, 0x100000000000000?}) - /usr/local/go/src/runtime/panic.go:1031 +0x18 -sync.(*Mutex).unlockSlow(0xc000a20680, 0xffffffff) - /usr/local/go/src/sync/mutex.go:231 +0x35 -sync.(*Mutex).Unlock(0x0?) - /usr/local/go/src/sync/mutex.go:225 +0x25 -sync.(*Cond).Wait(0xc00021c4e0?) - /usr/local/go/src/sync/cond.go:70 +0x77 -github.com/beam-cloud/blobcache-v2/pkg.waitForCondition.func1() - /workspace/pkg/blobfs_prefetch.go:353 +0x25 -created by github.com/beam-cloud/blobcache-v2/pkg.waitForCondition in goroutine 200 - /workspace/pkg/blobfs_prefetch.go:352 +0x78 - -goroutine 1 [chan receive]: -main.main() - /workspace/e2e/fs/main.go:49 +0x296 - -goroutine 3 [select]: -tailscale.com/logtail.(*Logger).drainBlock(...) - /go/pkg/mod/tailscale.com@v1.72.1/logtail/logtail.go:304 -tailscale.com/logtail.(*Logger).drainPending(0xc000288c08) - /go/pkg/mod/tailscale.com@v1.72.1/logtail/logtail.go:359 +0x626 -tailscale.com/logtail.(*Logger).uploading(0xc000288c08, {0x1d901f0, 0xc00015bd60}) - /go/pkg/mod/tailscale.com@v1.72.1/logtail/logtail.go:399 +0x88 -created by tailscale.com/logtail.NewLogger in goroutine 1 - /go/pkg/mod/tailscale.com@v1.72.1/logtail/logtail.go:179 +0x80a - -goroutine 22 [chan receive]: -tailscale.com/net/tstun.(*fakeTUN).Read(0x0?, {0x0?, 0x0?, 0x0?}, {0x0?, 0x0?, 0x0?}, 0x0?) 
-
-[... roughly 1,000 further lines of goroutine stacks from tailscale.com, wireguard-go, gvisor, netlink, and grpc internals ...]
-
-goroutine 5 [select]:
-github.com/beam-cloud/blobcache-v2/pkg.(*DiscoveryClient).StartInBackground(0xc000a801e0, {0x1d90180, 0x2ab0480})
-	/workspace/pkg/discovery.go:70 +0x1ee
-created by github.com/beam-cloud/blobcache-v2/pkg.NewBlobCacheClient in goroutine 1
-	/workspace/pkg/client.go:108 +0x81f
-
-goroutine 275 [runnable]:
-github.com/beam-cloud/blobcache-v2/pkg.(*BlobCacheClient).GetContentStream(0xc0000e6008, {0xc00023a3c0, 0x40}, 0x0, 0x8000000)
-	/workspace/pkg/client.go:313 +0x112
-github.com/beam-cloud/blobcache-v2/pkg.(*PrefetchBuffer).fetch(0xc0005a4680, 0x0, 0x8000000)
-	/workspace/pkg/blobfs_prefetch.go:186 +0x2a6
-created by github.com/beam-cloud/blobcache-v2/pkg.(*PrefetchBuffer).tryGetRange in goroutine 200
-	/workspace/pkg/blobfs_prefetch.go:326 +0x3e5
-
-goroutine 194 [select]:
-github.com/beam-cloud/blobcache-v2/pkg.(*BlobCacheClient).manageLocalClientCache.func1()
-	/workspace/pkg/client.go:322 +0xea
-created by github.com/beam-cloud/blobcache-v2/pkg.(*BlobCacheClient).manageLocalClientCache in goroutine 6
-	/workspace/pkg/client.go:317 +0x6b
-
-goroutine 139 [select]:
-github.com/beam-cloud/blobcache-v2/pkg.(*BlobCacheClient).monitorHost(0xc0000e6008, 0xc000594450)
-	/workspace/pkg/client.go:193 +0xe5
-created by github.com/beam-cloud/blobcache-v2/pkg.(*BlobCacheClient).addHost in goroutine 5
-	/workspace/pkg/client.go:184 +0x7cc
-
-goroutine 177 [IO wait]:
-internal/poll.runtime_pollWait(0x2aaafa8e8970, 0x72)
-	/usr/local/go/src/runtime/netpoll.go:351 +0x85
-internal/poll.(*pollDesc).wait(0xc000980400?, 0xc000b80000?, 0x0)
-	/usr/local/go/src/internal/poll/fd_poll_runtime.go:84 +0x27
-internal/poll.(*pollDesc).waitRead(...)
- /usr/local/go/src/internal/poll/fd_poll_runtime.go:89 -internal/poll.(*FD).Read(0xc000980400, {0xc000b80000, 0x20000, 0x20000}) - /usr/local/go/src/internal/poll/fd_unix.go:165 +0x27a -net.(*netFD).Read(0xc000980400, {0xc000b80000?, 0x1060100000000?, 0x8?}) - /usr/local/go/src/net/fd_posix.go:55 +0x25 -net.(*conn).Read(0xc0001329c8, {0xc000b80000?, 0x800010601?, 0xc000000000?}) - /usr/local/go/src/net/net.go:189 +0x45 -bufio.(*Reader).Read(0xc0007120c0, {0xc00032c9e0, 0x9, 0xc0001f9c08?}) - /usr/local/go/src/bufio/bufio.go:241 +0x197 -io.ReadAtLeast({0x1d76540, 0xc0007120c0}, {0xc00032c9e0, 0x9, 0x9}, 0x9) - /usr/local/go/src/io/io.go:335 +0x90 -io.ReadFull(...) - /usr/local/go/src/io/io.go:354 -golang.org/x/net/http2.readFrameHeader({0xc00032c9e0, 0x9, 0xc0011f2150?}, {0x1d76540?, 0xc0007120c0?}) - /go/pkg/mod/golang.org/x/net@v0.27.0/http2/frame.go:237 +0x65 -golang.org/x/net/http2.(*Framer).ReadFrame(0xc00032c9a0) - /go/pkg/mod/golang.org/x/net@v0.27.0/http2/frame.go:501 +0x85 -google.golang.org/grpc/internal/transport.(*http2Client).reader(0xc0001afd48, 0xc0002c15e0) - /go/pkg/mod/google.golang.org/grpc@v1.62.0/internal/transport/http2_client.go:1593 +0x226 -created by google.golang.org/grpc/internal/transport.newHTTP2Client in goroutine 138 - /go/pkg/mod/google.golang.org/grpc@v1.62.0/internal/transport/http2_client.go:400 +0x1d1e - -goroutine 196 [select]: -github.com/beam-cloud/blobcache-v2/pkg.(*PrefetchManager).evictIdleBuffers(0xc000a20488) - /workspace/pkg/blobfs_prefetch.go:71 +0xb2 -created by github.com/beam-cloud/blobcache-v2/pkg.(*PrefetchManager).Start in goroutine 1 - /workspace/pkg/blobfs_prefetch.go:44 +0x4f - -goroutine 197 [semacquire]: -sync.runtime_Semacquire(0xc00037ec10?) - /usr/local/go/src/runtime/sema.go:71 +0x25 -sync.(*WaitGroup).Wait(0xc00021c4e0?) - /usr/local/go/src/sync/waitgroup.go:118 +0x48 -github.com/hanwen/go-fuse/v2/fuse.(*Server).Wait(...) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:450 -github.com/beam-cloud/blobcache-v2/pkg.Mount.func1.1() - /workspace/pkg/blobfs.go:151 +0xa6 -created by github.com/beam-cloud/blobcache-v2/pkg.Mount.func1 in goroutine 1 - /workspace/pkg/blobfs.go:143 +0x6e - -goroutine 199 [syscall]: -os/signal.signal_recv() - /usr/local/go/src/runtime/sigqueue.go:152 +0x29 -os/signal.loop() - /usr/local/go/src/os/signal/signal_unix.go:23 +0x13 -created by os/signal.Notify.func1.1 in goroutine 1 - /usr/local/go/src/os/signal/signal.go:151 +0x1f - -goroutine 200 [select]: -github.com/beam-cloud/blobcache-v2/pkg.(*PrefetchBuffer).waitForSignal(0xc0005a4680) - /workspace/pkg/blobfs_prefetch.go:301 +0x166 -github.com/beam-cloud/blobcache-v2/pkg.(*PrefetchBuffer).GetRange(0xc0005a4680, 0xc00023a3c0?, 0x40?) 
- /workspace/pkg/blobfs_prefetch.go:286 +0x192 -github.com/beam-cloud/blobcache-v2/pkg.(*FSNode).Read(0xc000a40100, {0x1a9b500?, 0x0?}, {0x5b36ca?, 0xc0005950b0?}, {0xc001800000, 0x20000, 0x20000}, 0x0) - /workspace/pkg/blobfs_node.go:200 +0x13f -github.com/hanwen/go-fuse/v2/fs.(*rawBridge).Read(0x0?, 0xc0002bf6c0, 0xc00099e3e0, {0xc001800000, 0x20000, 0x20000}) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fs/bridge.go:770 +0x136 -github.com/hanwen/go-fuse/v2/fuse.doRead(0xc00021c4e0, 0xc00099e248) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/opcode.go:398 +0x7c -github.com/hanwen/go-fuse/v2/fuse.(*Server).handleRequest(0xc00021c4e0, 0xc00099e248) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:527 +0x2d6 -github.com/hanwen/go-fuse/v2/fuse.(*Server).loop(0xc00021c4e0, 0x0) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:500 +0x110 -github.com/hanwen/go-fuse/v2/fuse.(*Server).Serve(0xc00021c4e0) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:422 +0x29 -created by github.com/beam-cloud/blobcache-v2/pkg.Mount.func1.1 in goroutine 197 - /workspace/pkg/blobfs.go:144 +0x65 - -goroutine 141 [syscall]: -syscall.Syscall(0x0, 0x15, 0xc000ea61b0, 0x100080) - /usr/local/go/src/syscall/syscall_linux.go:73 +0x25 -syscall.read(0xc00021c578?, {0xc000ea61b0?, 0x0?, 0x0?}) - /usr/local/go/src/syscall/zsyscall_linux_amd64.go:736 +0x38 -syscall.Read(...) - /usr/local/go/src/syscall/syscall_unix.go:183 -github.com/hanwen/go-fuse/v2/fuse.(*Server).readRequest.func1(...) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:336 -github.com/hanwen/go-fuse/v2/fuse.handleEINTR(...) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:309 -github.com/hanwen/go-fuse/v2/fuse.(*Server).readRequest(0xc00021c4e0, 0x8?) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:334 +0x1d2 -github.com/hanwen/go-fuse/v2/fuse.(*Server).loop(0xc00021c4e0, 0x1) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:478 +0x7b -created by github.com/hanwen/go-fuse/v2/fuse.(*Server).readRequest in goroutine 200 - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:367 +0x547 - -goroutine 142 [syscall]: -syscall.Syscall(0x0, 0x15, 0xc0010aa1b0, 0x100080) - /usr/local/go/src/syscall/syscall_linux.go:73 +0x25 -syscall.read(0xc00021c578?, {0xc0010aa1b0?, 0x0?, 0xc00026ae90?}) - /usr/local/go/src/syscall/zsyscall_linux_amd64.go:736 +0x38 -syscall.Read(...) - /usr/local/go/src/syscall/syscall_unix.go:183 -github.com/hanwen/go-fuse/v2/fuse.(*Server).readRequest.func1(...) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:336 -github.com/hanwen/go-fuse/v2/fuse.handleEINTR(...) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:309 -github.com/hanwen/go-fuse/v2/fuse.(*Server).readRequest(0xc00021c4e0, 0x88?) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:334 +0x1d2 -github.com/hanwen/go-fuse/v2/fuse.(*Server).loop(0xc00021c4e0, 0x1) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:478 +0x7b -created by github.com/hanwen/go-fuse/v2/fuse.(*Server).readRequest in goroutine 200 - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:367 +0x547 - -goroutine 143 [syscall]: -syscall.Syscall(0x0, 0x15, 0xc000ba01b0, 0x100080) - /usr/local/go/src/syscall/syscall_linux.go:73 +0x25 -syscall.read(0xc00021c578?, {0xc000ba01b0?, 0x0?, 0xb?}) - /usr/local/go/src/syscall/zsyscall_linux_amd64.go:736 +0x38 -syscall.Read(...) 
- /usr/local/go/src/syscall/syscall_unix.go:183 -github.com/hanwen/go-fuse/v2/fuse.(*Server).readRequest.func1(...) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:336 -github.com/hanwen/go-fuse/v2/fuse.handleEINTR(...) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:309 -github.com/hanwen/go-fuse/v2/fuse.(*Server).readRequest(0xc00021c4e0, 0xc8?) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:334 +0x1d2 -github.com/hanwen/go-fuse/v2/fuse.(*Server).loop(0xc00021c4e0, 0x1) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:478 +0x7b -created by github.com/hanwen/go-fuse/v2/fuse.(*Server).readRequest in goroutine 200 - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:367 +0x547 - -goroutine 144 [syscall]: -syscall.Syscall(0x0, 0x15, 0xc000da41b0, 0x100080) - /usr/local/go/src/syscall/syscall_linux.go:73 +0x25 -syscall.read(0xc00021c578?, {0xc000da41b0?, 0x0?, 0x0?}) - /usr/local/go/src/syscall/zsyscall_linux_amd64.go:736 +0x38 -syscall.Read(...) - /usr/local/go/src/syscall/syscall_unix.go:183 -github.com/hanwen/go-fuse/v2/fuse.(*Server).readRequest.func1(...) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:336 -github.com/hanwen/go-fuse/v2/fuse.handleEINTR(...) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:309 -github.com/hanwen/go-fuse/v2/fuse.(*Server).readRequest(0xc00021c4e0, 0x88?) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:334 +0x1d2 -github.com/hanwen/go-fuse/v2/fuse.(*Server).loop(0xc00021c4e0, 0x1) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:478 +0x7b -created by github.com/hanwen/go-fuse/v2/fuse.(*Server).readRequest in goroutine 200 - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:367 +0x547 - -goroutine 145 [syscall]: -syscall.Syscall(0x0, 0x15, 0xc000fa81b0, 0x100080) - /usr/local/go/src/syscall/syscall_linux.go:73 +0x25 -syscall.read(0xc00021c578?, {0xc000fa81b0?, 0x0?, 0x0?}) - /usr/local/go/src/syscall/zsyscall_linux_amd64.go:736 +0x38 -syscall.Read(...) - /usr/local/go/src/syscall/syscall_unix.go:183 -github.com/hanwen/go-fuse/v2/fuse.(*Server).readRequest.func1(...) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:336 -github.com/hanwen/go-fuse/v2/fuse.handleEINTR(...) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:309 -github.com/hanwen/go-fuse/v2/fuse.(*Server).readRequest(0xc00021c4e0, 0x8?) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:334 +0x1d2 -github.com/hanwen/go-fuse/v2/fuse.(*Server).loop(0xc00021c4e0, 0x1) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:478 +0x7b -created by github.com/hanwen/go-fuse/v2/fuse.(*Server).readRequest in goroutine 200 - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:367 +0x547 - -goroutine 226 [syscall]: -syscall.Syscall(0x0, 0x15, 0xc0012001b0, 0x100080) - /usr/local/go/src/syscall/syscall_linux.go:73 +0x25 -syscall.read(0xc00021c578?, {0xc0012001b0?, 0x0?, 0x0?}) - /usr/local/go/src/syscall/zsyscall_linux_amd64.go:736 +0x38 -syscall.Read(...) - /usr/local/go/src/syscall/syscall_unix.go:183 -github.com/hanwen/go-fuse/v2/fuse.(*Server).readRequest.func1(...) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:336 -github.com/hanwen/go-fuse/v2/fuse.handleEINTR(...) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:309 -github.com/hanwen/go-fuse/v2/fuse.(*Server).readRequest(0xc00021c4e0, 0x8?) 
- /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:334 +0x1d2 -github.com/hanwen/go-fuse/v2/fuse.(*Server).loop(0xc00021c4e0, 0x1) - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:478 +0x7b -created by github.com/hanwen/go-fuse/v2/fuse.(*Server).readRequest in goroutine 200 - /go/pkg/mod/github.com/hanwen/go-fuse/v2@v2.5.1/fuse/server.go:367 +0x547 - -goroutine 124 [IO wait]: -internal/poll.runtime_pollWait(0x2aaafa8e8a88, 0x72) - /usr/local/go/src/runtime/netpoll.go:351 +0x85 -internal/poll.(*pollDesc).wait(0xc0005a4480?, 0xc001756000?, 0x0) - /usr/local/go/src/internal/poll/fd_poll_runtime.go:84 +0x27 -internal/poll.(*pollDesc).waitRead(...) - /usr/local/go/src/internal/poll/fd_poll_runtime.go:89 -internal/poll.(*FD).Read(0xc0005a4480, {0xc001756000, 0xc00, 0xc00}) - /usr/local/go/src/internal/poll/fd_unix.go:165 +0x27a -net.(*netFD).Read(0xc0005a4480, {0xc001756000?, 0x2a70d00?, 0xc0000b69d0?}) - /usr/local/go/src/net/fd_posix.go:55 +0x25 -net.(*conn).Read(0xc0011b8028, {0xc001756000?, 0xc0011ba2e0?, 0x756993?}) - /usr/local/go/src/net/net.go:189 +0x45 -crypto/tls.(*atLeastReader).Read(0xc0011b0498, {0xc001756000?, 0x0?, 0xc0011b0498?}) - /usr/local/go/src/crypto/tls/conn.go:809 +0x3b -bytes.(*Buffer).ReadFrom(0xc0017462b8, {0x1d74ae0, 0xc0011b0498}) - /usr/local/go/src/bytes/buffer.go:211 +0x98 -crypto/tls.(*Conn).readFromUntil(0xc001746008, {0x1d74bc0, 0xc0011b8028}, 0xc0000b6a40?) - /usr/local/go/src/crypto/tls/conn.go:831 +0xde -crypto/tls.(*Conn).readRecordOrCCS(0xc001746008, 0x0) - /usr/local/go/src/crypto/tls/conn.go:629 +0x3cf -crypto/tls.(*Conn).readRecord(...) - /usr/local/go/src/crypto/tls/conn.go:591 -crypto/tls.(*Conn).Read(0xc001746008, {0xc0011eb000, 0x1000, 0xc0001fa108?}) - /usr/local/go/src/crypto/tls/conn.go:1385 +0x150 -net/http.(*persistConn).Read(0xc00166ec60, {0xc0011eb000?, 0x891525?, 0x1839040?}) - /usr/local/go/src/net/http/transport.go:2052 +0x4a -bufio.(*Reader).fill(0xc001754f60) - /usr/local/go/src/bufio/bufio.go:110 +0x103 -bufio.(*Reader).Peek(0xc001754f60, 0x1) - /usr/local/go/src/bufio/bufio.go:148 +0x53 -net/http.(*persistConn).readLoop(0xc00166ec60) - /usr/local/go/src/net/http/transport.go:2205 +0x185 -created by net/http.(*Transport).dialConn in goroutine 254 - /usr/local/go/src/net/http/transport.go:1874 +0x154f - -goroutine 127 [sync.Cond.Wait]: -sync.runtime_notifyListWait(0xc000350550, 0x0) - /usr/local/go/src/runtime/sema.go:587 +0x159 -sync.(*Cond).Wait(0xc00021c4e0?) 
- /usr/local/go/src/sync/cond.go:71 +0x85 -github.com/beam-cloud/blobcache-v2/pkg.waitForCondition.func1() - /workspace/pkg/blobfs_prefetch.go:353 +0x25 -created by github.com/beam-cloud/blobcache-v2/pkg.waitForCondition in goroutine 200 - /workspace/pkg/blobfs_prefetch.go:352 +0x78 - -goroutine 125 [select]: -net/http.(*persistConn).writeLoop(0xc00166ec60) - /usr/local/go/src/net/http/transport.go:2519 +0xe7 -created by net/http.(*Transport).dialConn in goroutine 254 - /usr/local/go/src/net/http/transport.go:1875 +0x15a5 - -goroutine 99 [runnable]: -github.com/beam-cloud/blobcache-v2/pkg.(*BlobCacheClient).GetContentStream.func1() - /workspace/pkg/client.go:272 -created by github.com/beam-cloud/blobcache-v2/pkg.(*BlobCacheClient).GetContentStream in goroutine 275 - /workspace/pkg/client.go:272 +0x112 From d5a12d00cc0658723126a80707630f3a3b951652 Mon Sep 17 00:00:00 2001 From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com> Date: Mon, 13 Jan 2025 17:13:58 -0500 Subject: [PATCH 33/34] test sliding windows --- pkg/blobfs_prefetch.go | 122 +++++++++++++++++++++-------------------- 1 file changed, 64 insertions(+), 58 deletions(-) diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go index fa036aa..4ac1652 100644 --- a/pkg/blobfs_prefetch.go +++ b/pkg/blobfs_prefetch.go @@ -82,18 +82,20 @@ func (pm *PrefetchManager) evictIdleBuffers() { } type PrefetchBuffer struct { - ctx context.Context - cancelFunc context.CancelFunc - manager *PrefetchManager - hash string - windows map[uint64]*window - windowSize uint64 - lastRead time.Time - fileSize uint64 - client *BlobCacheClient - mu sync.Mutex - dataCond *sync.Cond - dataTimeout time.Duration + ctx context.Context + cancelFunc context.CancelFunc + manager *PrefetchManager + hash string + windowSize uint64 + lastRead time.Time + fileSize uint64 + client *BlobCacheClient + mu sync.Mutex + dataCond *sync.Cond + dataTimeout time.Duration + currentWindow *window + nextWindow *window + prevWindow *window } type window struct { @@ -118,17 +120,19 @@ type PrefetchOpts struct { func NewPrefetchBuffer(opts PrefetchOpts) *PrefetchBuffer { pb := &PrefetchBuffer{ - ctx: opts.Ctx, - cancelFunc: opts.CancelFunc, - hash: opts.Hash, - manager: opts.Manager, - lastRead: time.Now(), - fileSize: opts.FileSize, - client: opts.Client, - windows: make(map[uint64]*window), - windowSize: opts.WindowSize, - dataTimeout: opts.DataTimeout, - mu: sync.Mutex{}, + ctx: opts.Ctx, + cancelFunc: opts.CancelFunc, + hash: opts.Hash, + manager: opts.Manager, + lastRead: time.Now(), + fileSize: opts.FileSize, + client: opts.Client, + windowSize: opts.WindowSize, + dataTimeout: opts.DataTimeout, + mu: sync.Mutex{}, + currentWindow: nil, + nextWindow: nil, + prevWindow: nil, } pb.dataCond = sync.NewCond(&pb.mu) return pb @@ -138,9 +142,12 @@ func (pb *PrefetchBuffer) fetch(offset uint64, bufferSize uint64) { bufferIndex := offset / bufferSize pb.mu.Lock() - if _, exists := pb.windows[bufferIndex]; exists { - pb.mu.Unlock() - return + windows := []*window{pb.currentWindow, pb.nextWindow, pb.prevWindow} + for _, w := range windows { + if w != nil && w.index == bufferIndex { + pb.mu.Unlock() + return + } } w := &window{ @@ -150,14 +157,15 @@ func (pb *PrefetchBuffer) fetch(offset uint64, bufferSize uint64) { lastRead: time.Now(), fetching: true, } - pb.windows[bufferIndex] = w + + // Slide windows + pb.prevWindow = pb.currentWindow + pb.currentWindow = pb.nextWindow + pb.nextWindow = w pb.mu.Unlock() contentChan, err := pb.client.GetContentStream(pb.hash, 
int64(offset), int64(bufferSize)) if err != nil { - pb.mu.Lock() - delete(pb.windows, bufferIndex) - pb.mu.Unlock() return } @@ -187,27 +195,20 @@ func (pb *PrefetchBuffer) fetch(offset uint64, bufferSize uint64) { func (pb *PrefetchBuffer) evictIdle() bool { unused := true - var indicesToDelete []uint64 pb.mu.Lock() - for index, window := range pb.windows { - if time.Since(window.lastRead) > prefetchSegmentIdleTTL && !window.fetching { - indicesToDelete = append(indicesToDelete, index) + windows := []*window{pb.prevWindow, pb.currentWindow, pb.nextWindow} + for _, w := range windows { + if w != nil && time.Since(w.lastRead) > prefetchSegmentIdleTTL && !w.fetching { + Logger.Infof("Evicting segment %s-%d", pb.hash, w.index) + w.data = nil } else { unused = false } } + pb.prevWindow, pb.currentWindow, pb.nextWindow = windows[0], windows[1], windows[2] pb.mu.Unlock() - for _, index := range indicesToDelete { - pb.mu.Lock() - Logger.Infof("Evicting segment %s-%d", pb.hash, index) - window := pb.windows[index] - window.data = nil - delete(pb.windows, index) - pb.mu.Unlock() - } - return unused } @@ -218,12 +219,10 @@ func (pb *PrefetchBuffer) Clear() { defer pb.mu.Unlock() // Clear all window data - for _, window := range pb.windows { + windows := []*window{pb.prevWindow, pb.currentWindow, pb.nextWindow} + for _, window := range windows { window.data = nil } - - // Reinitialize the map to clear all entries - pb.windows = make(map[uint64]*window) } func (pb *PrefetchBuffer) GetRange(offset, length uint64) ([]byte, error) { @@ -262,10 +261,9 @@ func (pb *PrefetchBuffer) waitForSignal() error { Logger.Infof("Prefetch data ready") return nil case <-timeoutChan: - Logger.Infof("Timeout occurred waiting for prefetch data") return fmt.Errorf("timeout occurred waiting for prefetch data") case <-pb.ctx.Done(): - return fmt.Errorf("context canceled") + return pb.ctx.Err() } } } @@ -274,30 +272,38 @@ func (pb *PrefetchBuffer) tryGetRange(bufferIndex, bufferOffset, offset, length pb.mu.Lock() defer pb.mu.Unlock() - window, exists := pb.windows[bufferIndex] + var w *window + var windows []*window = []*window{pb.currentWindow, pb.nextWindow, pb.prevWindow} + for _, win := range windows { + if win != nil && win.index == bufferIndex { + w = win + break + } + } - // Initiate a fetch operation if the buffer does not exist - if !exists { + if w == nil { Logger.Infof("Fetching segment %s-%d", pb.hash, bufferIndex) go pb.fetch(bufferIndex*pb.windowSize, pb.windowSize) return nil, false - } else if window.readLength > bufferOffset { - window.lastRead = time.Now() + } + + if w.readLength > bufferOffset { + w.lastRead = time.Now() // Calculate the relative offset within the buffer relativeOffset := offset - (bufferIndex * pb.windowSize) - availableLength := window.readLength - relativeOffset + availableLength := w.readLength - relativeOffset readLength := min(int64(length), int64(availableLength)) // Pre-emptively start fetching the next buffer if within the threshold - if window.readLength-relativeOffset <= preemptiveFetchThresholdBytes { + if w.readLength-relativeOffset <= preemptiveFetchThresholdBytes { nextBufferIndex := bufferIndex + 1 - if _, nextExists := pb.windows[nextBufferIndex]; !nextExists { + if pb.nextWindow == nil || pb.nextWindow.index != nextBufferIndex { go pb.fetch(nextBufferIndex*pb.windowSize, pb.windowSize) } } - return window.data[relativeOffset : int64(relativeOffset)+int64(readLength)], true + return w.data[relativeOffset : int64(relativeOffset)+int64(readLength)], true } return nil, false 
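
The three-slot rotation above is the core of this patch. As a reading aid, here is a minimal, self-contained sketch of the same idea outside the diff. The names slidingWindows, get, and slideIn are illustrative, not from the patch, and the types are simplified (the real window also carries readLength and a fetching flag):

package main

import (
	"fmt"
	"time"
)

// window mirrors the per-segment state in the patch: the segment index,
// its buffered data, and the last-read timestamp used for idle eviction.
type window struct {
	index    uint64
	data     []byte
	lastRead time.Time
}

// slidingWindows models the fixed prev/current/next slots that replace
// the old map[uint64]*window.
type slidingWindows struct {
	prev, current, next *window
}

// get scans the three slots for the window covering bufferIndex,
// just as fetch and tryGetRange do in the patch; nil means a miss.
func (s *slidingWindows) get(bufferIndex uint64) *window {
	for _, w := range []*window{s.current, s.next, s.prev} {
		if w != nil && w.index == bufferIndex {
			return w
		}
	}
	return nil
}

// slideIn installs a newly fetched window and lets the oldest one
// fall off: prev <- current <- next <- w.
func (s *slidingWindows) slideIn(w *window) {
	s.prev = s.current
	s.current = s.next
	s.next = w
}

func main() {
	s := &slidingWindows{}
	for i := uint64(0); i < 4; i++ {
		s.slideIn(&window{index: i, lastRead: time.Now()})
	}
	// Slots now hold windows 1-3; window 0 has aged out of the rotation.
	fmt.Println(s.get(0), s.get(3) != nil) // prints: <nil> true
}

The upshot is that at most three segments are resident per file, so memory stays bounded without the indicesToDelete bookkeeping the old map needed; the trade-off is that a read jumping more than one window away from the rotation always misses and triggers a re-fetch. Note also that the slots may legitimately be nil (as in the sketch's early iterations), so every consumer has to guard for that.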
From 7b3f08e3fdbd513fdd54dbe05995b95f62b5bcd5 Mon Sep 17 00:00:00 2001
From: luke-lombardi <33990301+luke-lombardi@users.noreply.github.com>
Date: Mon, 13 Jan 2025 17:28:14 -0500
Subject: [PATCH 34/34] remove debug logs

---
 pkg/blobfs_prefetch.go | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/pkg/blobfs_prefetch.go b/pkg/blobfs_prefetch.go
index 4ac1652..030b0eb 100644
--- a/pkg/blobfs_prefetch.go
+++ b/pkg/blobfs_prefetch.go
@@ -200,7 +200,7 @@ func (pb *PrefetchBuffer) evictIdle() bool {
 	windows := []*window{pb.prevWindow, pb.currentWindow, pb.nextWindow}
 	for _, w := range windows {
 		if w != nil && time.Since(w.lastRead) > prefetchSegmentIdleTTL && !w.fetching {
-			Logger.Infof("Evicting segment %s-%d", pb.hash, w.index)
+			Logger.Debugf("Evicting segment %s-%d", pb.hash, w.index)
 			w.data = nil
 		} else {
 			unused = false
@@ -242,7 +242,6 @@ func (pb *PrefetchBuffer) GetRange(offset, length uint64) ([]byte, error) {
 		bufferIndex = offset / bufferSize
 		bufferOffset = offset % bufferSize
 	} else {
-		Logger.Infof("Waiting for prefetch signal")
 		if err := pb.waitForSignal(); err != nil {
 			return nil, err
 		}
@@ -258,7 +257,6 @@ func (pb *PrefetchBuffer) waitForSignal() error {
 	for {
 		select {
 		case <-waitForCondition(pb.dataCond):
-			Logger.Infof("Prefetch data ready")
 			return nil
 		case <-timeoutChan:
 			return fmt.Errorf("timeout occurred waiting for prefetch data")
@@ -280,7 +280,6 @@ func (pb *PrefetchBuffer) tryGetRange(bufferIndex, bufferOffset, offset, length
 	}
 
 	if w == nil {
-		Logger.Infof("Fetching segment %s-%d", pb.hash, bufferIndex)
 		go pb.fetch(bufferIndex*pb.windowSize, pb.windowSize)
 		return nil, false
 	}
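
For context on the waitForSignal select that these patches touch: it multiplexes a waitForCondition helper (visible in the earlier stack dump at pkg/blobfs_prefetch.go:352) with a timeout channel and pb.ctx.Done(). The helper presumably adapts pb.dataCond, a *sync.Cond, into a channel so it can appear in a select. A minimal sketch of that pattern under that assumption; the main function, delays, and messages here are illustrative only:

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"
)

// waitForCondition bridges a sync.Cond into a channel that closes once
// the condition is signaled. The helper goroutine stays parked in
// cond.Wait until a Broadcast arrives, even if the caller has already
// taken the timeout branch of its select.
func waitForCondition(cond *sync.Cond) <-chan struct{} {
	ch := make(chan struct{})
	go func() {
		cond.L.Lock()
		cond.Wait()
		cond.L.Unlock()
		close(ch)
	}()
	return ch
}

func main() {
	var mu sync.Mutex
	cond := sync.NewCond(&mu)
	ctx := context.Background()

	// Stand-in for the fetch goroutine announcing newly buffered data.
	go func() {
		time.Sleep(50 * time.Millisecond)
		cond.Broadcast()
	}()

	select {
	case <-waitForCondition(cond):
		fmt.Println("prefetch data ready")
	case <-time.After(time.Second):
		fmt.Println(errors.New("timeout occurred waiting for prefetch data"))
	case <-ctx.Done():
		fmt.Println(ctx.Err())
	}
}

One caveat the sketch makes visible: when the timeout or cancellation branch wins, the helper goroutine remains parked in cond.Wait until the next Broadcast, which is exactly the state goroutine 127 was in in the dump above. Returning pb.ctx.Err() on cancellation, as PATCH 33 does, at least propagates context.Canceled or context.DeadlineExceeded to the FUSE read path instead of a generic error string.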