Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 27 additions & 0 deletions g3doc/user_guide/filesystem.md
Original file line number Diff line number Diff line change
Expand Up @@ -273,3 +273,30 @@ runsc --root=/path/to/rootdir debug --mount erofs:{source}:{destination}
```

[Production guide]: production.md

## Custom Gofer Providers

The stock gofer serves host filesystem mounts via LISAFS. For workloads that
need a different filesystem backend (a network-backed store, an encrypted
filesystem, a tiered cache), the `runsc/gofer/provider` package lets you plug in
a custom `lisafs.ServerImpl` without forking the runsc binary.

A provider registers itself at startup and claims mounts by path. For example, a
binary might register one provider that handles mounts under `/storage` via an
HTTP blob service, and another that handles `/encrypted` via a local encryption
layer. Unclaimed mounts fall through to the stock fsgofer as usual.

To build a custom gofer:

1. Implement the `provider.Provider` interface: `Name`, `NewServer`, and
`SeccompRules`.
2. Register your provider in `init()` or early `main()`.
3. Build a binary that imports `runsc/cli` and your provider package.

The provider's `NewServer` returns a `*lisafs.Server` for mounts it handles, or
`(nil, nil)` to decline. Per-mount configuration (endpoints, volume keys) is
passed via OCI annotations on the spec. The `SeccompRules` method declares any
additional syscalls the provider needs beyond the stock gofer allowlist (e.g.
for networking). See the `runsc/gofer/provider` package documentation and
`pkg/lisafs/testsuite` for testing custom `ServerImpl` implementations.

7 changes: 7 additions & 0 deletions pkg/lisafs/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,13 @@ accessed/mutated via RPCs by LISAFS clients. The server is a trusted process.
For security reasons, the server must assume that the client can be potentially
compromised and act maliciously.

The server-side interface is `ServerImpl` (defined in `server.go`). The stock
implementation is `runsc/fsgofer`, which serves host filesystem mounts. The
`runsc/gofer/provider` package allows registering alternative `ServerImpl`
implementations for specific mounts, so that a custom gofer binary can serve
some mounts from a different backend (e.g. a network filesystem) while the
stock fsgofer handles the rest.

#### Concurrency

The server must execute file system operations under appropriate concurrency
Expand Down
3 changes: 3 additions & 0 deletions runsc/cmd/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -91,10 +91,12 @@ go_library(
"//pkg/cpuid",
"//pkg/fd",
"//pkg/hostarch",
"//pkg/lisafs",
"//pkg/log",
"//pkg/metric",
"//pkg/prometheus",
"//pkg/ring0",
"//pkg/seccomp",
"//pkg/sentry/control",
"//pkg/sentry/devices/nvproxy/nvconf",
"//pkg/sentry/devices/tpuproxy",
Expand All @@ -119,6 +121,7 @@ go_library(
"//runsc/flag",
"//runsc/fsgofer",
"//runsc/fsgofer/filter",
"//runsc/gofer/provider",
"//runsc/metricserver/containermetrics",
"//runsc/mitigate",
"//runsc/profile",
Expand Down
53 changes: 48 additions & 5 deletions runsc/cmd/gofer.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,9 @@ import (
"github.com/google/subcommands"
specs "github.com/opencontainers/runtime-spec/specs-go"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/lisafs"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/seccomp"
"gvisor.dev/gvisor/pkg/unet"
"gvisor.dev/gvisor/pkg/urpc"
"gvisor.dev/gvisor/runsc/cmd/sandboxsetup"
Expand All @@ -35,6 +37,7 @@ import (
"gvisor.dev/gvisor/runsc/flag"
"gvisor.dev/gvisor/runsc/fsgofer"
"gvisor.dev/gvisor/runsc/fsgofer/filter"
"gvisor.dev/gvisor/runsc/gofer/provider"
"gvisor.dev/gvisor/runsc/profile"
"gvisor.dev/gvisor/runsc/specutils"
)
Expand Down Expand Up @@ -293,7 +296,11 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...any) subcomm
DirectFS: conf.DirectFS,
CgoEnabled: config.CgoEnabled,
}
if err := filter.Install(opts); err != nil {
extraRules := seccomp.NewSyscallRules()
for _, p := range provider.Registered() {
extraRules.Merge(p.SeccompRules())
}
if err := filter.Install(opts, extraRules); err != nil {
util.Fatalf("installing seccomp filters: %v", err)
}

Expand All @@ -305,6 +312,8 @@ func (g *Gofer) serve(spec *specs.Spec, conf *config.Config, root string, ruid i
sock *unet.Socket
mountPath string
readonly bool
mountConf specutils.GoferMountConf
mount *specs.Mount
}
cfgs := make([]connectionConfig, 0, len(spec.Mounts)+1)
server := fsgofer.NewLisafsServer(fsgofer.Config{
Expand All @@ -327,14 +336,16 @@ func (g *Gofer) serve(spec *specs.Spec, conf *config.Config, root string, ruid i
sock: sandboxsetup.NewSocket(ioFDs[0]),
mountPath: "/", // fsgofer process is always chroot()ed. So serve root.
readonly: spec.Root.Readonly || rootfsConf.ShouldUseOverlayfs(),
mountConf: rootfsConf,
})
log.Infof("Serving %q mapped to %q on FD %d (ro: %t)", "/", root, ioFDs[0], cfgs[0].readonly)
ioFDs = ioFDs[1:]
}

mountIdx := 1 // first one is the root
for _, m := range spec.Mounts {
if !specutils.HasMountConfig(m) {
for i := range spec.Mounts {
m := &spec.Mounts[i]
if !specutils.HasMountConfig(*m) {
continue
}
mountConf := g.mountConfs[mountIdx]
Expand All @@ -356,6 +367,8 @@ func (g *Gofer) serve(spec *specs.Spec, conf *config.Config, root string, ruid i
sock: sandboxsetup.NewSocket(ioFD),
mountPath: m.Destination,
readonly: readonly,
mountConf: mountConf,
mount: m,
})
log.Infof("Serving %q mapped on FD %d (ro: %t)", m.Destination, ioFD, readonly)
}
Expand All @@ -372,15 +385,45 @@ func (g *Gofer) serve(spec *specs.Spec, conf *config.Config, root string, ruid i
log.Infof("Serving /dev mapped on FD %d (ro: false)", g.devIoFD)
}

var providerServers []*lisafs.Server
seen := map[*lisafs.Server]bool{}
for _, cfg := range cfgs {
conn, err := server.CreateConnection(cfg.sock, cfg.mountPath, cfg.readonly)
var srv *lisafs.Server
// /dev is always served by the stock fsgofer.
if cfg.mountPath != "/dev" {
for _, p := range provider.Registered() {
var err error
srv, err = p.NewServer(spec, cfg.mount, cfg.mountPath, cfg.mountConf, cfg.readonly)
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Seems like an anti pattern that we create a new server for each connection. The model we try to have is that we create one gofer server which can handle multiple connections. The server embeds lisafs.Server. That struct does a bunch of work, including providing concurrency guarantees across RPCs. See usages of the renameMu and also the entire filesystem tree it maintains. Each node has an opsMu which synchronizes RPCs on the same file.

I think what we should do is only 1 gofer server. And allow lisafs connections to define custom behavior. I think what you want to do is hijack the fsgofer Server Mount implementation:

func (s *LisafsServer) Mount(c *lisafs.Connection, mountNode *lisafs.Node) (*lisafs.ControlFD, lisafs.Statx, int, error) {
mountPath := mountNode.FilePath()

Notice that you have the Connection struct there. You also have the full mountPath there. If that is not sufficient to make the decision to pivot into your custom gofer, you can add whatever optional fields in lisafs.Connection to help you make that decision at Mount time. This RPC is called only once per connection.

In that method, you return your custom implementation of lisafs.ControlFDImpl. And that should be all. Every RPC on that connection will then be routed to your lisafs.ControlFDImpl implementation.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thank you for the detailed read.

Seems like an anti pattern that we create a new server for each connection. The model we try to have is that we create one gofer server which can handle multiple connections.....

Yeah, some of this is influenced by that long-lived sentry hardened design :D. The shape in this PR comes from the customer gofer, which implements lisafs.ServerImpl directly because it needs values for SupportedMessages and ServerOpts that diverge from fsgofer's, along with its own per-implementation lifecycle for things like a background uploader and lease heartbeat. For example, it advertises Flush so the sentry sends us the Flush RPC on close(2), which is how I learn about writes that bypassed the gofer through the donated host FD, and it runs with ServerOpts{} because the custom gofer's storage model cannot honor OpenOnDeleted or SetAttrOnDeleted. Those values are configured per-ServerImpl server-wide today, but the wire protocol already encodes them per-mount in MountResp.SupportedMs and MountResp.MaxMessageSize. That mismatch is what made multi-Server look like the cheap fix to me when I shaped this PR, since each backend brings its own lisafs.Server and sets those values independently, which is how our fork runs today.

Looking at it again with your framing, there are costs I underweighted for sure. Each registered backend ends up as its own lisafs.Server lifecycle where the gofer command has to fan out Wait and Destroy, and each backend gets its own connWg and root Node tree on top of the points you mentioned. As I am familiarizing myself more with the codebase, I am also realizing this design does not help augment-style backends (the future example posts and documentation on how to bring in a custom backend), because they would have to bring their own server and lose the controlFDLisa infrastructure they would otherwise inherit.

Your point about allowing lisafs connections to define custom behavior, and about adding optional fields in lisafs.Connection, reads to me as opening the door to making those values per-Connection, which would make the single-Server shape work for both augment-style and replace-style backends, IIUC?

If that direction sounds right to you, I think the cleanest path is two PRs where this one gets repurposed for the second. The first would be a pkg/lisafs/ change that moves SupportedMessages, MaxMessageSize, and ServerOpts from per-ServerImpl to per-Connection, with 0 behavior change for fsgofer because it would set per-connection what it sets per-server today. I think that should be ok because the wire format already encodes these per-mount and the per-ServerImpl methods are the inconsistency (?)

Once that lands, I would rebase this PR on top with the reworked extension mechanism, where there is one lisafs.Server and custom backends register an Extension that hijacks LisafsServer.Mount, returns a ControlFDImpl plus the per-connection values. Then any custom augment-style and replace-style extensions/backends plug in the same way, and the relocate and rename from your other comment land here naturally.

What do you think on the sequencing, and is there anything on your end I should be considering before I rework this? Especially the stuff around moving SupportedMessages and rest to per Connection?

Thanks for helping brainstorm on this too!

Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, I totally agree with your plan of action. It makes sense. Yeah the SupportedMessages and MaxMessageSize are already set up per-connection basis. Decoupling that from the server makes sense. Looking forward!

if err != nil {
util.Fatalf("provider %s for %q: %v", p.Name(), cfg.mountPath, err)
}
if srv != nil {
if !seen[srv] {
seen[srv] = true
providerServers = append(providerServers, srv)
}
log.Infof("Serving %q via provider %s on FD %d", cfg.mountPath, p.Name(), cfg.sock.FD())
break
}
}
}
if srv == nil {
srv = &server.Server
}
conn, err := srv.CreateConnection(cfg.sock, cfg.mountPath, cfg.readonly)
if err != nil {
util.Fatalf("starting connection on FD %d for gofer mount failed: %v", cfg.sock.FD(), err)
}
server.StartConnection(conn)
srv.StartConnection(conn)
}
server.Wait()
for _, ps := range providerServers {
ps.Wait()
}
server.Destroy()
for _, ps := range providerServers {
ps.Destroy()
}
log.Infof("All lisafs servers exited.")
if g.stopProfiling != nil {
g.stopProfiling()
Expand Down
11 changes: 9 additions & 2 deletions runsc/fsgofer/filter/filter.go
Original file line number Diff line number Diff line change
Expand Up @@ -72,10 +72,17 @@ func Rules(opt Options) seccomp.SyscallRules {
return s
}

// Install installs seccomp filters.
func Install(opt Options) error {
// Install installs seccomp filters. Any extras are merged into the stock
// allowlist before installation.
//
// extras lets callers (e.g. custom gofer providers) widen the allowlist
// with additional syscall rules beyond what Rules(opt) permits. Merging
// happens before the filter program is built, so the combined set is
// installed atomically.
func Install(opt Options, extras ...seccomp.SyscallRules) error {
	s := Rules(opt)
	for _, extra := range extras {
		s.Merge(extra)
	}
	return install(s)
}

func install(s seccomp.SyscallRules) error {
program := &seccomp.Program{
RuleSets: []seccomp.RuleSet{
{
Expand Down
4 changes: 4 additions & 0 deletions runsc/gofer/BUILD
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hmm IMO we move all this into runsc/fsgofer. Since this is very specific to filesystem gofers (as opposed to network gofers).

fsgofer and gofer as sibling packages seems a bit odd. You can have it under runsc/fsgofer/gofer_impl.go to represent other gofer implementations. "Provider" probably doesn't fit this too well, since it is not "providing" the gofer. The custom gofers are expected to register themselves.

We have a similar concept in our shim, where we allow shim extensions: pkg/shim/v1/extension/extension.go. Maybe you can call it GoferRegistry? Up to you.

Copy link
Copy Markdown
Contributor Author

@shayonj shayonj May 7, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, def fair points. I had runsc/gofer/provider/ as a sibling because I was reading runsc/fsgofer/ as one specific filesystem-gofer implementation rather than the umbrella for filesystem gofers, but that distinction is pretty thin and runsc/fsgofer/extension/ is the right spot.

On the name, agreed Provider does not quite fit because the registrant is the thing providing. I think my first iteration was Extension, based on the shim extension itself, but I saw SocketProvider and got carried away. Happy to keep the pattern of Extension going.

Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
package(
default_applicable_licenses = ["//:license"],
licenses = ["notice"],
)
31 changes: 31 additions & 0 deletions runsc/gofer/provider/BUILD
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
load("//tools:defs.bzl", "go_library", "go_test")

package(
default_applicable_licenses = ["//:license"],
licenses = ["notice"],
)

go_library(
name = "provider",
srcs = ["provider.go"],
visibility = ["//runsc:__subpackages__"],
deps = [
"//pkg/lisafs",
"//pkg/seccomp",
"//runsc/specutils",
"@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
],
)

go_test(
name = "provider_test",
size = "small",
srcs = ["provider_test.go"],
library = ":provider",
deps = [
"//pkg/lisafs",
"//pkg/seccomp",
"//runsc/specutils",
"@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
],
)
60 changes: 60 additions & 0 deletions runsc/gofer/provider/provider.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
// Copyright 2026 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package provider defines an interface for pluggable gofer filesystem
// backends. The stock fsgofer handles any mount no Provider claims.
package provider

import (
specs "github.com/opencontainers/runtime-spec/specs-go"
"gvisor.dev/gvisor/pkg/lisafs"
"gvisor.dev/gvisor/pkg/seccomp"
"gvisor.dev/gvisor/runsc/specutils"
)

// Provider is implemented by alternative LisaFS backends. It follows the
// same pattern as socket.Provider: the first registered Provider whose
// NewServer returns a non-nil server handles the mount.
type Provider interface {
	// Name identifies the provider in log messages.
	Name() string

	// NewServer returns a LisaFS server for the given mount, or
	// (nil, nil) if this provider does not handle it. A non-nil error
	// means the provider claims the mount but failed to initialize.
	//
	// mount is nil for the root filesystem (root is not present in
	// spec.Mounts). Per-sandbox config may be read from spec.Annotations.
	//
	// A Provider may return the same *lisafs.Server across calls to
	// multiplex mounts onto one server; the caller deduplicates.
	NewServer(spec *specs.Spec, mount *specs.Mount, mountPath string, conf specutils.GoferMountConf, readonly bool) (*lisafs.Server, error)

	// SeccompRules returns additional rules to merge into the stock
	// gofer's seccomp allowlist, covering any syscalls this provider
	// makes that the stock fsgofer does not (e.g. for networking).
	SeccompRules() seccomp.SyscallRules
}

// registered holds all providers in registration order. It is append-only
// and unsynchronized by design: registration must complete before any
// goroutine iterates the registry.
var registered []Provider

// Register adds p to the provider list. Must be called during init or
// early in main, before Registered is iterated.
func Register(p Provider) {
	registered = append(registered, p)
}

// Registered returns all registered providers in registration order.
//
// The returned slice is a copy, so callers cannot mutate the registry;
// it is nil when no providers have been registered.
func Registered() []Provider {
	// append to a nil slice preserves nil-ness for an empty registry
	// while detaching the result from the internal backing array.
	return append([]Provider(nil), registered...)
}
Loading
Loading