diff --git a/docs/how-to/how-to-use-virtio-fs-nydus-with-kata.md b/docs/how-to/how-to-use-virtio-fs-nydus-with-kata.md
index bbc177e0fbb..9b04d49cf28 100644
--- a/docs/how-to/how-to-use-virtio-fs-nydus-with-kata.md
+++ b/docs/how-to/how-to-use-virtio-fs-nydus-with-kata.md
@@ -2,7 +2,7 @@

 ## Introduction

-Refer to [kata-`nydus`-design](../design/kata-nydus-design.md)
+Refer to [kata-`nydus`-design](../design/kata-nydus-design.md) for an introduction. `nydus` currently supports Kata Containers with the `QEMU` and `CLH` hypervisors.

 ## How to

@@ -16,7 +16,7 @@ You can use Kata Containers with `nydus` as follows,

 4. Use [kata-containers](https://github.com/kata-containers/kata-containers) `latest` branch to compile and build `kata-containers.img`;

-5. Update `configuration-qemu.toml` to include:
+5. Update `configuration-qemu.toml` or `configuration-clh.toml` to include:

 ```toml
 shared_fs = "virtio-fs-nydus"
@@ -24,7 +24,7 @@
 virtio_fs_daemon = ""
 virtio_fs_extra_args = []
 ```

-6. run `crictl run -r kata-qemu nydus-container.yaml nydus-sandbox.yaml`;
+6. run `crictl run -r kata nydus-container.yaml nydus-sandbox.yaml`;

 The `nydus-sandbox.yaml` looks like below:
diff --git a/src/runtime/Makefile b/src/runtime/Makefile
index f936bd796ca..45c1ff1b6e1 100644
--- a/src/runtime/Makefile
+++ b/src/runtime/Makefile
@@ -163,6 +163,7 @@ DEFENTROPYSOURCE := /dev/urandom
 DEFVALIDENTROPYSOURCES := [\"/dev/urandom\",\"/dev/random\",\"\"]
 DEFDISABLEBLOCK := false
+DEFSHAREDFS_CLH_VIRTIOFS := virtio-fs
 DEFSHAREDFS_QEMU_VIRTIOFS := virtio-fs
 DEFVIRTIOFSDAEMON := $(LIBEXECDIR)/kata-qemu/virtiofsd
 DEFVALIDVIRTIOFSDAEMONPATHS := [\"$(DEFVIRTIOFSDAEMON)\"]
@@ -437,6 +438,7 @@ USER_VARS += DEFDISABLEBLOCK
 USER_VARS += DEFBLOCKSTORAGEDRIVER_ACRN
 USER_VARS += DEFBLOCKSTORAGEDRIVER_FC
 USER_VARS += DEFBLOCKSTORAGEDRIVER_QEMU
+USER_VARS += DEFSHAREDFS_CLH_VIRTIOFS
 USER_VARS += DEFSHAREDFS_QEMU_VIRTIOFS
 USER_VARS += DEFVIRTIOFSDAEMON
 USER_VARS += DEFVALIDVIRTIOFSDAEMONPATHS
diff --git a/src/runtime/config/configuration-clh.toml.in b/src/runtime/config/configuration-clh.toml.in
index 7f06dd8eb36..07e3f31a4b8 100644
--- a/src/runtime/config/configuration-clh.toml.in
+++ b/src/runtime/config/configuration-clh.toml.in
@@ -70,6 +70,11 @@ default_memory = @DEFMEMSZ@
 # This is will determine the times that memory will be hotadded to sandbox/VM.
 #memory_slots = @DEFMEMSLOTS@

+# Shared file system type:
+#   - virtio-fs (default)
+#   - virtio-fs-nydus
+shared_fs = "@DEFSHAREDFS_CLH_VIRTIOFS@"
+
 # Path to vhost-user-fs daemon.
 virtio_fs_daemon = "@DEFVIRTIOFSDAEMON@"
diff --git a/src/runtime/pkg/katautils/config.go b/src/runtime/pkg/katautils/config.go
index 06f6a6df027..a34c229ac64 100644
--- a/src/runtime/pkg/katautils/config.go
+++ b/src/runtime/pkg/katautils/config.go
@@ -426,7 +426,7 @@ func (h hypervisor) sharedFS() (string, error) {
    supportedSharedFS := []string{config.Virtio9P, config.VirtioFS, config.VirtioFSNydus}

    if h.SharedFS == "" {
-       return config.Virtio9P, nil
+       return config.VirtioFS, nil
    }

    for _, fs := range supportedSharedFS {
@@ -644,14 +644,9 @@ func newQemuHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
        return vc.HypervisorConfig{}, err
    }

-   if sharedFS == config.VirtioFS && h.VirtioFSDaemon == "" {
+   if (sharedFS == config.VirtioFS || sharedFS == config.VirtioFSNydus) && h.VirtioFSDaemon == "" {
        return vc.HypervisorConfig{},
-           errors.New("cannot enable virtio-fs without daemon path in configuration file")
-   }
-
-   if sharedFS == config.VirtioFSNydus && h.VirtioFSDaemon == "" {
-       return vc.HypervisorConfig{},
-           errors.New("cannot enable virtio nydus without nydusd daemon path in configuration file")
+           fmt.Errorf("cannot enable %s without daemon path in configuration file", sharedFS)
    }

    if vSock, err := utils.SupportsVsocks(); !vSock {
@@ -822,11 +817,18 @@ func newClhHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
        return vc.HypervisorConfig{}, err
    }

-   sharedFS := config.VirtioFS
+   sharedFS, err := h.sharedFS()
+   if err != nil {
+       return vc.HypervisorConfig{}, err
+   }
+
+   if sharedFS != config.VirtioFS && sharedFS != config.VirtioFSNydus {
+       return vc.HypervisorConfig{}, errors.New("clh only supports virtio-fs or virtio-fs-nydus")
+   }

    if h.VirtioFSDaemon == "" {
        return vc.HypervisorConfig{},
-           errors.New("virtio-fs daemon path is missing in configuration file")
+           fmt.Errorf("cannot enable %s without daemon path in configuration file", sharedFS)
    }

    return vc.HypervisorConfig{
diff --git a/src/runtime/pkg/katautils/config_test.go b/src/runtime/pkg/katautils/config_test.go
index 5bfd6125071..268e50276d1 100644
--- a/src/runtime/pkg/katautils/config_test.go
+++ b/src/runtime/pkg/katautils/config_test.go
@@ -633,6 +633,8 @@ func TestNewQemuHypervisorConfig(t *testing.T) {
        PCIeRootPort:          pcieRootPort,
        RxRateLimiterMaxRate:  rxRateLimiterMaxRate,
        TxRateLimiterMaxRate:  txRateLimiterMaxRate,
+       SharedFS:              "virtio-fs",
+       VirtioFSDaemon:        filepath.Join(dir, "virtiofsd"),
    }

    files := []string{hypervisorPath, kernelPath, imagePath}
@@ -1388,6 +1390,8 @@ func TestUpdateRuntimeConfigurationVMConfig(t *testing.T) {
            Image:          "/",
            Firmware:       "/",
            FirmwareVolume: "/",
+           SharedFS:       "virtio-fs",
+           VirtioFSDaemon: "/usr/libexec/kata-qemu/virtiofsd",
        },
    },
 }
diff --git a/src/runtime/virtcontainers/clh.go b/src/runtime/virtcontainers/clh.go
index f08eba67f49..4ab1400dad3 100644
--- a/src/runtime/virtcontainers/clh.go
+++ b/src/runtime/virtcontainers/clh.go
@@ -144,27 +144,27 @@ func (c *clhClientApi) VmRemoveDevicePut(ctx context.Context, vmRemoveDevice chc
 // Cloud hypervisor state
 //
 type CloudHypervisorState struct {
-   apiSocket    string
-   PID          int
-   VirtiofsdPID int
-   state        clhState
+   apiSocket         string
+   PID               int
+   VirtiofsDaemonPid int
+   state             clhState
 }

 func (s *CloudHypervisorState) reset() {
    s.PID = 0
-   s.VirtiofsdPID = 0
+   s.VirtiofsDaemonPid = 0
    s.state = clhNotReady
 }

 type cloudHypervisor struct {
-   console   console.Console
-   virtiofsd VirtiofsDaemon
-   APIClient clhClient
-   ctx       context.Context
-   id        string
-   vmconfig  chclient.VmConfig
-   state     CloudHypervisorState
-   config    HypervisorConfig
+   console        console.Console
+   virtiofsDaemon VirtiofsDaemon
+   APIClient      clhClient
+   ctx            context.Context
+   id             string
+   vmconfig       chclient.VmConfig
+   state          CloudHypervisorState
+   config         HypervisorConfig
 }

 var clhKernelParams = []Param{
@@ -198,6 +198,10 @@ func (clh *cloudHypervisor) setConfig(config *HypervisorConfig) error {
    return nil
 }

+func (clh *cloudHypervisor) nydusdAPISocketPath(id string) (string, error) {
+   return utils.BuildSocketPath(clh.config.VMStorePath, id, nydusdAPISock)
+}
+
 // For cloudHypervisor this call only sets the internal structure up.
 // The VM will be created and started through StartVM().
 func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, network Network, hypervisorConfig *HypervisorConfig) error {
@@ -223,8 +227,8 @@ func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, network Net
    if clh.state.PID > 0 {
        clh.Logger().WithField("function", "CreateVM").Info("Sandbox already exist, loading from state")

-       clh.virtiofsd = &virtiofsd{
-           PID:        clh.state.VirtiofsdPID,
+       clh.virtiofsDaemon = &virtiofsd{
+           PID:        clh.state.VirtiofsDaemonPid,
            sourcePath: hypervisorConfig.SharedPath,
            debug:      clh.config.Debug,
            socketPath: virtiofsdSocketPath,
@@ -349,7 +353,7 @@ func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, network Net
        ApiInternal: chclient.NewAPIClient(cfg).DefaultApi,
    }

-   clh.virtiofsd = &virtiofsd{
+   clh.virtiofsDaemon = &virtiofsd{
        path:       clh.config.VirtioFSDaemon,
        sourcePath: filepath.Join(GetSharePath(clh.id)),
        socketPath: virtiofsdSocketPath,
@@ -358,6 +362,25 @@ func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, network Net
        cache:      clh.config.VirtioFSCache,
    }

+   if clh.config.SharedFS == config.VirtioFSNydus {
+       apiSockPath, err := clh.nydusdAPISocketPath(clh.id)
+       if err != nil {
+           clh.Logger().WithError(err).Error("Invalid api socket path for nydusd")
+           return err
+       }
+       nd := &nydusd{
+           path:        clh.config.VirtioFSDaemon,
+           sockPath:    virtiofsdSocketPath,
+           apiSockPath: apiSockPath,
+           sourcePath:  filepath.Join(GetSharePath(clh.id)),
+           debug:       clh.config.Debug,
+           extraArgs:   clh.config.VirtioFSExtraArgs,
+           startFn:     startInShimNS,
+       }
+       nd.setupShareDirFn = nd.setupPassthroughFS
+       clh.virtiofsDaemon = nd
+   }
+
    if clh.config.SGXEPCSize > 0 {
        epcSection := chclient.NewSgxEpcConfig("kata-epc", clh.config.SGXEPCSize)
        epcSection.Prefault = func(b bool) *bool { return &b }(true)
@@ -389,8 +412,8 @@ func (clh *cloudHypervisor) StartVM(ctx context.Context, timeout int) error {
        return err
    }

-   if clh.virtiofsd == nil {
-       return errors.New("Missing virtiofsd configuration")
+   if clh.virtiofsDaemon == nil {
+       return errors.New("Missing virtiofsDaemon configuration")
    }

    // This needs to be done as late as possible, just before launching
@@ -402,23 +425,23 @@ func (clh *cloudHypervisor) StartVM(ctx context.Context, timeout int) error {
    }
    defer label.SetProcessLabel("")

-   if clh.config.SharedFS == config.VirtioFS {
-       clh.Logger().WithField("function", "StartVM").Info("Starting virtiofsd")
-       pid, err := clh.virtiofsd.Start(ctx, func() {
+   if clh.config.SharedFS == config.VirtioFS || clh.config.SharedFS == config.VirtioFSNydus {
+       clh.Logger().WithField("function", "StartVM").Info("Starting virtiofsDaemon")
+       pid, err := clh.virtiofsDaemon.Start(ctx, func() {
            clh.StopVM(ctx, false)
        })
        if err != nil {
            return err
        }
-       clh.state.VirtiofsdPID = pid
+       clh.state.VirtiofsDaemonPid = pid
    } else {
        return errors.New("cloud-hypervisor only supports virtio based file sharing")
    }

    pid, err := clh.launchClh()
    if err != nil {
-       if shutdownErr := clh.virtiofsd.Stop(ctx); shutdownErr != nil {
-           clh.Logger().WithError(shutdownErr).Warn("error shutting down Virtiofsd")
+       if shutdownErr := clh.virtiofsDaemon.Stop(ctx); shutdownErr != nil {
+           clh.Logger().WithError(shutdownErr).Warn("error shutting down VirtiofsDaemon")
        }
        return fmt.Errorf("failed to launch cloud-hypervisor: %q", err)
    }
@@ -759,14 +782,14 @@ func (clh *cloudHypervisor) toGrpc(ctx context.Context) ([]byte, error) {
 func (clh *cloudHypervisor) Save() (s hv.HypervisorState) {
    s.Pid = clh.state.PID
    s.Type = string(ClhHypervisor)
-   s.VirtiofsDaemonPid = clh.state.VirtiofsdPID
+   s.VirtiofsDaemonPid = clh.state.VirtiofsDaemonPid
    s.APISocket = clh.state.apiSocket
    return
 }

 func (clh *cloudHypervisor) Load(s hv.HypervisorState) {
    clh.state.PID = s.Pid
-   clh.state.VirtiofsdPID = s.VirtiofsDaemonPid
+   clh.state.VirtiofsDaemonPid = s.VirtiofsDaemonPid
    clh.state.apiSocket = s.APISocket
 }

@@ -790,7 +813,7 @@ func (clh *cloudHypervisor) GetPids() []int {
 }

 func (clh *cloudHypervisor) GetVirtioFsPid() *int {
-   return &clh.state.VirtiofsdPID
+   return &clh.state.VirtiofsDaemonPid
 }

 func (clh *cloudHypervisor) AddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) error {
@@ -872,13 +895,13 @@ func (clh *cloudHypervisor) terminate(ctx context.Context, waitOnly bool) (err e
        return err
    }

-   if clh.virtiofsd == nil {
-       return errors.New("virtiofsd config is nil, failed to stop it")
+   if clh.virtiofsDaemon == nil {
+       return errors.New("virtiofsDaemon config is nil, failed to stop it")
    }

-   clh.Logger().Debug("stop virtiofsd")
-   if err = clh.virtiofsd.Stop(ctx); err != nil {
-       clh.Logger().WithError(err).Error("failed to stop virtiofsd")
+   clh.Logger().Debug("stop virtiofsDaemon")
+   if err = clh.virtiofsDaemon.Stop(ctx); err != nil {
+       clh.Logger().WithError(err).Error("failed to stop virtiofsDaemon")
    }

    return
@@ -1181,7 +1204,7 @@ func (clh *cloudHypervisor) addNet(e Endpoint) error {

 // Add shared Volume using virtiofs
 func (clh *cloudHypervisor) addVolume(volume types.Volume) error {
-   if clh.config.SharedFS != config.VirtioFS {
+   if clh.config.SharedFS != config.VirtioFS && clh.config.SharedFS != config.VirtioFSNydus {
        return fmt.Errorf("shared fs method not supported %s", clh.config.SharedFS)
    }

diff --git a/src/runtime/virtcontainers/clh_test.go b/src/runtime/virtcontainers/clh_test.go
index d350dd9e97f..7fbdd17fa8f 100644
--- a/src/runtime/virtcontainers/clh_test.go
+++ b/src/runtime/virtcontainers/clh_test.go
@@ -296,7 +296,7 @@ func TestClhCreateVM(t *testing.T) {
    assert.Exactly(clhConfig, clh.config)
 }

-func TestClooudHypervisorStartSandbox(t *testing.T) {
+func TestCloudHypervisorStartSandbox(t *testing.T) {
    assert := assert.New(t)
    clhConfig, err := newClhConfig()
    assert.NoError(err)
@@ -308,9 +308,9 @@ func TestClooudHypervisorStartSandbox(t *testing.T) {
    clhConfig.RunStorePath = store.RunStoragePath()

    clh := &cloudHypervisor{
-       config:    clhConfig,
-       APIClient: &clhClientMock{},
-       virtiofsd: &virtiofsdMock{},
+       config:         clhConfig,
+       APIClient:      &clhClientMock{},
+       virtiofsDaemon: &virtiofsdMock{},
    }

    err = clh.StartVM(context.Background(), 10)
diff --git a/src/runtime/virtcontainers/kata_agent.go b/src/runtime/virtcontainers/kata_agent.go
index dc7b998174a..74f35cf6cf8 100644
--- a/src/runtime/virtcontainers/kata_agent.go
+++ b/src/runtime/virtcontainers/kata_agent.go
@@ -1272,13 +1272,24 @@ func (k *kataAgent) rollbackFailingContainerCreation(ctx context.Context, c *Con
    }
 }

-func (k *kataAgent) buildContainerRootfsWithNydus(sandbox *Sandbox, c *Container, rootPathParent string) (*grpc.Storage, error) {
-   if sandbox.GetHypervisorType() != string(QemuHypervisor) {
-       // qemu is supported first, other hypervisors will next
-       // https://github.com/kata-containers/kata-containers/issues/2724
+func getVirtiofsDaemonForNydus(sandbox *Sandbox) (VirtiofsDaemon, error) {
+   var virtiofsDaemon VirtiofsDaemon
+   switch sandbox.GetHypervisorType() {
+   case string(QemuHypervisor):
+       virtiofsDaemon = sandbox.hypervisor.(*qemu).virtiofsDaemon
+   case string(ClhHypervisor):
+       virtiofsDaemon = sandbox.hypervisor.(*cloudHypervisor).virtiofsDaemon
+   default:
        return nil, errNydusdNotSupport
    }
-   q, _ := sandbox.hypervisor.(*qemu)
+   return virtiofsDaemon, nil
+}
+
+func (k *kataAgent) buildContainerRootfsWithNydus(sandbox *Sandbox, c *Container, rootPathParent string) (*grpc.Storage, error) {
+   virtiofsDaemon, err := getVirtiofsDaemonForNydus(sandbox)
+   if err != nil {
+       return nil, err
+   }
    extraOption, err := parseExtraOption(c.rootFs.Options)
    if err != nil {
        return nil, err
@@ -1290,7 +1301,7 @@ func (k *kataAgent) buildContainerRootfsWithNydus(sandbox *Sandbox, c *Container
    }
    k.Logger().Infof("nydus option: %v", extraOption)
    // mount lowerdir to guest /run/kata-containers/shared/images//lowerdir
-   if err := q.virtiofsDaemon.Mount(*mountOpt); err != nil {
+   if err := virtiofsDaemon.Mount(*mountOpt); err != nil {
        return nil, err
    }
    rootfs := &grpc.Storage{}
diff --git a/src/runtime/virtcontainers/mount.go b/src/runtime/virtcontainers/mount.go
index b23bd087930..f7e65b69ee8 100644
--- a/src/runtime/virtcontainers/mount.go
+++ b/src/runtime/virtcontainers/mount.go
@@ -390,13 +390,11 @@ func bindUnmountContainerSnapshotDir(ctx context.Context, sharedDir, cID string)

 func nydusContainerCleanup(ctx context.Context, sharedDir string, c *Container) error {
    sandbox := c.sandbox
-   if sandbox.GetHypervisorType() != string(QemuHypervisor) {
-       // qemu is supported first, other hypervisors will next
-       // https://github.com/kata-containers/kata-containers/issues/2724
-       return errNydusdNotSupport
+   virtiofsDaemon, err := getVirtiofsDaemonForNydus(sandbox)
+   if err != nil {
+       return err
    }
-   q, _ := sandbox.hypervisor.(*qemu)
-   if err := q.virtiofsDaemon.Umount(rafsMountPath(c.id)); err != nil {
+   if err := virtiofsDaemon.Umount(rafsMountPath(c.id)); err != nil {
        return errors.Wrap(err, "umount rafs failed")
    }
    if err := bindUnmountContainerSnapshotDir(ctx, sharedDir, c.id); err != nil {
diff --git a/src/runtime/virtcontainers/nydusd.go b/src/runtime/virtcontainers/nydusd.go
index 1a09b24b16f..c9315ee3701 100644
--- a/src/runtime/virtcontainers/nydusd.go
+++ b/src/runtime/virtcontainers/nydusd.go
@@ -68,7 +68,7 @@ var (
    errNydusdSockPathInvalid    = errors.New("nydusd sock path is invalid")
    errNydusdAPISockPathInvalid = errors.New("nydusd api sock path is invalid")
    errNydusdSourcePathInvalid  = errors.New("nydusd resource path is invalid")
-   errNydusdNotSupport         = errors.New("nydusd only supports the QEMU hypervisor currently (see https://github.com/kata-containers/kata-containers/issues/2724)")
+   errNydusdNotSupport         = errors.New("nydusd only supports the QEMU/CLH hypervisors currently (see https://github.com/kata-containers/kata-containers/issues/3654)")
 )

 type nydusd struct {
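The `newClhHypervisorConfig()` hunk above changes how Cloud Hypervisor resolves the shared file system: an unset `shared_fs` now falls back to `virtio-fs` via `sharedFS()`, only the two virtio-fs variants are accepted for CLH, and a daemon path is mandatory. Below is a minimal standalone sketch of that validation flow; the function name, the constants, and the nydusd path in `main` are illustrative stand-ins, not the actual kata-containers API.

```go
package main

import (
	"errors"
	"fmt"
)

// Illustrative stand-ins for the config package constants used in the patch.
const (
	virtioFS      = "virtio-fs"
	virtioFSNydus = "virtio-fs-nydus"
)

// resolveClhSharedFS mirrors the checks added to newClhHypervisorConfig():
// an empty shared_fs defaults to virtio-fs, only the virtio-fs variants are
// accepted for Cloud Hypervisor, and virtio_fs_daemon must be set.
func resolveClhSharedFS(sharedFS, virtioFSDaemon string) (string, error) {
	if sharedFS == "" {
		// sharedFS() now defaults to virtio-fs instead of virtio-9p.
		sharedFS = virtioFS
	}
	if sharedFS != virtioFS && sharedFS != virtioFSNydus {
		return "", errors.New("clh only supports virtio-fs or virtio-fs-nydus")
	}
	if virtioFSDaemon == "" {
		return "", fmt.Errorf("cannot enable %s without daemon path in configuration file", sharedFS)
	}
	return sharedFS, nil
}

func main() {
	// With shared_fs = "virtio-fs-nydus", virtio_fs_daemon points at the
	// nydusd binary; the path here is purely an example.
	fs, err := resolveClhSharedFS(virtioFSNydus, "/usr/local/bin/nydusd")
	fmt.Println(fs, err)
}
```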
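The `getVirtiofsDaemonForNydus()` helper added in `kata_agent.go` centralizes the hypervisor dispatch so the nydus rootfs mount and cleanup paths no longer type-assert on `*qemu`. The sketch below shows the same pattern with simplified stand-in types rather than the real virtcontainers definitions.

```go
package main

import (
	"errors"
	"fmt"
)

// Simplified stand-ins for the virtcontainers types; names and fields are
// illustrative only.
type VirtiofsDaemon interface {
	Mount(rafs string) error
	Umount(mountpoint string) error
}

type qemuHV struct{ virtiofsDaemon VirtiofsDaemon }
type clhHV struct{ virtiofsDaemon VirtiofsDaemon }

type sandbox struct {
	hypervisorType string
	hypervisor     interface{}
}

var errNydusdNotSupport = errors.New("nydusd only supports the QEMU/CLH hypervisors currently")

// daemonForNydus mirrors the dispatch idea of getVirtiofsDaemonForNydus():
// resolve the per-hypervisor daemon once, so mount and cleanup code works
// against the VirtiofsDaemon interface instead of a concrete hypervisor type.
func daemonForNydus(s *sandbox) (VirtiofsDaemon, error) {
	switch s.hypervisorType {
	case "qemu":
		return s.hypervisor.(*qemuHV).virtiofsDaemon, nil
	case "clh":
		return s.hypervisor.(*clhHV).virtiofsDaemon, nil
	default:
		return nil, errNydusdNotSupport
	}
}

// noopDaemon lets the example run without a real virtiofsd/nydusd process.
type noopDaemon struct{}

func (noopDaemon) Mount(string) error  { return nil }
func (noopDaemon) Umount(string) error { return nil }

func main() {
	s := &sandbox{hypervisorType: "clh", hypervisor: &clhHV{virtiofsDaemon: noopDaemon{}}}
	d, err := daemonForNydus(s)
	fmt.Println(d != nil, err)
}
```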