From 7a5a65c7699d75e597b7ce81f4df9a6ebd6ec469 Mon Sep 17 00:00:00 2001 From: Hamza El-Saawy Date: Tue, 7 Nov 2023 13:06:12 -0500 Subject: [PATCH] Add WCOW and vSMB functional tests Update and un-skip WCOW uVM and container tests (and add WCOW uVM benchmarks), as well as WCOW vSMB and LCOW boot files tests. Add WCOW host process tests, including dedicated tests for setting username, and verifying hostname and volume mounts. Moved: - `lcow_bench_test.go` to `uvm_bench_test.go` - `lcow_container_test.go` to `container_test.go` - `lcow_test.go` to `lcow_uvm_test.go` and `uvm_test.go` Fix bug where removing a direct-mapped vSMB share fails. Run (non-virtualization/uVM) functional tests within CI. Add `util.Context` function to create context that times out before test timeout, to help with timing issues and allow time for cleanup and logging. Make sure container specs are created with the default working directory (`C:\`), similar to how `internal\cmd` works. Rename `cri_util` to `criutil`, since underscores are frowned upon in package names. 
Relies on PR: https://github.com/microsoft/hcsshim/pull/1974 Signed-off-by: Hamza El-Saawy --- .github/workflows/ci.yml | 51 +- internal/jobcontainers/jobcontainer.go | 18 +- internal/jobcontainers/storage.go | 29 +- internal/uvm/vsmb.go | 75 +- test/functional/container_test.go | 665 ++++++++++++++++++ test/functional/hostprocess_test.go | 400 +++++++++++ test/functional/lcow_bench_test.go | 106 --- test/functional/lcow_container_bench_test.go | 25 +- test/functional/lcow_container_test.go | 169 ----- test/functional/lcow_networking_test.go | 3 +- test/functional/lcow_policy_test.go | 2 +- test/functional/lcow_test.go | 303 -------- test/functional/lcow_uvm_test.go | 271 +++++++ test/functional/main_test.go | 10 + test/functional/uvm_bench_test.go | 123 ++++ test/functional/uvm_plannine_test.go | 10 +- test/functional/uvm_test.go | 70 ++ test/functional/uvm_update_test.go | 4 +- test/functional/uvm_vsmb_test.go | 198 ++++-- .../{wcow_test.go => wcow_uvm_test.go} | 0 test/go.mod | 2 +- test/internal/cmd/cmd.go | 3 + test/internal/cmd/io.go | 46 +- test/internal/container/container.go | 1 - test/internal/oci/oci.go | 42 ++ test/internal/util/util.go | 47 ++ test/pkg/uvm/uvm.go | 32 + test/pkg/uvm/wcow.go | 3 +- 28 files changed, 2003 insertions(+), 705 deletions(-) create mode 100644 test/functional/container_test.go create mode 100644 test/functional/hostprocess_test.go delete mode 100644 test/functional/lcow_bench_test.go delete mode 100644 test/functional/lcow_container_test.go delete mode 100644 test/functional/lcow_test.go create mode 100644 test/functional/lcow_uvm_test.go create mode 100644 test/functional/uvm_bench_test.go create mode 100644 test/functional/uvm_test.go rename test/functional/{wcow_test.go => wcow_uvm_test.go} (100%) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ddafb87e1d..fd8e0f6e97 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -314,6 +314,30 @@ jobs: - name: Install gotestsum run: go 
install gotest.tools/gotestsum@${{ env.GOTESTSUM_VERSION }} + # Download PsExec so we can run (functional) tests as 'NT Authority\System'. + # Needed for hostprocess tests, as well as ensuring backup and restore privileges for + # unpacking WCOW images. + - name: Install PsExec.exe + run: | + New-Item -ItemType Directory -Force '${{ github.workspace }}\bin' > $null + '${{ github.workspace }}\bin' | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + + curl.exe -L --no-progress-meter --fail-with-body -o 'C:\PSTools.zip' ` + 'https://download.sysinternals.com/files/PSTools.zip' 2>&1 + if ( $LASTEXITCODE ) { + Write-Output '::error::Could not download PSTools.zip' + exit $LASTEXITCODE + } + + tar.exe xf 'C:\PSTools.zip' -C '${{ github.workspace }}\bin' 'PsExec*' 2>&1 + if ( $LASTEXITCODE ) { + Write-Output '::error::Could not extract PsExec.exe' + exit $LASTEXITCODE + } + + # accept the eula + & '${{ github.workspace }}/bin/psexec' -accepteula -nobanner cmd /c "exit 0" 2>$null + # run tests - name: Test repo run: ${{ env.GOTESTSUM_CMD }} -gcflags=all=-d=checkptr -tags admin -timeout=20m ./... @@ -343,13 +367,34 @@ jobs: ${{ env.GOTESTSUM_CMD_RAW }} ./containerd-shim-runhcs-v1.test.exe '-test.v' working-directory: test + - name: Build and run functional testing binary + run: | + ${{ env.GO_BUILD_TEST_CMD }} ./functional + if ( $LASTEXITCODE ) { + Write-Output '::error::Could not build functional.test.exe' + exit $LASTEXITCODE + } + + # PsExec doesn't load GOBIN into path, so resolve gotestsum path + # don't run uVM (ie, nested virt) or LCOW integrity tests + $cmd = '${{ env.GOTESTSUM_CMD_RAW }} ./functional.test.exe -exclude="LCOW,LCOWIntegrity,uVM" -test.timeout=1h -test.v' + $cmd = $cmd -replace 'gotestsum', ((Get-Command gotestsum)[0].Source) + Write-Host "gotestsum command: $cmd" + + # it appears that in a GH runner, PsExec always runs noninteractively (even with `-i`) and + # doesn't capture or redirect std IO. + # Instead, write stdout/stderr to a file. 
+ psexec -nobanner -w (Get-Location) -s cmd /c "$cmd > out.txt 2>&1" + $ec = $LASTEXITCODE + Get-Content out.txt + + exit $ec + working-directory: test + # build testing binaries - name: Build cri-containerd Testing Binary run: ${{ env.GO_BUILD_TEST_CMD }} ./cri-containerd working-directory: test - - name: Build functional Testing Binary - run: ${{ env.GO_BUILD_TEST_CMD }} ./functional - working-directory: test - name: Build runhcs Testing Binary run: ${{ env.GO_BUILD_TEST_CMD }} ./runhcs working-directory: test diff --git a/internal/jobcontainers/jobcontainer.go b/internal/jobcontainers/jobcontainer.go index 49faafd443..fc8aa80464 100644 --- a/internal/jobcontainers/jobcontainer.go +++ b/internal/jobcontainers/jobcontainer.go @@ -30,11 +30,6 @@ import ( "golang.org/x/sys/windows" ) -var ( - fileBindingSupport bool - checkBindSupportOnce sync.Once -) - const ( // jobContainerNameFmt is the naming format that job objects for job containers will follow. jobContainerNameFmt = "JobContainer_%s" @@ -181,15 +176,8 @@ func Create(ctx context.Context, id string, s *specs.Spec) (_ cow.Container, _ * // show up at beforehand as you would need to know the containers ID before you launched it. Now that the // rootfs location can be static, a user can easily supply C:\hpc\rest\of\path as their work dir and still // supply anything outside of C:\hpc if they want another location on the host. - checkBindSupportOnce.Do(func() { - bindDLL := `C:\windows\system32\bindfltapi.dll` - if _, err := os.Stat(bindDLL); err == nil { - fileBindingSupport = true - } - }) - var closer resources.ResourceCloser - if fileBindingSupport { + if FileBindingSupported() { closer, err = container.bindSetup(ctx, s) } else { closer, err = container.fallbackSetup(ctx, s) @@ -254,7 +242,7 @@ func (c *JobContainer) CreateProcess(ctx context.Context, config interface{}) (_ // If the working directory was changed, that means the user supplied %CONTAINER_SANDBOX_MOUNT_POINT%\\my\dir or something similar. 
// In that case there's nothing left to do, as we don't want to join it with the mount point again.. If it *wasn't* changed, and there's // no bindflt support then we need to join it with the mount point, as it's some normal path. - if !changed && !fileBindingSupport { + if !changed && !FileBindingSupported() { workDir = filepath.Join(c.rootfsLocation, removeDriveLetter(workDir)) } } @@ -335,7 +323,7 @@ func (c *JobContainer) CreateProcess(ctx context.Context, config interface{}) (_ // (cmd in this case) after launch can now see C:\ as it's in the silo. We could // also add a new mode/flag for the shim where it's just a dummy process launcher, so we can invoke // the shim instead of cmd and have more control over things. - if fileBindingSupport { + if FileBindingSupported() { commandLine = "cmd /c " + commandLine } diff --git a/internal/jobcontainers/storage.go b/internal/jobcontainers/storage.go index 180c27a862..b38c9fca81 100644 --- a/internal/jobcontainers/storage.go +++ b/internal/jobcontainers/storage.go @@ -8,22 +8,24 @@ import ( "os" "path/filepath" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/Microsoft/hcsshim/internal/layers" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/resources" + "github.com/Microsoft/hcsshim/internal/sync" "github.com/Microsoft/hcsshim/internal/wclayer" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) // fallbackRootfsFormat is the fallback location for the rootfs if file binding support isn't available. // %s will be expanded with the container ID. Trailing backslash required for SetVolumeMountPoint and -// DeleteVolumeMountPoint +// DeleteVolumeMountPoint. const fallbackRootfsFormat = `C:\hpc\%s\` // defaultSiloRootfsLocation is the default location the rootfs for the container will show up // inside of a given silo. If bind filter support isn't available the rootfs will be -// C:\hpc\ +// C:\hpc\. 
const defaultSiloRootfsLocation = `C:\hpc\` func (c *JobContainer) mountLayers(ctx context.Context, containerID string, s *specs.Spec, volumeMountPath string) (_ resources.ResourceCloser, err error) { @@ -72,3 +74,22 @@ func (c *JobContainer) setupRootfsBinding(root, target string) error { } return nil } + +var fileBindingSupportedOnce = sync.OnceValue(func() (bool, error) { + // TODO: use windows.NewLazySystemDLL("bindfltapi.dll").Load() (or windows.LoadLibraryEx directly) + + root := os.Getenv("SystemRoot") + if root == "" { + root = `C:\windows` // shouldn't really need this fall back, but ... + } + bindDLL := filepath.Join(root, `system32\bindfltapi.dll`) + if _, err := os.Stat(bindDLL); err != nil { + return false, err + } + return true, nil +}) + +func FileBindingSupported() bool { + b, _ := fileBindingSupportedOnce() + return b +} diff --git a/internal/uvm/vsmb.go b/internal/uvm/vsmb.go index 05480df74c..2ad1454163 100644 --- a/internal/uvm/vsmb.go +++ b/internal/uvm/vsmb.go @@ -4,6 +4,7 @@ package uvm import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -26,9 +27,9 @@ const ( vsmbSharePrefix = `\\?\VMSMB\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\` ) -// VSMBShare contains the host path for a Vsmb Mount +// VSMBShare contains the host path for a Vsmb Mount. type VSMBShare struct { - // UVM the resource belongs to + // UVM the resource belongs to. vm *UtilityVM HostPath string refCount uint32 @@ -36,11 +37,14 @@ type VSMBShare struct { allowedFiles []string guestPath string options hcsschema.VirtualSmbShareOptions + // whether the share is mapping an entire directory. + // ie, if the share is stored in [vm.vsmbDirShares] or [vm.vsmbFileShares]. 
+ isDirShare bool } // Release frees the resources of the corresponding vsmb Mount func (vsmb *VSMBShare) Release(ctx context.Context) error { - if err := vsmb.vm.RemoveVSMB(ctx, vsmb.HostPath, vsmb.options.ReadOnly); err != nil { + if err := vsmb.vm.removeVSMB(ctx, vsmb.HostPath, vsmb.options.ReadOnly, vsmb.isDirShare); err != nil { return fmt.Errorf("failed to remove VSMB share: %s", err) } return nil @@ -62,7 +66,7 @@ func (uvm *UtilityVM) DefaultVSMBOptions(readOnly bool) *hcsschema.VirtualSmbSha } // findVSMBShare finds a share by `hostPath`. If not found returns `ErrNotAttached`. -func (uvm *UtilityVM) findVSMBShare(ctx context.Context, m map[string]*VSMBShare, shareKey string) (*VSMBShare, error) { +func (*UtilityVM) findVSMBShare(_ context.Context, m map[string]*VSMBShare, shareKey string) (*VSMBShare, error) { share, ok := m[shareKey] if !ok { return nil, ErrNotAttached @@ -129,7 +133,12 @@ func forceNoDirectMap(path string) (bool, error) { var info winapi.FILE_ID_INFO // We check for any error, rather than just ERROR_INVALID_PARAMETER. It seems better to also // fall back if e.g. some other backing filesystem is used which returns a different error. 
- if err := windows.GetFileInformationByHandleEx(h, winapi.FileIdInfo, (*byte)(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info))); err != nil { + if err := windows.GetFileInformationByHandleEx( + h, + winapi.FileIdInfo, + (*byte)(unsafe.Pointer(&info)), + uint32(unsafe.Sizeof(info)), + ); err != nil { return true, nil } return false, nil @@ -181,16 +190,17 @@ func (uvm *UtilityVM) AddVSMB(ctx context.Context, hostPath string, options *hcs var requestType = guestrequest.RequestTypeUpdate shareKey := getVSMBShareKey(hostPath, options.ReadOnly) share, err := uvm.findVSMBShare(ctx, m, shareKey) - if err == ErrNotAttached { + if errors.Is(err, ErrNotAttached) { requestType = guestrequest.RequestTypeAdd uvm.vsmbCounter++ shareName := "s" + strconv.FormatUint(uvm.vsmbCounter, 16) share = &VSMBShare{ - vm: uvm, - name: shareName, - guestPath: vsmbSharePrefix + shareName, - HostPath: hostPath, + vm: uvm, + name: shareName, + guestPath: vsmbSharePrefix + shareName, + HostPath: hostPath, + isDirShare: st.IsDir(), } } newAllowedFiles := share.allowedFiles @@ -234,6 +244,26 @@ func (uvm *UtilityVM) AddVSMB(ctx context.Context, hostPath string, options *hcs // RemoveVSMB removes a VSMB share from a utility VM. Each VSMB share is ref-counted // and only actually removed when the ref-count drops to zero. func (uvm *UtilityVM) RemoveVSMB(ctx context.Context, hostPath string, readOnly bool) error { + st, err := os.Stat(hostPath) + if err != nil { + return err + } + isDir := st.IsDir() + if !isDir { + hostPath = filepath.Dir(hostPath) + } + return uvm.removeVSMB(ctx, hostPath, readOnly, isDir) +} + +// removeVSMB removes the share for the directory at hostPath. +// +// directoryShare indicates if the share is stored in [uvm.vsmbDirShares] or [uvm.vsmbFileShares]. +// Ie, it should match [VSMBShare.isDirShare]. 
+// +// Regardless of whether the vSMB share is mapping a file or directory, hostPath must be the +// directory that was shared into the uVM (and the keyname the [VSMBShare] in either +// [uvm.vsmbDirShares] or [uvm.vsmbFileShares]). +func (uvm *UtilityVM) removeVSMB(ctx context.Context, hostPath string, readOnly, directoryShare bool) error { if uvm.operatingSystem != "windows" { return errNotSupported } @@ -241,14 +271,9 @@ func (uvm *UtilityVM) RemoveVSMB(ctx context.Context, hostPath string, readOnly uvm.m.Lock() defer uvm.m.Unlock() - st, err := os.Stat(hostPath) - if err != nil { - return err - } m := uvm.vsmbDirShares - if !st.IsDir() { + if !directoryShare { m = uvm.vsmbFileShares - hostPath = filepath.Dir(hostPath) } hostPath = filepath.Clean(hostPath) shareKey := getVSMBShareKey(hostPath, readOnly) @@ -262,6 +287,24 @@ func (uvm *UtilityVM) RemoveVSMB(ctx context.Context, hostPath string, readOnly return nil } + // Cannot remove a directmapped vSMB share without first closing all open handles to the + // share files from inside the the uVM (otherwise, the removal would un-map the files from + // the uVM's memory and subsequent access's would fail). + // Rather than forgetting about the share on the host side, keep it (with refCount == 0) + // in case that directory is re-added back for some reason. + // + // Note: HCS (vmcompute.exe) issues a remove vSMB request to the guest GCS iff: + // - vmwp.exe direct mapped the vSMB share; and + // - the GCS (on its internal bridge) has the PurgeVSmbCachedHandlesSupported capability. + // We do not (currently) have the ability to check for either. 
+ if !share.options.NoDirectmap { + log.G(ctx).WithFields(logrus.Fields{ + "name": share.name, + "path": hostPath, + }).Debug("skipping remove of directmapped vSMB share") + return nil + } + modification := &hcsschema.ModifySettingRequest{ RequestType: guestrequest.RequestTypeRemove, Settings: hcsschema.VirtualSmbShare{Name: share.name}, diff --git a/test/functional/container_test.go b/test/functional/container_test.go new file mode 100644 index 0000000000..5de68b09fe --- /dev/null +++ b/test/functional/container_test.go @@ -0,0 +1,665 @@ +//go:build windows && functional +// +build windows,functional + +package functional + +import ( + "context" + "fmt" + "testing" + + ctrdoci "github.com/containerd/containerd/oci" + "golang.org/x/sys/windows" + + "github.com/Microsoft/hcsshim/internal/jobcontainers" + "github.com/Microsoft/hcsshim/osversion" + + "github.com/Microsoft/hcsshim/test/internal/cmd" + "github.com/Microsoft/hcsshim/test/internal/container" + "github.com/Microsoft/hcsshim/test/internal/layers" + testoci "github.com/Microsoft/hcsshim/test/internal/oci" + "github.com/Microsoft/hcsshim/test/internal/util" + "github.com/Microsoft/hcsshim/test/pkg/require" + testuvm "github.com/Microsoft/hcsshim/test/pkg/uvm" +) + +func TestContainerLifecycle(t *testing.T) { + requireFeatures(t, featureContainer) + requireAnyFeature(t, featureUVM, featureLCOW, featureWCOW, featureHostProcess) + require.Build(t, osversion.RS5) + + ctx := util.Context(namespacedContext(context.Background()), t) + + t.Run("LCOW", func(t *testing.T) { + requireFeatures(t, featureLCOW, featureUVM) + + ls := linuxImageLayers(ctx, t) + vm := testuvm.CreateAndStart(ctx, t, defaultLCOWOptions(ctx, t)) + + scratch, _ := layers.ScratchSpace(ctx, t, vm, "", "", "") + cID := vm.ID() + "-container" + spec := testoci.CreateLinuxSpec(ctx, t, cID, + testoci.DefaultLinuxSpecOpts(cID, + ctrdoci.WithProcessArgs("/bin/sh", "-c", testoci.TailNullArgs), + testoci.WithWindowsLayerFolders(append(ls, scratch)))...) 
+ + c, _, cleanup := container.Create(ctx, t, vm, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + init := container.Start(ctx, t, c, nil) + t.Cleanup(func() { + container.Kill(ctx, t, c) + container.Wait(ctx, t, c) + }) + + cmd.Kill(ctx, t, init) + cmd.WaitExitCode(ctx, t, init, cmd.ForcedKilledExitCode) + }) // LCOW + + t.Run("WCOW Hyper-V", func(t *testing.T) { + requireFeatures(t, featureWCOW, featureUVM) + + ls := windowsImageLayers(ctx, t) + vm := testuvm.CreateAndStart(ctx, t, defaultWCOWOptions(ctx, t)) + + cID := vm.ID() + "-container" + scratch := layers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )...) + + c, _, cleanup := container.Create(ctx, t, vm, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + init := container.StartWithSpec(ctx, t, c, spec.Process, nil) + t.Cleanup(func() { + container.Kill(ctx, t, c) + container.Wait(ctx, t, c) + }) + + cmd.Kill(ctx, t, init) + cmd.WaitExitCode(ctx, t, init, int(windows.ERROR_PROCESS_ABORTED)) + }) // WCOW Hyper-V + + t.Run("WCOW Process", func(t *testing.T) { + requireFeatures(t, featureWCOW) + + cID := testName(t, "container") + scratch := layers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(windowsImageLayers(ctx, t), scratch)), + )...) 
+ + c, _, cleanup := container.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + init := container.StartWithSpec(ctx, t, c, spec.Process, nil) + t.Cleanup(func() { + container.Kill(ctx, t, c) + container.Wait(ctx, t, c) + }) + + cmd.Kill(ctx, t, init) + cmd.WaitExitCode(ctx, t, init, int(windows.ERROR_PROCESS_ABORTED)) + }) // WCOW Process + + t.Run("WCOW HostProcess", func(t *testing.T) { + requireFeatures(t, featureWCOW, featureHostProcess) + + cID := testName(t, "container") + scratch := layers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(windowsImageLayers(ctx, t), scratch)), + testoci.AsHostProcessContainer(), + testoci.HostProcessInheritUser(), + )...) + + c, _, cleanup := container.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + if _, ok := c.(*jobcontainers.JobContainer); !ok { + t.Fatalf("expected type JobContainer; got %T", c) + } + + init := container.StartWithSpec(ctx, t, c, spec.Process, nil) + t.Cleanup(func() { + container.Kill(ctx, t, c) + container.Wait(ctx, t, c) + }) + + cmd.Kill(ctx, t, init) + cmd.WaitExitCode(ctx, t, init, 1) + }) // WCOW HostProcess +} + +var ioTests = []struct { + name string + lcowArgs []string + wcowCmd string + in string + want string +}{ + { + name: "true", + lcowArgs: []string{"/bin/sh", "-c", "true"}, + wcowCmd: "cmd /c (exit 0)", + want: "", + }, + { + name: "echo", + lcowArgs: []string{"/bin/sh", "-c", `echo -n "hi y'all"`}, + wcowCmd: `cmd /c echo hi y'all`, + want: "hi y'all", + }, + { + name: "tee", + lcowArgs: []string{"/bin/sh", "-c", "tee"}, + wcowCmd: "", // TODO: figure out cmd.exe equivalent + in: "are you copying me?", + want: "are you copying me?", + }, +} + +func TestContainerIO(t *testing.T) { + requireFeatures(t, featureContainer) + requireAnyFeature(t, featureUVM, featureLCOW, featureWCOW, 
featureHostProcess) + require.Build(t, osversion.RS5) + + ctx := util.Context(namespacedContext(context.Background()), t) + + t.Run("LCOW", func(t *testing.T) { + requireFeatures(t, featureLCOW, featureUVM) + + opts := defaultLCOWOptions(ctx, t) + vm := testuvm.CreateAndStart(ctx, t, opts) + + ls := linuxImageLayers(ctx, t) + cache := layers.CacheFile(ctx, t, "") + + for _, tt := range ioTests { + if len(tt.lcowArgs) == 0 { + continue + } + + t.Run(tt.name, func(t *testing.T) { + cID := testName(t, "container") + + scratch, _ := layers.ScratchSpace(ctx, t, vm, "", "", cache) + spec := testoci.CreateLinuxSpec(ctx, t, cID, + testoci.DefaultLinuxSpecOpts(cID, + ctrdoci.WithProcessArgs(tt.lcowArgs...), + testoci.WithWindowsLayerFolders(append(ls, scratch)))...) + + c, _, cleanup := container.Create(ctx, t, vm, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + io := cmd.NewBufferedIO() + if tt.in != "" { + io = cmd.NewBufferedIOFromString(tt.in) + } + init := container.Start(ctx, t, c, io) + + t.Cleanup(func() { + container.Kill(ctx, t, c) + container.Wait(ctx, t, c) + }) + + cmd.WaitExitCode(ctx, t, init, 0) + io.TestOutput(t, tt.want, nil) + }) + } + }) // LCOW + + t.Run("WCOW Hyper-V", func(t *testing.T) { + requireFeatures(t, featureWCOW, featureUVM) + + ls := windowsImageLayers(ctx, t) + vm := testuvm.CreateAndStart(ctx, t, defaultWCOWOptions(ctx, t)) + + for _, tt := range ioTests { + if tt.wcowCmd == "" { + continue + } + + t.Run(tt.name, func(t *testing.T) { + cID := vm.ID() + "-container" + scratch := layers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(tt.wcowCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )...) 
+ + c, _, cleanup := container.Create(ctx, t, vm, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + io := cmd.NewBufferedIO() + if tt.in != "" { + io = cmd.NewBufferedIOFromString(tt.in) + } + init := container.StartWithSpec(ctx, t, c, spec.Process, io) + + t.Cleanup(func() { + container.Kill(ctx, t, c) + container.Wait(ctx, t, c) + }) + + cmd.WaitExitCode(ctx, t, init, 0) + io.TestOutput(t, tt.want, nil) + }) + } + }) // WCOW Hyper-V + + t.Run("WCOW Process", func(t *testing.T) { + requireFeatures(t, featureWCOW) + + ls := windowsImageLayers(ctx, t) + + for _, tt := range ioTests { + if tt.wcowCmd == "" { + continue + } + + t.Run(tt.name, func(t *testing.T) { + cID := testName(t, "container") + scratch := layers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(tt.wcowCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )...) + + c, _, cleanup := container.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + io := cmd.NewBufferedIO() + if tt.in != "" { + io = cmd.NewBufferedIOFromString(tt.in) + } + init := container.StartWithSpec(ctx, t, c, spec.Process, io) + t.Cleanup(func() { + container.Kill(ctx, t, c) + container.Wait(ctx, t, c) + }) + + cmd.WaitExitCode(ctx, t, init, 0) + io.TestOutput(t, tt.want, nil) + }) + } + }) // WCOW Process + + t.Run("WCOW HostProcess", func(t *testing.T) { + requireFeatures(t, featureWCOW, featureHostProcess) + + ls := windowsImageLayers(ctx, t) + + for _, tt := range ioTests { + if tt.wcowCmd == "" { + continue + } + + t.Run(tt.name, func(t *testing.T) { + cID := testName(t, "container") + scratch := layers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(tt.wcowCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + testoci.AsHostProcessContainer(), + testoci.HostProcessInheritUser(), + )...) 
+ + c, _, cleanup := container.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + io := cmd.NewBufferedIO() + if tt.in != "" { + io = cmd.NewBufferedIOFromString(tt.in) + } + init := container.StartWithSpec(ctx, t, c, spec.Process, io) + t.Cleanup(func() { + container.Kill(ctx, t, c) + container.Wait(ctx, t, c) + }) + + cmd.WaitExitCode(ctx, t, init, 0) + io.TestOutput(t, tt.want, nil) + }) + } + }) // WCOW HostProcess +} + +func TestContainerExec(t *testing.T) { + requireFeatures(t, featureContainer) + requireAnyFeature(t, featureUVM, featureLCOW, featureWCOW, featureHostProcess) + require.Build(t, osversion.RS5) + + ctx := util.Context(namespacedContext(context.Background()), t) + + t.Run("LCOW", func(t *testing.T) { + requireFeatures(t, featureLCOW, featureUVM) + + opts := defaultLCOWOptions(ctx, t) + vm := testuvm.CreateAndStart(ctx, t, opts) + + ls := linuxImageLayers(ctx, t) + scratch, _ := layers.ScratchSpace(ctx, t, vm, "", "", "") + + cID := vm.ID() + "-container" + spec := testoci.CreateLinuxSpec(ctx, t, cID, + testoci.DefaultLinuxSpecOpts(cID, + ctrdoci.WithProcessArgs("/bin/sh", "-c", testoci.TailNullArgs), + testoci.WithWindowsLayerFolders(append(ls, scratch)))...) 
+ + c, _, cleanup := container.Create(ctx, t, vm, spec, cID, hcsOwner) + t.Cleanup(cleanup) + init := container.Start(ctx, t, c, nil) + t.Cleanup(func() { + cmd.Kill(ctx, t, init) + cmd.Wait(ctx, t, init) + container.Kill(ctx, t, c) + container.Wait(ctx, t, c) + }) + + for _, tt := range ioTests { + if len(tt.lcowArgs) == 0 { + continue + } + + t.Run(tt.name, func(t *testing.T) { + ps := testoci.CreateLinuxSpec(ctx, t, cID, + testoci.DefaultLinuxSpecOpts(cID, + ctrdoci.WithDefaultPathEnv, + ctrdoci.WithProcessArgs(tt.lcowArgs...))..., + ).Process + io := cmd.NewBufferedIO() + if tt.in != "" { + io = cmd.NewBufferedIOFromString(tt.in) + } + p := cmd.Create(ctx, t, c, ps, io) + cmd.Start(ctx, t, p) + + cmd.WaitExitCode(ctx, t, p, 0) + io.TestOutput(t, tt.want, nil) + }) + } + }) // LCOW + + t.Run("WCOW Hyper-V", func(t *testing.T) { + requireFeatures(t, featureWCOW, featureUVM) + + ls := windowsImageLayers(ctx, t) + vm := testuvm.CreateAndStart(ctx, t, defaultWCOWOptions(ctx, t)) + + cID := vm.ID() + "-container" + scratch := layers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )...) 
+ + c, _, cleanup := container.Create(ctx, t, vm, spec, cID, hcsOwner) + t.Cleanup(cleanup) + init := container.StartWithSpec(ctx, t, c, spec.Process, nil) + t.Cleanup(func() { + cmd.Kill(ctx, t, init) + cmd.Wait(ctx, t, init) + container.Kill(ctx, t, c) + container.Wait(ctx, t, c) + }) + + for _, tt := range ioTests { + if tt.wcowCmd == "" { + continue + } + + t.Run(tt.name, func(t *testing.T) { + ps := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(tt.wcowCmd), + )...).Process + + io := cmd.NewBufferedIO() + if tt.in != "" { + io = cmd.NewBufferedIOFromString(tt.in) + } + p := cmd.Create(ctx, t, c, ps, io) + cmd.Start(ctx, t, p) + + cmd.WaitExitCode(ctx, t, p, 0) + io.TestOutput(t, tt.want, nil) + }) + } + }) // WCOW Hyper-V + + t.Run("WCOW Process", func(t *testing.T) { + requireFeatures(t, featureWCOW) + + ls := windowsImageLayers(ctx, t) + + cID := testName(t, "container") + scratch := layers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )...) 
+ + c, _, cleanup := container.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + init := container.StartWithSpec(ctx, t, c, spec.Process, nil) + t.Cleanup(func() { + cmd.Kill(ctx, t, init) + cmd.Wait(ctx, t, init) + container.Kill(ctx, t, c) + container.Wait(ctx, t, c) + }) + + for _, tt := range ioTests { + if tt.wcowCmd == "" { + continue + } + + t.Run(tt.name, func(t *testing.T) { + ps := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(tt.wcowCmd), + )...).Process + + io := cmd.NewBufferedIO() + if tt.in != "" { + io = cmd.NewBufferedIOFromString(tt.in) + } + p := cmd.Create(ctx, t, c, ps, io) + cmd.Start(ctx, t, p) + + cmd.WaitExitCode(ctx, t, p, 0) + io.TestOutput(t, tt.want, nil) + }) + } + }) // WCOW Process + + t.Run("WCOW HostProcess", func(t *testing.T) { + requireFeatures(t, featureWCOW, featureHostProcess) + + ls := windowsImageLayers(ctx, t) + + cID := testName(t, "container") + scratch := layers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + testoci.AsHostProcessContainer(), + testoci.HostProcessInheritUser(), + )...) 
+ + c, _, cleanup := container.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + init := container.StartWithSpec(ctx, t, c, spec.Process, nil) + t.Cleanup(func() { + cmd.Kill(ctx, t, init) + cmd.Wait(ctx, t, init) + container.Kill(ctx, t, c) + container.Wait(ctx, t, c) + }) + + for _, tt := range ioTests { + if tt.wcowCmd == "" { + continue + } + + t.Run(tt.name, func(t *testing.T) { + ps := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(tt.wcowCmd), + )...).Process + + io := cmd.NewBufferedIO() + if tt.in != "" { + io = cmd.NewBufferedIOFromString(tt.in) + } + p := cmd.Create(ctx, t, c, ps, io) + cmd.Start(ctx, t, p) + + cmd.WaitExitCode(ctx, t, p, 0) + io.TestOutput(t, tt.want, nil) + }) + } + }) // WCOW HostProcess +} + +func TestContainerExec_DoubleQuotes(t *testing.T) { + requireFeatures(t, featureContainer, featureWCOW) + requireAnyFeature(t, featureUVM, featureHostProcess) + require.Build(t, osversion.RS5) + + ctx := util.Context(namespacedContext(context.Background()), t) + + dir := `C:\hcsshim test temp dir with spaces` + acl := "CREATOR OWNER:(OI)(CI)(IO)(F)" + cmdLine := fmt.Sprintf(`cmd /C mkdir "%s" && icacls "%s" /grant "%s" /T && icacls "%s"`, dir, dir, acl, dir) + t.Logf("command line:\n%s", cmdLine) + + t.Run("WCOW Hyper-V", func(t *testing.T) { + requireFeatures(t, featureWCOW, featureUVM) + + ls := windowsImageLayers(ctx, t) + vm := testuvm.CreateAndStart(ctx, t, defaultWCOWOptions(ctx, t)) + + cID := vm.ID() + "-container" + scratch := layers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )...) 
+ + c, _, cleanup := container.Create(ctx, t, vm, spec, cID, hcsOwner) + t.Cleanup(cleanup) + init := container.StartWithSpec(ctx, t, c, spec.Process, nil) + t.Cleanup(func() { + cmd.Kill(ctx, t, init) + cmd.Wait(ctx, t, init) + container.Kill(ctx, t, c) + container.Wait(ctx, t, c) + }) + + ps := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(cmdLine), + )...).Process + + io := cmd.NewBufferedIO() + p := cmd.Create(ctx, t, c, ps, io) + cmd.Start(ctx, t, p) + + cmd.WaitExitCode(ctx, t, p, 0) + io.TestStdOutContains(t, []string{acl}, nil) + }) // WCOW Hyper-V + + t.Run("WCOW Process", func(t *testing.T) { + requireFeatures(t, featureWCOW) + + ls := windowsImageLayers(ctx, t) + + cID := testName(t, "container") + scratch := layers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + )...) 
+ + c, _, cleanup := container.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + init := container.StartWithSpec(ctx, t, c, spec.Process, nil) + t.Cleanup(func() { + cmd.Kill(ctx, t, init) + cmd.Wait(ctx, t, init) + container.Kill(ctx, t, c) + container.Wait(ctx, t, c) + }) + + ps := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(cmdLine), + )...).Process + + io := cmd.NewBufferedIO() + p := cmd.Create(ctx, t, c, ps, io) + cmd.Start(ctx, t, p) + + cmd.WaitExitCode(ctx, t, p, 0) + io.TestStdOutContains(t, []string{acl}, nil) + }) // WCOW Process + + t.Run("WCOW HostProcess", func(t *testing.T) { + requireFeatures(t, featureWCOW, featureHostProcess) + + ls := windowsImageLayers(ctx, t) + + // the directory will be created on the host from inside the HPC, so remove it + // this is mostly to avoid test failures, since `mkdir` errors if the directory already exists + t.Cleanup(func() { _ = util.RemoveAll(dir) }) + + cID := testName(t, "container") + scratch := layers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(testoci.PingSelfCmd), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + testoci.AsHostProcessContainer(), + testoci.HostProcessInheritUser(), + )...) 
+ + c, _, cleanup := container.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + init := container.StartWithSpec(ctx, t, c, spec.Process, nil) + t.Cleanup(func() { + cmd.Kill(ctx, t, init) + cmd.Wait(ctx, t, init) + container.Kill(ctx, t, c) + container.Wait(ctx, t, c) + }) + + ps := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(cmdLine), + )...).Process + + io := cmd.NewBufferedIO() + p := cmd.Create(ctx, t, c, ps, io) + cmd.Start(ctx, t, p) + + cmd.WaitExitCode(ctx, t, p, 0) + io.TestStdOutContains(t, []string{acl}, nil) + }) // WCOW HostProcess +} diff --git a/test/functional/hostprocess_test.go b/test/functional/hostprocess_test.go new file mode 100644 index 0000000000..5768dbf4cf --- /dev/null +++ b/test/functional/hostprocess_test.go @@ -0,0 +1,400 @@ +//go:build windows && functional +// +build windows,functional + +package functional + +import ( + "context" + "fmt" + "os" + "os/exec" + "os/user" + "path/filepath" + "strings" + "testing" + + ctrdoci "github.com/containerd/containerd/oci" + "github.com/opencontainers/runtime-spec/specs-go" + + "github.com/Microsoft/hcsshim/internal/jobcontainers" + "github.com/Microsoft/hcsshim/internal/sync" + "github.com/Microsoft/hcsshim/internal/winapi" + "github.com/Microsoft/hcsshim/osversion" + + "github.com/Microsoft/hcsshim/test/internal/cmd" + "github.com/Microsoft/hcsshim/test/internal/container" + "github.com/Microsoft/hcsshim/test/internal/layers" + testoci "github.com/Microsoft/hcsshim/test/internal/oci" + "github.com/Microsoft/hcsshim/test/internal/util" + "github.com/Microsoft/hcsshim/test/pkg/require" +) + +// TODO: +// - Environment +// - working directory +// - "microsoft.com/hostprocess-rootfs-location" and check that rootfs location exists +// - bind suppport? 
+ +const ( + system = `NT AUTHORITY\System` + localService = `NT AUTHORITY\Local Service` +) + +func TestHostProcess_whoami(t *testing.T) { + requireFeatures(t, featureContainer, featureWCOW, featureHostProcess) + require.Build(t, osversion.RS5) + + ctx := util.Context(namespacedContext(context.Background()), t) + ls := windowsImageLayers(ctx, t) + + username := getCurrentUsername(ctx, t) + t.Logf("current username: %s", username) + + // theres probably a better way to test for this *shrug* + isSystem := strings.EqualFold(username, system) + + for _, tt := range []struct { + name string + user ctrdoci.SpecOpts + whoiam string + }{ + // Logging in as the current user may require a password. + // Theres noo guarantee that Administrator, DefaultAccount, or Guest are enabled, so + // we cannot use them. + // Best bet is to login into a service user account, which is only possible if we are already + // running from `NT AUTHORITY\System`. + { + name: "username", + user: ctrdoci.WithUser(system), + whoiam: system, + }, + { + name: "username", + user: ctrdoci.WithUser(localService), + whoiam: localService, + }, + { + name: "inherit", + user: testoci.HostProcessInheritUser(), + whoiam: username, + }, + } { + t.Run(tt.name+" "+tt.whoiam, func(t *testing.T) { + if strings.HasPrefix(strings.ToLower(tt.whoiam), `nt authority\`) && !isSystem { + t.Skipf("starting HostProcess with account %q as requires running tests as %q", tt.whoiam, system) + } + + cID := testName(t, "container") + scratch := layers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine("whoami.exe"), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + testoci.AsHostProcessContainer(), + tt.user, + )...) 
+ + c, _, cleanup := container.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + io := cmd.NewBufferedIO() + init := container.StartWithSpec(ctx, t, c, spec.Process, io) + t.Cleanup(func() { + container.Kill(ctx, t, c) + container.Wait(ctx, t, c) + }) + + cmd.WaitExitCode(ctx, t, init, 0) + + io.TestOutput(t, tt.whoiam, nil) + }) + } + + t.Run("newgroup", func(t *testing.T) { + // CreateProcessAsUser needs SE_INCREASE_QUOTA_NAME and SE_ASSIGNPRIMARYTOKEN_NAME + // privileges, which we is not guaranteed for Administrators to have. + // So, if not System or LocalService, skip. + // + // https://learn.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-createprocessasuserw + if !isSystem { + t.Skipf("starting HostProcess within a new localgroup requires running tests as %q", system) + } + + cID := testName(t, "container") + + groupName := testName(t) + newLocalGroup(ctx, t, groupName) + + scratch := layers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine("whoami.exe"), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + testoci.AsHostProcessContainer(), + ctrdoci.WithUser(groupName), + )...) 
+ + c, _, cleanup := container.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + io := cmd.NewBufferedIO() + init := container.StartWithSpec(ctx, t, c, spec.Process, io) + t.Cleanup(func() { + container.Kill(ctx, t, c) + container.Wait(ctx, t, c) + }) + + cmd.WaitExitCode(ctx, t, init, 0) + + hostname := getHostname(ctx, t) + expectedUser := cID[:winapi.UserNameCharLimit] + // whoami returns domain\username + io.TestOutput(t, hostname+`\`+expectedUser, nil) + + checkLocalGroupMember(ctx, t, groupName, expectedUser) + }) +} + +func TestHostProcess_hostname(t *testing.T) { + requireFeatures(t, featureContainer, featureWCOW, featureHostProcess) + require.Build(t, osversion.RS5) + + ctx := util.Context(namespacedContext(context.Background()), t) + ls := windowsImageLayers(ctx, t) + + hostname := getHostname(ctx, t) + t.Logf("current hostname: %s", hostname) + + cID := testName(t, "container") + + scratch := layers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine("hostname.exe"), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + testoci.AsHostProcessContainer(), + testoci.HostProcessInheritUser(), + )...) + + c, _, cleanup := container.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + io := cmd.NewBufferedIO() + init := container.StartWithSpec(ctx, t, c, spec.Process, io) + t.Cleanup(func() { + container.Kill(ctx, t, c) + container.Wait(ctx, t, c) + }) + + cmd.WaitExitCode(ctx, t, init, 0) + + io.TestOutput(t, hostname, nil) +} + +// validate if we see the same volumes on the host as in the container. 
+func TestHostProcess_mountvol(t *testing.T) { + requireFeatures(t, featureContainer, featureWCOW, featureHostProcess) + require.Build(t, osversion.RS5) + + ctx := util.Context(namespacedContext(context.Background()), t) + ls := windowsImageLayers(ctx, t) + + cID := testName(t, "container") + + scratch := layers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine("mountvol.exe"), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + testoci.AsHostProcessContainer(), + testoci.HostProcessInheritUser(), + )...) + + c, _, cleanup := container.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + io := cmd.NewBufferedIO() + init := container.StartWithSpec(ctx, t, c, spec.Process, io) + t.Cleanup(func() { + container.Kill(ctx, t, c) + container.Wait(ctx, t, c) + }) + + cmd.WaitExitCode(ctx, t, init, 0) + + // container has been launched as the containers scratch space is a new volume + volumes, err := exec.CommandContext(ctx, "mountvol.exe").Output() + t.Logf("host mountvol.exe output:\n%s", string(volumes)) + if err != nil { + t.Fatalf("failed to exec mountvol: %v", err) + } + + io.TestOutput(t, string(volumes), nil) +} + +func TestHostProcess_VolumeMount(t *testing.T) { + requireFeatures(t, featureContainer, featureWCOW, featureHostProcess) + require.Build(t, osversion.RS5) + + ctx := util.Context(namespacedContext(context.Background()), t) + ls := windowsImageLayers(ctx, t) + + dir := t.TempDir() + containerDir := `C:\hcsshim_test\path\in\container` + + tmpfileName := "tmpfile" + containerTmpfile := filepath.Join(containerDir, tmpfileName) + + tmpfile := filepath.Join(dir, tmpfileName) + if err := os.WriteFile(tmpfile, []byte("test"), 0600); err != nil { + t.Fatalf("could not create temp file: %v", err) + } + + for _, tt := range []struct { + name string + hostPath string + containerPath string + cmd string + needsBindFilter bool + }{ + // CRI is responsible 
for adding `C:` to the start, and converting `/` to `\`, + // so here we make everything how Windows wants it + { + name: "dir absolute", + hostPath: dir, + containerPath: containerDir, + cmd: fmt.Sprintf(`dir.exe %s`, containerDir), + needsBindFilter: true, + }, + { + name: "dir relative", + hostPath: dir, + containerPath: containerDir, + cmd: fmt.Sprintf(`dir.exe %s`, strings.ReplaceAll(containerDir, `C:`, `%CONTAINER_SANDBOX_MOUNT_POINT%`)), + }, + { + name: "file absolute", + hostPath: tmpfile, + containerPath: containerTmpfile, + cmd: fmt.Sprintf(`cmd.exe /c type %s`, containerTmpfile), + needsBindFilter: true, + }, + { + name: "file relative", + hostPath: tmpfile, + containerPath: containerTmpfile, + cmd: fmt.Sprintf(`cmd.exe /c type %s`, strings.ReplaceAll(containerTmpfile, `C:`, `%CONTAINER_SANDBOX_MOUNT_POINT%`)), + }, + } { + t.Run(tt.name, func(t *testing.T) { + if tt.needsBindFilter && !jobcontainers.FileBindingSupported() { + t.Skip("bind filter support is required") + } + + // hpc mount will create the directory on the host, so remove it after test + t.Cleanup(func() { _ = util.RemoveAll(containerDir) }) + + cID := testName(t, "container") + + scratch := layers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + ctrdoci.WithProcessCommandLine(tt.cmd), + ctrdoci.WithMounts([]specs.Mount{ + { + Source: tt.hostPath, + Destination: tt.containerPath, + }, + }), + testoci.WithWindowsLayerFolders(append(ls, scratch)), + testoci.AsHostProcessContainer(), + testoci.HostProcessInheritUser(), + )...) 
+ + c, _, cleanup := container.Create(ctx, t, nil, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + io := cmd.NewBufferedIO() // dir.exe and type.exe will error if theres stdout/err to write to + init := container.StartWithSpec(ctx, t, c, spec.Process, io) + t.Cleanup(func() { + container.Kill(ctx, t, c) + container.Wait(ctx, t, c) + }) + + if ee := cmd.Wait(ctx, t, init); ee != 0 { + out, err := io.Output() + if out != "" { + t.Logf("stdout:\n%s", out) + } + if err != nil { + t.Logf("stderr:\n%v", err) + } + t.Errorf("got exit code %d, wanted %d", ee, 0) + } + }) + } +} + +func newLocalGroup(ctx context.Context, tb testing.TB, name string) { + tb.Helper() + + c := exec.CommandContext(ctx, "net", "localgroup", name, "/add") + if output, err := c.CombinedOutput(); err != nil { + tb.Logf("command %q output:\n%s", c.String(), strings.TrimSpace(string(output))) + tb.Fatalf("failed to create localgroup %q with: %v", name, err) + } + tb.Logf("created localgroup: %s", name) + + tb.Cleanup(func() { + deleteLocalGroup(ctx, tb, name) + }) +} + +func deleteLocalGroup(ctx context.Context, tb testing.TB, name string) { + tb.Helper() + + c := exec.CommandContext(ctx, "net", "localgroup", name, "/delete") + if output, err := c.CombinedOutput(); err != nil { + tb.Logf("command %q output:\n%s", c.String(), strings.TrimSpace(string(output))) + tb.Fatalf("failed to delete localgroup %q: %v", name, err) + } + tb.Logf("deleted localgroup: %s", name) +} + +// Checks if userName is present in the group `groupName`. 
+func checkLocalGroupMember(ctx context.Context, tb testing.TB, groupName, userName string) { + tb.Helper() + + c := exec.CommandContext(ctx, "net", "localgroup", groupName) + b, err := c.CombinedOutput() + output := strings.TrimSpace(string(b)) + tb.Logf("command %q output:\n%s", c.String(), output) + if err != nil { + tb.Fatalf("failed to check members for localgroup %q: %v", groupName, err) + } + if !strings.Contains(strings.ToLower(output), strings.ToLower(userName)) { + tb.Fatalf("user %s not present in the local group %s", userName, groupName) + } +} + +func getCurrentUsername(_ context.Context, tb testing.TB) string { + tb.Helper() + + u, err := user.Current() // cached, so no need to save on lookup + if err != nil { + tb.Fatalf("could not lookup current user: %v", err) + } + return u.Username +} + +var hostnameOnce = sync.OnceValue(os.Hostname) + +func getHostname(_ context.Context, tb testing.TB) string { + tb.Helper() + + n, err := hostnameOnce() + if err != nil { + tb.Fatalf("could not get hostname: %v", err) + } + return n +} diff --git a/test/functional/lcow_bench_test.go b/test/functional/lcow_bench_test.go deleted file mode 100644 index d6fcac15ea..0000000000 --- a/test/functional/lcow_bench_test.go +++ /dev/null @@ -1,106 +0,0 @@ -//go:build windows && functional -// +build windows,functional - -package functional - -import ( - "context" - "testing" - - "github.com/Microsoft/hcsshim/osversion" - - "github.com/Microsoft/hcsshim/test/internal/util" - "github.com/Microsoft/hcsshim/test/pkg/require" - "github.com/Microsoft/hcsshim/test/pkg/uvm" -) - -func BenchmarkLCOW_UVM(b *testing.B) { - requireFeatures(b, featureLCOW, featureUVM) - require.Build(b, osversion.RS5) - - pCtx := context.Background() - - b.Run("Create", func(b *testing.B) { - b.StopTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) - - opts := defaultLCOWOptions(ctx, b) - opts.ID += util.RandNameSuffix(i) - - 
b.StartTimer() - _, cleanup := uvm.CreateLCOW(ctx, b, opts) - b.StopTimer() - - cleanup(ctx) - cancel() - } - }) - - b.Run("Start", func(b *testing.B) { - b.StopTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) - - opts := defaultLCOWOptions(ctx, b) - opts.ID += util.RandNameSuffix(i) - vm, cleanup := uvm.CreateLCOW(ctx, b, opts) - - b.StartTimer() - if err := vm.Start(ctx); err != nil { - b.Fatalf("could not start UVM: %v", err) - } - b.StopTimer() - - cleanup(ctx) - cancel() - } - }) - - b.Run("Kill", func(b *testing.B) { - b.StopTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) - - opts := defaultLCOWOptions(ctx, b) - opts.ID += util.RandNameSuffix(i) - vm, cleanup := uvm.CreateLCOW(ctx, b, opts) - uvm.Start(ctx, b, vm) - - b.StartTimer() - uvm.Kill(ctx, b, vm) - if err := vm.WaitCtx(ctx); err != nil { - b.Fatalf("could not kill uvm %q: %v", vm.ID(), err) - } - b.StopTimer() - - cleanup(ctx) - cancel() - } - }) - - b.Run("Close", func(b *testing.B) { - b.StopTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) - - opts := defaultLCOWOptions(ctx, b) - opts.ID += util.RandNameSuffix(i) - vm, cleanup := uvm.CreateLCOW(ctx, b, opts) - uvm.Start(ctx, b, vm) - - b.StartTimer() - if err := vm.CloseCtx(ctx); err != nil { - b.Fatalf("could not kill uvm %q: %v", vm.ID(), err) - } - b.StopTimer() - - cleanup(ctx) - cancel() - } - }) -} diff --git a/test/functional/lcow_container_bench_test.go b/test/functional/lcow_container_bench_test.go index ebbd456109..2f17989989 100644 --- a/test/functional/lcow_container_bench_test.go +++ b/test/functional/lcow_container_bench_test.go @@ -11,7 +11,7 @@ import ( "testing" ctrdoci "github.com/containerd/containerd/oci" - cri_util "github.com/containerd/containerd/pkg/cri/util" + criutil 
"github.com/containerd/containerd/pkg/cri/util" "github.com/Microsoft/hcsshim/internal/cmd" "github.com/Microsoft/hcsshim/internal/hcsoci" @@ -33,7 +33,7 @@ func BenchmarkLCOW_Container(b *testing.B) { requireFeatures(b, featureLCOW, featureUVM, featureContainer) require.Build(b, osversion.RS5) - pCtx := namespacedContext(context.Background()) + pCtx := util.Context(namespacedContext(context.Background()), b) ls := linuxImageLayers(pCtx, b) // Create a new uVM per benchmark in case any left over state lingers @@ -66,13 +66,12 @@ func BenchmarkLCOW_Container(b *testing.B) { } // recreate the uVM opts := defaultLCOWOptions(ctx, b) - opts.ID += util.RandNameSuffix(i) vm, vmCleanup = testuvm.CreateLCOW(ctx, b, opts) testuvm.Start(ctx, b, vm) cache = testlayers.CacheFile(ctx, b, "") } - id := cri_util.GenerateID() + id := criutil.GenerateID() scratch, _ := testlayers.ScratchSpace(ctx, b, vm, "", "", cache) spec := oci.CreateLinuxSpec(ctx, b, id, oci.DefaultLinuxSpecOpts(id, @@ -146,13 +145,12 @@ func BenchmarkLCOW_Container(b *testing.B) { } // recreate the uVM opts := defaultLCOWOptions(ctx, b) - opts.ID += util.RandNameSuffix(i) vm, vmCleanup = testuvm.CreateLCOW(ctx, b, opts) testuvm.Start(ctx, b, vm) cache = testlayers.CacheFile(ctx, b, "") } - id := cri_util.GenerateID() + id := criutil.GenerateID() scratch, _ := testlayers.ScratchSpace(ctx, b, vm, "", "", cache) spec := oci.CreateLinuxSpec(ctx, b, id, oci.DefaultLinuxSpecOpts(id, @@ -201,13 +199,12 @@ func BenchmarkLCOW_Container(b *testing.B) { } // recreate the uVM opts := defaultLCOWOptions(ctx, b) - opts.ID += util.RandNameSuffix(i) vm, vmCleanup = testuvm.CreateLCOW(ctx, b, opts) testuvm.Start(ctx, b, vm) cache = testlayers.CacheFile(ctx, b, "") } - id := cri_util.GenerateID() + id := criutil.GenerateID() scratch, _ := testlayers.ScratchSpace(ctx, b, vm, "", "", cache) spec := oci.CreateLinuxSpec(ctx, b, id, oci.DefaultLinuxSpecOpts(id, @@ -259,13 +256,12 @@ func BenchmarkLCOW_Container(b *testing.B) { } // 
recreate the uVM opts := defaultLCOWOptions(ctx, b) - opts.ID += util.RandNameSuffix(i) vm, vmCleanup = testuvm.CreateLCOW(ctx, b, opts) testuvm.Start(ctx, b, vm) cache = testlayers.CacheFile(ctx, b, "") } - id := cri_util.GenerateID() + id := criutil.GenerateID() scratch, _ := testlayers.ScratchSpace(ctx, b, vm, "", "", cache) spec := oci.CreateLinuxSpec(ctx, b, id, oci.DefaultLinuxSpecOpts(id, @@ -323,13 +319,12 @@ func BenchmarkLCOW_Container(b *testing.B) { } // recreate the uVM opts := defaultLCOWOptions(ctx, b) - opts.ID += util.RandNameSuffix(i) vm, vmCleanup = testuvm.CreateLCOW(ctx, b, opts) testuvm.Start(ctx, b, vm) cache = testlayers.CacheFile(ctx, b, "") } - id := cri_util.GenerateID() + id := criutil.GenerateID() scratch, _ := testlayers.ScratchSpace(ctx, b, vm, "", "", cache) spec := oci.CreateLinuxSpec(ctx, b, id, oci.DefaultLinuxSpecOpts(id, @@ -388,13 +383,12 @@ func BenchmarkLCOW_Container(b *testing.B) { } // recreate the uVM opts := defaultLCOWOptions(ctx, b) - opts.ID += util.RandNameSuffix(i) vm, vmCleanup = testuvm.CreateLCOW(ctx, b, opts) testuvm.Start(ctx, b, vm) cache = testlayers.CacheFile(ctx, b, "") } - id := cri_util.GenerateID() + id := criutil.GenerateID() scratch, _ := testlayers.ScratchSpace(ctx, b, vm, "", "", cache) spec := oci.CreateLinuxSpec(ctx, b, id, oci.DefaultLinuxSpecOpts(id, @@ -453,13 +447,12 @@ func BenchmarkLCOW_Container(b *testing.B) { } // recreate the uVM opts := defaultLCOWOptions(ctx, b) - opts.ID += util.RandNameSuffix(i) vm, vmCleanup = testuvm.CreateLCOW(ctx, b, opts) testuvm.Start(ctx, b, vm) cache = testlayers.CacheFile(ctx, b, "") } - id := cri_util.GenerateID() + id := criutil.GenerateID() scratch, _ := testlayers.ScratchSpace(ctx, b, vm, "", "", cache) spec := oci.CreateLinuxSpec(ctx, b, id, oci.DefaultLinuxSpecOpts(id, diff --git a/test/functional/lcow_container_test.go b/test/functional/lcow_container_test.go deleted file mode 100644 index f72a4f2902..0000000000 --- 
a/test/functional/lcow_container_test.go +++ /dev/null @@ -1,169 +0,0 @@ -//go:build windows && functional -// +build windows,functional - -package functional - -import ( - "context" - "strings" - "testing" - - ctrdoci "github.com/containerd/containerd/oci" - - "github.com/Microsoft/hcsshim/osversion" - - "github.com/Microsoft/hcsshim/test/internal/cmd" - "github.com/Microsoft/hcsshim/test/internal/container" - "github.com/Microsoft/hcsshim/test/internal/layers" - "github.com/Microsoft/hcsshim/test/internal/oci" - "github.com/Microsoft/hcsshim/test/internal/util" - "github.com/Microsoft/hcsshim/test/pkg/require" - "github.com/Microsoft/hcsshim/test/pkg/uvm" -) - -func TestLCOW_ContainerLifecycle(t *testing.T) { - requireFeatures(t, featureLCOW, featureUVM, featureContainer) - require.Build(t, osversion.RS5) - - ctx := namespacedContext(context.Background()) - ls := linuxImageLayers(ctx, t) - opts := defaultLCOWOptions(ctx, t) - opts.ID += util.RandNameSuffix() - vm := uvm.CreateAndStartLCOWFromOpts(ctx, t, opts) - - scratch, _ := layers.ScratchSpace(ctx, t, vm, "", "", "") - - spec := oci.CreateLinuxSpec(ctx, t, t.Name()+util.RandNameSuffix(), - oci.DefaultLinuxSpecOpts("", - ctrdoci.WithProcessArgs("/bin/sh", "-c", oci.TailNullArgs), - oci.WithWindowsLayerFolders(append(ls, scratch)))...) 
- - c, _, cleanup := container.Create(ctx, t, vm, spec, t.Name(), hcsOwner) - t.Cleanup(cleanup) - - init := container.Start(ctx, t, c, nil) - t.Cleanup(func() { - container.Kill(ctx, t, c) - container.Wait(ctx, t, c) - }) - cmd.Kill(ctx, t, init) - cmd.WaitExitCode(ctx, t, init, cmd.ForcedKilledExitCode) -} - -var ioTests = []struct { - name string - args []string - in string - want string -}{ - { - name: "true", - args: []string{"/bin/sh", "-c", "true"}, - want: "", - }, - { - name: "echo", - args: []string{"/bin/sh", "-c", `echo -n "hi y'all"`}, - want: "hi y'all", - }, - { - name: "tee", - args: []string{"/bin/sh", "-c", "tee"}, - in: "are you copying me?", - want: "are you copying me?", - }, -} - -func TestLCOW_ContainerIO(t *testing.T) { - requireFeatures(t, featureLCOW, featureUVM, featureContainer) - require.Build(t, osversion.RS5) - - ctx := namespacedContext(context.Background()) - ls := linuxImageLayers(ctx, t) - opts := defaultLCOWOptions(ctx, t) - opts.ID += util.RandNameSuffix() - cache := layers.CacheFile(ctx, t, "") - vm := uvm.CreateAndStartLCOWFromOpts(ctx, t, opts) - - for _, tt := range ioTests { - t.Run(tt.name, func(t *testing.T) { - id := strings.ReplaceAll(t.Name(), "/", "") + util.RandNameSuffix() - scratch, _ := layers.ScratchSpace(ctx, t, vm, "", "", cache) - spec := oci.CreateLinuxSpec(ctx, t, id, - oci.DefaultLinuxSpecOpts(id, - ctrdoci.WithProcessArgs(tt.args...), - oci.WithWindowsLayerFolders(append(ls, scratch)))...) 
- - c, _, cleanup := container.Create(ctx, t, vm, spec, id, hcsOwner) - t.Cleanup(cleanup) - - io := cmd.NewBufferedIO() - if tt.in != "" { - io = cmd.NewBufferedIOFromString(tt.in) - } - init := container.Start(ctx, t, c, io) - - t.Cleanup(func() { - container.Kill(ctx, t, c) - container.Wait(ctx, t, c) - }) - - if e := cmd.Wait(ctx, t, init); e != 0 { - t.Fatalf("got exit code %d, wanted %d", e, 0) - } - - io.TestOutput(t, tt.want, nil) - }) - } -} - -func TestLCOW_ContainerExec(t *testing.T) { - requireFeatures(t, featureLCOW, featureUVM, featureContainer) - require.Build(t, osversion.RS5) - - ctx := namespacedContext(context.Background()) - ls := linuxImageLayers(ctx, t) - opts := defaultLCOWOptions(ctx, t) - opts.ID += util.RandNameSuffix() - vm := uvm.CreateAndStartLCOWFromOpts(ctx, t, opts) - - id := strings.ReplaceAll(t.Name(), "/", "") + util.RandNameSuffix() - scratch, _ := layers.ScratchSpace(ctx, t, vm, "", "", "") - spec := oci.CreateLinuxSpec(ctx, t, id, - oci.DefaultLinuxSpecOpts(id, - ctrdoci.WithProcessArgs("/bin/sh", "-c", oci.TailNullArgs), - oci.WithWindowsLayerFolders(append(ls, scratch)))...) 
- - c, _, cleanup := container.Create(ctx, t, vm, spec, id, hcsOwner) - t.Cleanup(cleanup) - init := container.Start(ctx, t, c, nil) - t.Cleanup(func() { - cmd.Kill(ctx, t, init) - cmd.Wait(ctx, t, init) - container.Kill(ctx, t, c) - container.Wait(ctx, t, c) - }) - - for _, tt := range ioTests { - t.Run(tt.name, func(t *testing.T) { - ps := oci.CreateLinuxSpec(ctx, t, id, - oci.DefaultLinuxSpecOpts(id, - // oci.WithTTY, - ctrdoci.WithDefaultPathEnv, - ctrdoci.WithProcessArgs(tt.args...))..., - ).Process - io := cmd.NewBufferedIO() - if tt.in != "" { - io = cmd.NewBufferedIOFromString(tt.in) - } - p := cmd.Create(ctx, t, c, ps, io) - cmd.Start(ctx, t, p) - - if e := cmd.Wait(ctx, t, p); e != 0 { - t.Fatalf("got exit code %d, wanted %d", e, 0) - } - - io.TestOutput(t, tt.want, nil) - }) - } -} diff --git a/test/functional/lcow_networking_test.go b/test/functional/lcow_networking_test.go index 3d83fdc6b9..270560c02f 100644 --- a/test/functional/lcow_networking_test.go +++ b/test/functional/lcow_networking_test.go @@ -18,6 +18,7 @@ import ( "github.com/Microsoft/hcsshim/test/internal/container" "github.com/Microsoft/hcsshim/test/internal/layers" "github.com/Microsoft/hcsshim/test/internal/oci" + "github.com/Microsoft/hcsshim/test/internal/util" "github.com/Microsoft/hcsshim/test/pkg/require" "github.com/Microsoft/hcsshim/test/pkg/uvm" ) @@ -122,7 +123,7 @@ func TestLCOW_IPv6_Assignment(t *testing.T) { t.Fatalf("network attachment: %v", err) } - ctx := namespacedContext(context.Background()) + ctx := util.Context(namespacedContext(context.Background()), t) ls := linuxImageLayers(ctx, t) opts := defaultLCOWOptions(ctx, t) vm := uvm.CreateAndStartLCOWFromOpts(ctx, t, opts) diff --git a/test/functional/lcow_policy_test.go b/test/functional/lcow_policy_test.go index d501b8e094..7d43c25da6 100644 --- a/test/functional/lcow_policy_test.go +++ b/test/functional/lcow_policy_test.go @@ -39,7 +39,7 @@ func setupScratchTemplate(ctx context.Context, tb testing.TB) string { func 
TestGetProperties_WithPolicy(t *testing.T) { requireFeatures(t, featureLCOW, featureUVM, featureLCOWIntegrity) - ctx := namespacedContext(context.Background()) + ctx := util.Context(namespacedContext(context.Background()), t) scratchPath := setupScratchTemplate(ctx, t) ls := linuxImageLayers(ctx, t) diff --git a/test/functional/lcow_test.go b/test/functional/lcow_test.go deleted file mode 100644 index d26bfd9e19..0000000000 --- a/test/functional/lcow_test.go +++ /dev/null @@ -1,303 +0,0 @@ -//go:build windows && functional -// +build windows,functional - -package functional - -import ( - "bytes" - "context" - "errors" - "fmt" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/opencontainers/runtime-spec/specs-go" - - "github.com/Microsoft/hcsshim/internal/cmd" - "github.com/Microsoft/hcsshim/internal/cow" - "github.com/Microsoft/hcsshim/internal/hcsoci" - "github.com/Microsoft/hcsshim/internal/lcow" - "github.com/Microsoft/hcsshim/internal/resources" - "github.com/Microsoft/hcsshim/internal/uvm" - "github.com/Microsoft/hcsshim/internal/uvm/scsi" - "github.com/Microsoft/hcsshim/osversion" - - testutilities "github.com/Microsoft/hcsshim/test/internal" - testcmd "github.com/Microsoft/hcsshim/test/internal/cmd" - "github.com/Microsoft/hcsshim/test/pkg/require" - testuvm "github.com/Microsoft/hcsshim/test/pkg/uvm" -) - -// test if closing a waiting (but not starting) uVM succeeds. -func TestLCOW_UVMCreateClose(t *testing.T) { - requireFeatures(t, featureLCOW, featureUVM) - require.Build(t, osversion.RS5) - - ctx := context.Background() - vm, cleanup := testuvm.CreateLCOW(ctx, t, defaultLCOWOptions(ctx, t)) - - testuvm.Close(ctx, t, vm) - - // also run cleanup to make sure that works fine too - cleanup(ctx) -} - -// test if waiting after creating (but not starting) an LCOW uVM returns. 
-func TestLCOW_UVMCreateWait(t *testing.T) { - requireFeatures(t, featureLCOW, featureUVM) - require.Build(t, osversion.RS5) - - pCtx := context.Background() - vm, cleanup := testuvm.CreateLCOW(pCtx, t, defaultLCOWOptions(pCtx, t)) - t.Cleanup(func() { cleanup(pCtx) }) - - ctx, cancel := context.WithTimeout(pCtx, 3*time.Second) - t.Cleanup(cancel) - switch err := vm.WaitCtx(ctx); { - case err == nil: - t.Fatal("wait did not error") - case !errors.Is(err, context.DeadlineExceeded): - t.Fatalf("wait should have errored with '%v'; got '%v'", context.DeadlineExceeded, err) - } -} - -// TestLCOW_UVMNoSCSINoVPMemInitrd starts an LCOW utility VM without a SCSI controller and -// no VPMem device. Uses initrd. -func TestLCOW_UVMNoSCSINoVPMemInitrd(t *testing.T) { - requireFeatures(t, featureLCOW, featureUVM) - - ctx := context.Background() - opts := defaultLCOWOptions(ctx, t) - opts.SCSIControllerCount = 0 - opts.VPMemDeviceCount = 0 - opts.PreferredRootFSType = uvm.PreferredRootFSTypeInitRd - opts.RootFSFile = uvm.InitrdFile - opts.KernelDirect = false - opts.KernelFile = uvm.KernelFile - - testLCOWUVMNoSCSISingleVPMem(t, opts, fmt.Sprintf("Command line: initrd=/%s", opts.RootFSFile)) -} - -// TestLCOW_UVMNoSCSISingleVPMemVHD starts an LCOW utility VM without a SCSI controller and -// only a single VPMem device. Uses VPMEM VHD. 
-func TestLCOW_UVMNoSCSISingleVPMemVHD(t *testing.T) { - requireFeatures(t, featureLCOW, featureUVM) - - ctx := context.Background() - opts := defaultLCOWOptions(ctx, t) - opts.SCSIControllerCount = 0 - opts.VPMemDeviceCount = 1 - opts.PreferredRootFSType = uvm.PreferredRootFSTypeVHD - opts.RootFSFile = uvm.VhdFile - - testLCOWUVMNoSCSISingleVPMem(t, opts, `Command line: root=/dev/pmem0`, `init=/init`) -} - -func testLCOWUVMNoSCSISingleVPMem(t *testing.T, opts *uvm.OptionsLCOW, expected ...string) { - t.Helper() - require.Build(t, osversion.RS5) - requireFeatures(t, featureLCOW, featureUVM) - ctx := context.Background() - - lcowUVM := testuvm.CreateAndStartLCOWFromOpts(ctx, t, opts) - - io := testcmd.NewBufferedIO() - // c := cmd.Command(lcowUVM, "dmesg") - c := testcmd.Create(ctx, t, lcowUVM, &specs.Process{Args: []string{"dmesg"}}, io) - testcmd.Start(ctx, t, c) - testcmd.WaitExitCode(ctx, t, c, 0) - - out, err := io.Output() - - if err != nil { - t.Helper() - t.Fatalf("uvm exec failed with: %s", err) - } - - for _, s := range expected { - if !strings.Contains(out, s) { - t.Helper() - t.Fatalf("Expected dmesg output to have %q: %s", s, out) - } - } -} - -// TestLCOW_TimeUVMStartVHD starts/terminates a utility VM booting from VPMem- -// attached root filesystem a number of times. -func TestLCOW_TimeUVMStartVHD(t *testing.T) { - require.Build(t, osversion.RS5) - requireFeatures(t, featureLCOW, featureUVM) - - testLCOWTimeUVMStart(t, false, uvm.PreferredRootFSTypeVHD) -} - -// TestLCOWUVMStart_KernelDirect_VHD starts/terminates a utility VM booting from -// VPMem- attached root filesystem a number of times starting from the Linux -// Kernel directly and skipping EFI. 
-func TestLCOW_UVMStart_KernelDirect_VHD(t *testing.T) { - require.Build(t, 18286) - requireFeatures(t, featureLCOW, featureUVM) - - testLCOWTimeUVMStart(t, true, uvm.PreferredRootFSTypeVHD) -} - -// TestLCOWTimeUVMStartInitRD starts/terminates a utility VM booting from initrd- -// attached root file system a number of times. -func TestLCOW_TimeUVMStartInitRD(t *testing.T) { - require.Build(t, osversion.RS5) - requireFeatures(t, featureLCOW, featureUVM) - - testLCOWTimeUVMStart(t, false, uvm.PreferredRootFSTypeInitRd) -} - -// TestLCOWUVMStart_KernelDirect_InitRd starts/terminates a utility VM booting -// from initrd- attached root file system a number of times starting from the -// Linux Kernel directly and skipping EFI. -func TestLCOW_UVMStart_KernelDirect_InitRd(t *testing.T) { - require.Build(t, 18286) - requireFeatures(t, featureLCOW, featureUVM) - - testLCOWTimeUVMStart(t, true, uvm.PreferredRootFSTypeInitRd) -} - -func testLCOWTimeUVMStart(t *testing.T, kernelDirect bool, rfsType uvm.PreferredRootFSType) { - t.Helper() - requireFeatures(t, featureLCOW, featureUVM) - - ctx := context.Background() - for i := 0; i < 3; i++ { - opts := defaultLCOWOptions(ctx, t) - opts.KernelDirect = kernelDirect - if !kernelDirect { - // can only use the uncompressed kernel with direct boot - opts.KernelFile = uvm.KernelFile - } - opts.VPMemDeviceCount = 32 - opts.PreferredRootFSType = rfsType - switch opts.PreferredRootFSType { - case uvm.PreferredRootFSTypeInitRd: - opts.RootFSFile = uvm.InitrdFile - case uvm.PreferredRootFSTypeVHD: - opts.RootFSFile = uvm.VhdFile - } - - lcowUVM := testuvm.CreateAndStartLCOWFromOpts(context.Background(), t, opts) - testuvm.Close(ctx, t, lcowUVM) - } -} - -func TestLCOWSimplePodScenario(t *testing.T) { - t.Skip("Doesn't work quite yet") - - require.Build(t, osversion.RS5) - requireFeatures(t, featureLCOW, featureUVM, featureContainer) - - layers := linuxImageLayers(context.Background(), t) - - cacheDir := t.TempDir() - cacheFile := 
filepath.Join(cacheDir, "cache.vhdx") - - // This is what gets mounted for UVM scratch - uvmScratchDir := t.TempDir() - uvmScratchFile := filepath.Join(uvmScratchDir, "uvmscratch.vhdx") - - // Scratch for the first container - c1ScratchDir := t.TempDir() - c1ScratchFile := filepath.Join(c1ScratchDir, "sandbox.vhdx") - - // Scratch for the second container - c2ScratchDir := t.TempDir() - c2ScratchFile := filepath.Join(c2ScratchDir, "sandbox.vhdx") - - lcowUVM := testuvm.CreateAndStartLCOW(context.Background(), t, "uvm") - defer lcowUVM.Close() - - // Populate the cache and generate the scratch file - if err := lcow.CreateScratch(context.Background(), lcowUVM, uvmScratchFile, lcow.DefaultScratchSizeGB, cacheFile); err != nil { - t.Fatal(err) - } - - _, err := lcowUVM.SCSIManager.AddVirtualDisk(context.Background(), uvmScratchFile, false, lcowUVM.ID(), &scsi.MountConfig{}) - if err != nil { - t.Fatal(err) - } - - // Now create the first containers sandbox, populate a spec - if err := lcow.CreateScratch(context.Background(), lcowUVM, c1ScratchFile, lcow.DefaultScratchSizeGB, cacheFile); err != nil { - t.Fatal(err) - } - c1Spec := testutilities.GetDefaultLinuxSpec(t) - c1Folders := append(layers, c1ScratchDir) - c1Spec.Windows.LayerFolders = c1Folders - c1Spec.Process.Args = []string{"echo", "hello", "lcow", "container", "one"} - c1Opts := &hcsoci.CreateOptions{ - Spec: c1Spec, - HostingSystem: lcowUVM, - } - - // Now create the second containers sandbox, populate a spec - if err := lcow.CreateScratch(context.Background(), lcowUVM, c2ScratchFile, lcow.DefaultScratchSizeGB, cacheFile); err != nil { - t.Fatal(err) - } - c2Spec := testutilities.GetDefaultLinuxSpec(t) - c2Folders := append(layers, c2ScratchDir) - c2Spec.Windows.LayerFolders = c2Folders - c2Spec.Process.Args = []string{"echo", "hello", "lcow", "container", "two"} - c2Opts := &hcsoci.CreateOptions{ - Spec: c2Spec, - HostingSystem: lcowUVM, - } - - // Create the two containers - c1hcsSystem, c1Resources, err 
:= hcsoci.CreateContainer(context.Background(), c1Opts) - if err != nil { - t.Fatal(err) - } - c2hcsSystem, c2Resources, err := hcsoci.CreateContainer(context.Background(), c2Opts) - if err != nil { - t.Fatal(err) - } - - // Start them. In the UVM, they'll be in the created state from runc's perspective after this.eg - /// # runc list - //ID PID STATUS BUNDLE CREATED OWNER - //3a724c2b-f389-5c71-0555-ebc6f5379b30 138 running /run/gcs/c/1 2018-06-04T21:23:39.1253911Z root - //7a8229a0-eb60-b515-55e7-d2dd63ffae75 158 created /run/gcs/c/2 2018-06-04T21:23:39.4249048Z root - if err := c1hcsSystem.Start(context.Background()); err != nil { - t.Fatal(err) - } - defer resources.ReleaseResources(context.Background(), c1Resources, lcowUVM, true) //nolint:errcheck - - if err := c2hcsSystem.Start(context.Background()); err != nil { - t.Fatal(err) - } - defer resources.ReleaseResources(context.Background(), c2Resources, lcowUVM, true) //nolint:errcheck - - // Start the init process in each container and grab it's stdout comparing to expected - runInitProcess(t, c1hcsSystem, "hello lcow container one") - runInitProcess(t, c2hcsSystem, "hello lcow container two") -} - -// Helper to run the init process in an LCOW container; verify it exits with exit -// code 0; verify stderr is empty; check output is as expected. 
-func runInitProcess(t *testing.T, s cow.Container, expected string) { - t.Helper() - var errB bytes.Buffer - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - cmd := &cmd.Cmd{ - Host: s, - Stderr: &errB, - Context: ctx, - } - outb, err := cmd.Output() - if err != nil { - t.Fatalf("stderr: %s", err) - } - out := string(outb) - if strings.TrimSpace(out) != expected { - t.Fatalf("got %q expecting %q", string(out), expected) - } -} diff --git a/test/functional/lcow_uvm_test.go b/test/functional/lcow_uvm_test.go new file mode 100644 index 0000000000..a024d3762f --- /dev/null +++ b/test/functional/lcow_uvm_test.go @@ -0,0 +1,271 @@ +//go:build windows && functional +// +build windows,functional + +package functional + +import ( + "context" + "fmt" + "testing" + + "github.com/opencontainers/runtime-spec/specs-go" + + "github.com/Microsoft/hcsshim/internal/uvm" + "github.com/Microsoft/hcsshim/osversion" + + testcmd "github.com/Microsoft/hcsshim/test/internal/cmd" + "github.com/Microsoft/hcsshim/test/internal/util" + "github.com/Microsoft/hcsshim/test/pkg/require" + testuvm "github.com/Microsoft/hcsshim/test/pkg/uvm" +) + +// TestLCOWUVM_KernelArgs starts an LCOW utility VM and validates the kernel args contain the expected parameters. +func TestLCOWUVM_KernelArgs(t *testing.T) { + require.Build(t, osversion.RS5) + requireFeatures(t, featureLCOW, featureUVM) + + // TODO: + // - opts.VPCIEnabled and `pci=off` + // - opts.ProcessDumpLocation and `-core-dump-location` + // - opts.ConsolePipe/opts.EnableGraphicsConsole and `console=` + + ctx := util.Context(context.Background(), t) + numCPU := int32(2) + + for _, tc := range []struct { + name string + optsFn func(*uvm.OptionsLCOW) + wantArgs []string + notWantArgs []string + wantDmesg []string + notWantDmesg []string + }{ + // + // initrd test cases + // + // Don't test initrd with SCSI or vPMEM, since boot won't use either and the settings + // won't appear in kernel args or dmesg. 
+ // Kernel command line only contains `initrd=/initrd.img` if KernelDirect is disabled, which + // implies booting from a compressed kernel. + + { + name: "initrd kernel", + optsFn: func(opts *uvm.OptionsLCOW) { + opts.SCSIControllerCount = 0 + opts.VPMemDeviceCount = 0 + + opts.PreferredRootFSType = uvm.PreferredRootFSTypeInitRd + opts.RootFSFile = uvm.InitrdFile + + opts.KernelDirect = false + opts.KernelFile = uvm.KernelFile + }, + wantArgs: []string{fmt.Sprintf(`initrd=/%s`, uvm.InitrdFile), + `8250_core.nr_uarts=0`, fmt.Sprintf(`nr_cpus=%d`, numCPU), `panic=-1`, `quiet`, `pci=off`}, + notWantArgs: []string{`root=`, `rootwait`, `init=`, `/dev/pmem`, `/dev/sda`, `console=`}, + wantDmesg: []string{`initrd`, `initramfs`}, + }, + { + name: "initrd vmlinux", + optsFn: func(opts *uvm.OptionsLCOW) { + opts.SCSIControllerCount = 0 + opts.VPMemDeviceCount = 0 + + opts.PreferredRootFSType = uvm.PreferredRootFSTypeInitRd + opts.RootFSFile = uvm.InitrdFile + + opts.KernelDirect = true + opts.KernelFile = uvm.UncompressedKernelFile + }, + wantArgs: []string{`8250_core.nr_uarts=0`, fmt.Sprintf(`nr_cpus=%d`, numCPU), `panic=-1`, `quiet`, `pci=off`}, + notWantArgs: []string{`root=`, `rootwait`, `init=`, `/dev/pmem`, `/dev/sda`, `console=`}, + wantDmesg: []string{`initrd`, `initramfs`}, + }, + + // + // VHD rootfs test cases + // + + { + name: "no SCSI single vPMEM VHD kernel", + optsFn: func(opts *uvm.OptionsLCOW) { + opts.SCSIControllerCount = 0 + opts.VPMemDeviceCount = 1 + + opts.PreferredRootFSType = uvm.PreferredRootFSTypeVHD + opts.RootFSFile = uvm.VhdFile + + opts.KernelDirect = false + opts.KernelFile = uvm.KernelFile + }, + wantArgs: []string{`root=/dev/pmem0`, `rootwait`, `init=/init`, + `8250_core.nr_uarts=0`, fmt.Sprintf(`nr_cpus=%d`, numCPU), `panic=-1`, `quiet`, `pci=off`}, + notWantArgs: []string{`initrd=`, `/dev/sda`, `console=`}, + notWantDmesg: []string{`initrd`, `initramfs`}, + }, + { + name: "SCSI no vPMEM VHD kernel", + optsFn: func(opts *uvm.OptionsLCOW) 
{ + opts.SCSIControllerCount = 1 + opts.VPMemDeviceCount = 0 + + opts.PreferredRootFSType = uvm.PreferredRootFSTypeVHD + opts.RootFSFile = uvm.VhdFile + + opts.KernelDirect = false + opts.KernelFile = uvm.KernelFile + }, + wantArgs: []string{`root=/dev/sda`, `rootwait`, `init=/init`, + `8250_core.nr_uarts=0`, fmt.Sprintf(`nr_cpus=%d`, numCPU), `panic=-1`, `quiet`, `pci=off`}, + notWantArgs: []string{`initrd=`, `/dev/pmem`, `console=`}, + notWantDmesg: []string{`initrd`, `initramfs`}, + }, + { + name: "no SCSI single vPMEM VHD vmlinux", + optsFn: func(opts *uvm.OptionsLCOW) { + opts.SCSIControllerCount = 0 + opts.VPMemDeviceCount = 1 + + opts.PreferredRootFSType = uvm.PreferredRootFSTypeVHD + opts.RootFSFile = uvm.VhdFile + + opts.KernelDirect = true + opts.KernelFile = uvm.UncompressedKernelFile + }, + wantArgs: []string{`root=/dev/pmem0`, `rootwait`, `init=/init`, + `8250_core.nr_uarts=0`, fmt.Sprintf(`nr_cpus=%d`, numCPU), `panic=-1`, `quiet`, `pci=off`}, + notWantArgs: []string{`initrd=`, `/dev/sda`, `console=`}, + notWantDmesg: []string{`initrd`, `initramfs`}, + }, + { + name: "SCSI no vPMEM VHD vmlinux", + optsFn: func(opts *uvm.OptionsLCOW) { + opts.SCSIControllerCount = 1 + opts.VPMemDeviceCount = 0 + + opts.PreferredRootFSType = uvm.PreferredRootFSTypeVHD + opts.RootFSFile = uvm.VhdFile + + opts.KernelDirect = true + opts.KernelFile = uvm.UncompressedKernelFile + }, + wantArgs: []string{`root=/dev/sda`, `rootwait`, `init=/init`, + `8250_core.nr_uarts=0`, fmt.Sprintf(`nr_cpus=%d`, numCPU), `panic=-1`, `quiet`, `pci=off`}, + notWantArgs: []string{`initrd=`, `/dev/pmem`, `console=`}, + notWantDmesg: []string{`initrd`, `initramfs`}, + }, + } { + t.Run(tc.name, func(t *testing.T) { + opts := defaultLCOWOptions(ctx, t) + opts.ProcessorCount = numCPU + tc.optsFn(opts) + + if opts.KernelDirect { + require.Build(t, 18286) + } + + vm := testuvm.CreateAndStartLCOWFromOpts(ctx, t, opts) + + // validate the kernel args were constructed as expected + ioArgs := 
testcmd.NewBufferedIO() + cmdArgs := testcmd.Create(ctx, t, vm, &specs.Process{Args: []string{"cat", "/proc/cmdline"}}, ioArgs) + testcmd.Start(ctx, t, cmdArgs) + testcmd.WaitExitCode(ctx, t, cmdArgs, 0) + + ioArgs.TestStdOutContains(t, tc.wantArgs, tc.notWantArgs) + + // some boot options (notably using initrd) need to be validated by looking at dmesg logs + // dmesg will output the kernel command line as + // + // [    0.000000] Command line: <...> + // + // but it's easier/safer to read the args directly from /proc/cmdline + + ioDmesg := testcmd.NewBufferedIO() + cmdDmesg := testcmd.Create(ctx, t, vm, &specs.Process{Args: []string{"dmesg"}}, ioDmesg) + testcmd.Start(ctx, t, cmdDmesg) + testcmd.WaitExitCode(ctx, t, cmdDmesg, 0) + + ioDmesg.TestStdOutContains(t, tc.wantDmesg, tc.notWantDmesg) + }) + } +} + +// TestLCOWUVM_Boot starts and terminates a utility VM multiple times using different boot options. +func TestLCOWUVM_Boot(t *testing.T) { + require.Build(t, osversion.RS5) + requireFeatures(t, featureLCOW, featureUVM) + + numIters := 3 + ctx := util.Context(context.Background(), t) + + for _, tc := range []struct { + name   string + optsFn func(*uvm.OptionsLCOW) + }{ + { + name: "vPMEM no kernel direct initrd", + optsFn: func(opts *uvm.OptionsLCOW) { + opts.KernelDirect = false + opts.KernelFile = uvm.KernelFile + + opts.RootFSFile = uvm.InitrdFile + opts.PreferredRootFSType = uvm.PreferredRootFSTypeInitRd + + opts.VPMemDeviceCount = 32 + }, + }, + { + name: "vPMEM kernel direct initrd", + optsFn: func(opts *uvm.OptionsLCOW) { + opts.KernelDirect = true + opts.KernelFile = uvm.UncompressedKernelFile + + opts.RootFSFile = uvm.InitrdFile + opts.PreferredRootFSType = uvm.PreferredRootFSTypeInitRd + + opts.VPMemDeviceCount = 32 + }, + }, + { + name: "vPMEM no kernel direct VHD", + optsFn: func(opts *uvm.OptionsLCOW) { + opts.KernelDirect = false + opts.KernelFile = uvm.KernelFile + + opts.RootFSFile = uvm.VhdFile + opts.PreferredRootFSType = uvm.PreferredRootFSTypeVHD + + 
opts.VPMemDeviceCount = 32 + }, + }, + { + name: "vPMEM kernel direct VHD", + optsFn: func(opts *uvm.OptionsLCOW) { + opts.KernelDirect = true + opts.KernelFile = uvm.UncompressedKernelFile + + opts.PreferredRootFSType = uvm.PreferredRootFSTypeVHD + opts.RootFSFile = uvm.VhdFile + + opts.VPMemDeviceCount = 32 + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + for i := 0; i < numIters; i++ { + // create new options every time, in case they are modified during uVM creation + opts := defaultLCOWOptions(ctx, t) + tc.optsFn(opts) + + // should probably short circuit earlier, but this will skip all subsequent iterations, which works + if opts.KernelDirect { + require.Build(t, 18286) + } + + vm := testuvm.CreateAndStartLCOWFromOpts(ctx, t, opts) + testuvm.Close(ctx, t, vm) + } + }) + } +} diff --git a/test/functional/main_test.go b/test/functional/main_test.go index 8823a2d360..0392da6d5b 100644 --- a/test/functional/main_test.go +++ b/test/functional/main_test.go @@ -40,6 +40,9 @@ import ( testuvm "github.com/Microsoft/hcsshim/test/pkg/uvm" ) +// TODO: WCOW benchmarks (hyper-v, process, and hostprocess) +// TODO: common cmd.Cmd tests on different hosts: start, exec, double start, exit code, etc + // owner field for uVMs. const hcsOwner = "hcsshim-functional-tests" @@ -259,6 +262,11 @@ func requireFeatures(tb testing.TB, features ...string) { require.Features(tb, flagFeatures, features...) } +func requireAnyFeature(tb testing.TB, features ...string) { + tb.Helper() + require.AnyFeature(tb, flagFeatures, features...) +} + func defaultLCOWOptions(ctx context.Context, tb testing.TB) *uvm.OptionsLCOW { tb.Helper() @@ -333,6 +341,8 @@ func windowsServercoreImageLayers(ctx context.Context, tb testing.TB) []string { // namespacedContext returns a [context.Context] with the provided namespace added via // [github.com/containerd/containerd/namespaces.WithNamespace]. 
func namespacedContext(ctx context.Context) context.Context { + // since this (usually) called at the start of a test, add the testing timeout to it + // for the entire test run return namespaces.WithNamespace(ctx, *flagContainerdNamespace) } diff --git a/test/functional/uvm_bench_test.go b/test/functional/uvm_bench_test.go new file mode 100644 index 0000000000..27d5129a4f --- /dev/null +++ b/test/functional/uvm_bench_test.go @@ -0,0 +1,123 @@ +//go:build windows && functional +// +build windows,functional + +package functional + +import ( + "context" + "testing" + + "github.com/Microsoft/hcsshim/osversion" + + "github.com/Microsoft/hcsshim/test/internal/util" + "github.com/Microsoft/hcsshim/test/pkg/require" + testuvm "github.com/Microsoft/hcsshim/test/pkg/uvm" +) + +func BenchmarkUVM(b *testing.B) { + requireFeatures(b, featureUVM) + requireAnyFeature(b, featureLCOW, featureWCOW) + require.Build(b, osversion.RS5) + + pCtx := util.Context(context.Background(), b) + + for _, tt := range []struct { + feature string + createOpts func(context.Context, testing.TB) any + }{ + { + feature: featureLCOW, + //nolint: thelper + createOpts: func(ctx context.Context, tb testing.TB) any { return defaultLCOWOptions(ctx, tb) }, + }, + { + feature: featureWCOW, + //nolint: thelper + createOpts: func(ctx context.Context, tb testing.TB) any { return defaultWCOWOptions(ctx, tb) }, + }, + } { + b.Run(tt.feature, func(b *testing.B) { + requireFeatures(b, tt.feature) + + b.Run("Create", func(b *testing.B) { + b.StopTimer() + b.ResetTimer() + for i := 0; i < b.N; i++ { + ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) + + opts := tt.createOpts(ctx, b) + + b.StartTimer() + _, cleanup := testuvm.Create(ctx, b, opts) + b.StopTimer() + + cleanup(ctx) + cancel() + } + }) + + b.Run("Start", func(b *testing.B) { + b.StopTimer() + b.ResetTimer() + for i := 0; i < b.N; i++ { + ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) + + opts := tt.createOpts(ctx, b) 
+ vm, cleanup := testuvm.Create(ctx, b, opts) + + b.StartTimer() + if err := vm.Start(ctx); err != nil { + b.Fatalf("could not start UVM: %v", err) + } + b.StopTimer() + + cleanup(ctx) + cancel() + } + }) + + b.Run("Kill", func(b *testing.B) { + b.StopTimer() + b.ResetTimer() + for i := 0; i < b.N; i++ { + ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) + + opts := tt.createOpts(ctx, b) + vm, cleanup := testuvm.Create(ctx, b, opts) + testuvm.Start(ctx, b, vm) + + b.StartTimer() + testuvm.Kill(ctx, b, vm) + if err := vm.WaitCtx(ctx); err != nil { + b.Fatalf("could not kill uvm %q: %v", vm.ID(), err) + } + b.StopTimer() + + cleanup(ctx) + cancel() + } + }) + + b.Run("Close", func(b *testing.B) { + b.StopTimer() + b.ResetTimer() + for i := 0; i < b.N; i++ { + ctx, cancel := context.WithTimeout(pCtx, benchmarkIterationTimeout) + + opts := tt.createOpts(ctx, b) + vm, cleanup := testuvm.Create(ctx, b, opts) + testuvm.Start(ctx, b, vm) + + b.StartTimer() + if err := vm.CloseCtx(ctx); err != nil { + b.Fatalf("could not kill uvm %q: %v", vm.ID(), err) + } + b.StopTimer() + + cleanup(ctx) + cancel() + } + }) + }) + } +} diff --git a/test/functional/uvm_plannine_test.go b/test/functional/uvm_plannine_test.go index 14da7176b4..9e6e1df8ba 100644 --- a/test/functional/uvm_plannine_test.go +++ b/test/functional/uvm_plannine_test.go @@ -16,6 +16,7 @@ import ( "github.com/Microsoft/hcsshim/internal/uvm" "github.com/Microsoft/hcsshim/osversion" + "github.com/Microsoft/hcsshim/test/internal/util" "github.com/Microsoft/hcsshim/test/pkg/require" testuvm "github.com/Microsoft/hcsshim/test/pkg/uvm" ) @@ -53,22 +54,21 @@ func TestPlan9(t *testing.T) { func TestPlan9_Writable(t *testing.T) { require.Build(t, osversion.RS5) requireFeatures(t, featureLCOW, featureUVM, featurePlan9) - ctx := context.Background() + ctx := util.Context(context.Background(), t) opts := defaultLCOWOptions(ctx, t) opts.NoWritableFileShares = true vm := testuvm.CreateAndStartLCOWFromOpts(ctx, t, 
opts) - defer vm.Close() dir := t.TempDir() // mount as writable should fail - share, err := vm.AddPlan9(context.Background(), dir, fmt.Sprintf("/tmp/%s", filepath.Base(dir)), false, false, nil) + share, err := vm.AddPlan9(ctx, dir, fmt.Sprintf("/tmp/%s", filepath.Base(dir)), false, false, nil) defer func() { if share == nil { return } - if err := vm.RemovePlan9(context.Background(), share); err != nil { + if err := vm.RemovePlan9(ctx, share); err != nil { t.Fatalf("RemovePlan9 failed: %s", err) } }() @@ -77,7 +77,7 @@ func TestPlan9_Writable(t *testing.T) { } // mount as read-only should succeed - share, err = vm.AddPlan9(context.Background(), dir, fmt.Sprintf("/tmp/%s", filepath.Base(dir)), true, false, nil) + share, err = vm.AddPlan9(ctx, dir, fmt.Sprintf("/tmp/%s", filepath.Base(dir)), true, false, nil) if err != nil { t.Fatalf("AddPlan9 failed: %v", err) } diff --git a/test/functional/uvm_test.go b/test/functional/uvm_test.go new file mode 100644 index 0000000000..37fb6f193d --- /dev/null +++ b/test/functional/uvm_test.go @@ -0,0 +1,70 @@ +//go:build windows && functional +// +build windows,functional + +package functional + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/Microsoft/hcsshim/osversion" + + "github.com/Microsoft/hcsshim/test/pkg/require" + testuvm "github.com/Microsoft/hcsshim/test/pkg/uvm" +) + +func TestUVM(t *testing.T) { + requireFeatures(t, featureUVM) + requireAnyFeature(t, featureLCOW, featureWCOW) + require.Build(t, osversion.RS5) + + ctx := context.Background() + + for _, tt := range []struct { + feature string + createOpts func(context.Context, testing.TB) any + }{ + { + feature: featureLCOW, + //nolint: thelper + createOpts: func(_ context.Context, tb testing.TB) any { return defaultLCOWOptions(ctx, tb) }, + }, + { + feature: featureWCOW, + //nolint: thelper + createOpts: func(ctx context.Context, tb testing.TB) any { return defaultWCOWOptions(ctx, tb) }, + }, + } { + t.Run(tt.feature, func(t *testing.T) { + 
requireFeatures(t, tt.feature) + + // test if closing a created (but not started) uVM succeeds. + t.Run("Close_Created", func(t *testing.T) { + vm, cleanup := testuvm.Create(ctx, t, tt.createOpts(ctx, t)) + + testuvm.Close(ctx, t, vm) + + // also run cleanup to make sure that works fine too + cleanup(ctx) + }) + + // test if waiting after creating (but not starting) a uVM times out. + t.Run("Wait_Created", func(t *testing.T) { + vm, cleanup := testuvm.Create(ctx, t, tt.createOpts(ctx, t)) + t.Cleanup(func() { cleanup(ctx) }) + + // arbitrary timeout + timeoutCtx, cancel := context.WithTimeout(ctx, 3*time.Second) + t.Cleanup(cancel) + switch err := vm.WaitCtx(timeoutCtx); { + case err == nil: + t.Fatal("wait did not error") + case !errors.Is(err, context.DeadlineExceeded): + t.Fatalf("wait should have errored with '%v'; got '%v'", context.DeadlineExceeded, err) + } + }) + }) + } +} diff --git a/test/functional/uvm_update_test.go b/test/functional/uvm_update_test.go index 7060cd12d9..d36731982b 100644 --- a/test/functional/uvm_update_test.go +++ b/test/functional/uvm_update_test.go @@ -14,6 +14,7 @@ import ( "github.com/Microsoft/hcsshim/osversion" "github.com/Microsoft/hcsshim/pkg/ctrdtaskapi" + "github.com/Microsoft/hcsshim/test/internal/util" "github.com/Microsoft/hcsshim/test/pkg/require" "github.com/Microsoft/hcsshim/test/pkg/uvm" ) @@ -22,6 +23,8 @@ func TestLCOW_Update_Resources(t *testing.T) { requireFeatures(t, featureLCOW, featureUVM) require.Build(t, osversion.RS5) + ctx := util.Context(context.Background(), t) + for _, config := range []struct { name string resource interface{} @@ -54,7 +57,6 @@ func TestLCOW_Update_Resources(t *testing.T) { }, } { t.Run(config.name, func(t *testing.T) { - ctx := context.Background() vm, cleanup := uvm.CreateLCOW(ctx, t, defaultLCOWOptions(ctx, t)) uvm.Start(ctx, t, vm) defer cleanup(ctx) diff --git a/test/functional/uvm_vsmb_test.go b/test/functional/uvm_vsmb_test.go index f869aa7c82..7914d666cd 100644 --- 
a/test/functional/uvm_vsmb_test.go +++ b/test/functional/uvm_vsmb_test.go @@ -6,80 +6,166 @@ package functional import ( "context" "errors" + "fmt" + "os" + "path/filepath" "testing" "github.com/Microsoft/hcsshim/internal/hcs" - "github.com/Microsoft/hcsshim/internal/uvm" "github.com/Microsoft/hcsshim/osversion" + "github.com/Microsoft/hcsshim/test/internal/util" "github.com/Microsoft/hcsshim/test/pkg/require" testuvm "github.com/Microsoft/hcsshim/test/pkg/uvm" ) -// TestVSMB tests adding/removing VSMB layers from a v2 Windows utility VM. -func TestVSMB(t *testing.T) { - t.Skip("not yet updated") +// TODO: vSMB benchmarks +// TODO: re-add a removed directmapped vSMB share +// TODO: add vSMB to created-but-not-started (or closed) uVM +// TestVSMB_WCOW tests adding/removing VSMB layers from a v2 Windows utility VM. +func TestVSMB_WCOW(t *testing.T) { require.Build(t, osversion.RS5) requireFeatures(t, featureWCOW, featureUVM, featureVSMB) - //nolint:staticcheck // SA1019: deprecated; will be replaced when test is updated - uvm, _, _ := testuvm.CreateWCOWUVM(context.Background(), t, t.Name(), "microsoft/nanoserver") - defer uvm.Close() - - dir := t.TempDir() - var iterations uint32 = 64 - options := uvm.DefaultVSMBOptions(true) - options.TakeBackupPrivilege = true - for i := 0; i < int(iterations); i++ { - if _, err := uvm.AddVSMB(context.Background(), dir, options); err != nil { - t.Fatalf("AddVSMB failed: %s", err) - } - } + ctx := util.Context(namespacedContext(context.Background()), t) - // Remove them all - for i := 0; i < int(iterations); i++ { - if err := uvm.RemoveVSMB(context.Background(), dir, true); err != nil { - t.Fatalf("RemoveVSMB failed: %s", err) + type testCase struct { + name string + backupPriv bool + readOnly bool + noDirectMap bool + } + tests := make([]testCase, 0, 8) + for _, ro := range []bool{true, false} { + for _, backup := range []bool{true, false} { + for _, noDirectMap := range []bool{true, false} { + n := "RW" + if ro { + n = "RO" + } + 
if backup { + n += "-backup" + } + if noDirectMap { + n += "-noDirectMap" + } + + tests = append(tests, testCase{ + name: n, + backupPriv: backup, + readOnly: ro, + noDirectMap: noDirectMap, + }) + } } } -} -// TODO: VSMB for mapped directories - -func TestVSMB_Writable(t *testing.T) { - t.Skip("not yet updated") - - require.Build(t, osversion.RS5) - requireFeatures(t, featureWCOW, featureUVM, featureVSMB) - - opts := uvm.NewDefaultOptionsWCOW(t.Name(), "") - opts.NoWritableFileShares = true - //nolint:staticcheck // SA1019: deprecated; will be replaced when test is updated - vm, _, _ := testuvm.CreateWCOWUVMFromOptsWithImage(context.Background(), t, opts, "microsoft/nanoserver") - defer vm.Close() - - dir := t.TempDir() - options := vm.DefaultVSMBOptions(true) - options.TakeBackupPrivilege = true - options.ReadOnly = false - _, err := vm.AddVSMB(context.Background(), dir, options) - defer func() { - if err == nil { - return - } - if err = vm.RemoveVSMB(context.Background(), dir, true); err != nil { - t.Fatalf("RemoveVSMB failed: %s", err) + const iterations = 64 + for _, tt := range tests { + for _, newDir := range []bool{true, false} { + name := tt.name + if newDir { + name += "-newDir" + } + + t.Run("dir-"+name, func(t *testing.T) { + // create a temp directory before creating the uVM, so the uVM will be closed before + // temp dir's cleanup + dir := t.TempDir() + vm := testuvm.CreateAndStart(ctx, t, defaultWCOWOptions(ctx, t)) + + options := vm.DefaultVSMBOptions(tt.readOnly) + options.TakeBackupPrivilege = tt.backupPriv + options.NoDirectmap = tt.noDirectMap + t.Logf("vSMB options: %#+v", options) + + var path string + var err error + for i := 0; i < iterations; i++ { + if i == 0 || newDir { + // create a temp directory on the first iteration, or on each subsequent iteration if [testCase.newDir] + // don't need to remove it, since `dir` will be removed whole-sale during test cleanup + if path, err = os.MkdirTemp(dir, ""); err != nil { + t.Fatalf("MkdirTemp: 
%v", err) + } + } + + opts := *options // create a copy in case its (accidentally) modified + s := testuvm.AddVSMB(ctx, t, vm, path, &opts) + if path != s.HostPath { + t.Fatalf("expected vSMB path: %q; got %q", path, s.HostPath) + } + } + }) + + t.Run("file-"+name, func(t *testing.T) { + // create a temp directory before creating the uVM, so the uVM will be closed before + // temp dir's cleanup + dir := t.TempDir() + vm := testuvm.CreateAndStart(ctx, t, defaultWCOWOptions(ctx, t)) + + options := vm.DefaultVSMBOptions(tt.readOnly) + options.TakeBackupPrivilege = tt.backupPriv + options.NoDirectmap = tt.noDirectMap + t.Logf("vSMB options: %#+v", options) + + var path string + var err error + for i := 0; i < iterations; i++ { + if i == 0 || newDir { + // create a temp directory on the first iteration, or on each subsequent iteration if [testCase.newDir] + // don't need to remove it, since `dir` will be removed whole-sale during test cleanup + if path, err = os.MkdirTemp(dir, ""); err != nil { + t.Fatalf("MkdirTemp: %v", err) + } + } + f := filepath.Join(path, fmt.Sprintf("f%d.txt", i)) + if err := os.WriteFile(f, []byte(t.Name()), 0600); err != nil { + t.Fatal(err) + } + + opts := *options // create a copy in case its (accidentally) modified + s := testuvm.AddVSMB(ctx, t, vm, f, &opts) + if path != s.HostPath { + t.Fatalf("expected vSMB path: %q; got %q", path, s.HostPath) + } + } + }) } - }() - - if !errors.Is(err, hcs.ErrOperationDenied) { - t.Fatalf("AddVSMB should have failed with %v instead of: %v", hcs.ErrOperationDenied, err) } - options.ReadOnly = true - _, err = vm.AddVSMB(context.Background(), dir, options) - if err != nil { - t.Fatalf("AddVSMB failed: %s", err) - } + t.Run("NoWritableFileShares", func(t *testing.T) { + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // create a temp directory before creating the uVM, so the uVM will be closed before + // temp dir's cleanup + dir := t.TempDir() + + opts := defaultWCOWOptions(ctx, t) + 
opts.NoWritableFileShares = true + vm := testuvm.CreateAndStart(ctx, t, opts) + + options := vm.DefaultVSMBOptions(tt.readOnly) + options.TakeBackupPrivilege = tt.backupPriv + options.NoDirectmap = tt.noDirectMap + t.Logf("vSMB options: %#+v", options) + + s, err := vm.AddVSMB(ctx, dir, options) + + t.Cleanup(func() { + if err != nil { + return + } + if err = vm.RemoveVSMB(ctx, s.HostPath, tt.readOnly); err != nil { + t.Fatalf("failed to remove vSMB share: %v", err) + } + }) + + if !tt.readOnly && !errors.Is(err, hcs.ErrOperationDenied) { + t.Fatalf("AddVSMB should have failed with %v instead of: %v", hcs.ErrOperationDenied, err) + } + }) + } + }) } diff --git a/test/functional/wcow_test.go b/test/functional/wcow_uvm_test.go similarity index 100% rename from test/functional/wcow_test.go rename to test/functional/wcow_uvm_test.go diff --git a/test/go.mod b/test/go.mod index e1e10e2b82..ce71b12fb0 100644 --- a/test/go.mod +++ b/test/go.mod @@ -11,6 +11,7 @@ require ( github.com/containerd/log v0.1.0 github.com/containerd/ttrpc v1.2.2 github.com/containerd/typeurl/v2 v2.1.1 + github.com/google/go-cmp v0.5.9 github.com/google/go-containerregistry v0.16.1 github.com/kevpar/cri v1.11.1-0.20220302210600-4c5c347230b2 github.com/opencontainers/go-digest v1.0.0 @@ -65,7 +66,6 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-cmp v0.5.9 // indirect github.com/google/uuid v1.3.0 // indirect github.com/klauspost/compress v1.16.5 // indirect github.com/lestrrat-go/backoff/v2 v2.0.8 // indirect diff --git a/test/internal/cmd/cmd.go b/test/internal/cmd/cmd.go index 8e61478919..2db5da5d9e 100644 --- a/test/internal/cmd/cmd.go +++ b/test/internal/cmd/cmd.go @@ -1,5 +1,6 @@ //go:build windows +// This package provides testing wrappers around [github.com/Microsoft/hcsshim/internal/cmd] package cmd import ( @@ -50,6 +51,8 @@ func 
Create(ctx context.Context, _ testing.TB, c cow.ProcessHost, p *specs.Proce func Start(_ context.Context, tb testing.TB, c *cmd.Cmd) { tb.Helper() + tb.Logf("starting command %q", desc(c)) + if err := c.Start(); err != nil { tb.Fatalf("failed to start %q: %v", desc(c), err) } diff --git a/test/internal/cmd/io.go b/test/internal/cmd/io.go index 462ee410e6..8a5d05d5da 100644 --- a/test/internal/cmd/io.go +++ b/test/internal/cmd/io.go @@ -5,8 +5,11 @@ package cmd import ( "bytes" "errors" + "strings" "testing" + "github.com/google/go-cmp/cmp" + "github.com/Microsoft/hcsshim/internal/cmd" ) @@ -38,12 +41,45 @@ func (b *BufferedIO) Output() (_ string, err error) { func (b *BufferedIO) TestOutput(tb testing.TB, out string, err error) { tb.Helper() - outGive, errGive := b.Output() - if !errors.Is(errGive, err) { - tb.Fatalf("got stderr: %v; wanted: %v", errGive, err) + outGot, errGot := b.Output() + if !errors.Is(errGot, err) { + tb.Fatalf("got stderr: %v; wanted: %v", errGot, err) + } + + out = strings.ToLower(strings.TrimSpace(out)) + outGot = strings.ToLower(strings.TrimSpace(outGot)) + if diff := cmp.Diff(out, outGot); diff != "" { + tb.Fatalf("stdout mismatch (-want +got):\n%s", diff) + } +} + +func (b *BufferedIO) TestStdOutContains(tb testing.TB, want, notWant []string) { + tb.Helper() + + outGot, err := b.Output() + if err != nil { + tb.Fatalf("got stderr: %v", err) } - if outGive != out { - tb.Fatalf("got stdout %q; wanted %q", outGive, out) + + tb.Logf("searching stdout for substrings\nstdout:\n%s\nwanted substrings:\n%q\nunwanted substrings:\n%q", outGot, want, notWant) + + outGot = strings.ToLower(outGot) + + for _, s := range want { + if !strings.Contains(outGot, strings.ToLower(s)) { + tb.Errorf("stdout does not contain substring:\n%s", s) + } + } + + for _, s := range notWant { + if strings.Contains(outGot, strings.ToLower(s)) { + tb.Errorf("stdout contains substring:\n%s", s) + } + } + + // FailNow() to match behavior of [TestOutput] + if tb.Failed() { + 
tb.FailNow() } } diff --git a/test/internal/container/container.go b/test/internal/container/container.go index 5246c257c5..8ed27f2b0c 100644 --- a/test/internal/container/container.go +++ b/test/internal/container/container.go @@ -94,7 +94,6 @@ func Create( tb.Errorf("could not close container %q: %v", c.ID(), err) } } - return c, r, f } diff --git a/test/internal/oci/oci.go b/test/internal/oci/oci.go index 197bf94cf0..e82edd02d0 100644 --- a/test/internal/oci/oci.go +++ b/test/internal/oci/oci.go @@ -12,6 +12,8 @@ import ( criopts "github.com/containerd/containerd/pkg/cri/opts" "github.com/opencontainers/runtime-spec/specs-go" + "github.com/Microsoft/hcsshim/pkg/annotations" + "github.com/Microsoft/hcsshim/test/pkg/images" ) @@ -21,6 +23,7 @@ import ( const ( TailNullArgs = "tail -f /dev/null" + PingSelfCmd = "cmd.exe /c ping -t 127.0.0.1" DefaultNamespace = namespaces.Default CRINamespace = criconstants.K8sContainerdNamespace @@ -42,6 +45,22 @@ func DefaultLinuxSpecOpts(nns string, extra ...ctrdoci.SpecOpts) []ctrdoci.SpecO return append(opts, extra...) } +func DefaultWindowsSpecOpts(nns string, extra ...ctrdoci.SpecOpts) []ctrdoci.SpecOpts { + opts := []ctrdoci.SpecOpts{ + // make sure we set the Windows field + func(_ context.Context, _ ctrdoci.Client, _ *containers.Container, s *specs.Spec) error { + if s.Windows == nil { + s.Windows = &specs.Windows{} + } + return nil + }, + criopts.WithoutRoot, + ctrdoci.WithProcessCwd(`C:\`), + ctrdoci.WithWindowsNetworkNamespace(nns), + } + return append(opts, extra...) +} + // DefaultLinuxSpec returns a default OCI spec for a Linux container. // // See [CreateSpecWithPlatform] for more details. @@ -95,3 +114,26 @@ func WithWindowsLayerFolders(layers []string) ctrdoci.SpecOpts { return nil } } + +// AsHostProcessContainer updates the spec to create a HostProcess container. 
+func AsHostProcessContainer() ctrdoci.SpecOpts { + return func(_ context.Context, _ ctrdoci.Client, _ *containers.Container, s *specs.Spec) error { + if s.Annotations == nil { + s.Annotations = make(map[string]string) + } + s.Annotations[annotations.HostProcessContainer] = "true" + return nil + } +} + +// HostProcessInheritUser updates the spec to allow the HostProcess container to inherit the current +// user's token. +func HostProcessInheritUser() ctrdoci.SpecOpts { + return func(_ context.Context, _ ctrdoci.Client, _ *containers.Container, s *specs.Spec) error { + if s.Annotations == nil { + s.Annotations = make(map[string]string) + } + s.Annotations[annotations.HostProcessInheritUser] = "true" + return nil + } +} diff --git a/test/internal/util/util.go b/test/internal/util/util.go index d2634f2d5e..7fcf7f5341 100644 --- a/test/internal/util/util.go +++ b/test/internal/util/util.go @@ -1,6 +1,7 @@ package util import ( + "context" "crypto/rand" "encoding/hex" "flag" @@ -9,6 +10,7 @@ import ( "runtime" "strconv" "strings" + "testing" "time" "unicode" @@ -150,3 +152,48 @@ func repeat(f func() error, n int, d time.Duration) (err error) { return err } + +// Context creates a [context.Context] whose deadline is the testing deadline minus a small grace period (if applicable), +// and registers the cancellation with the test cleanup.
+// +// Based heavily on (copied directly from): Go lang's src/internal/testenv/Command.Context +// https://cs.opensource.google/go/go/+/master:src/internal/testenv/exec.go;l=133;drc=5613882df7555484680ecabc0462b7c23c6f5205 +func Context(ctx context.Context, tb testing.TB) context.Context { + tb.Helper() + + var ( + cancelCtx context.CancelFunc + gracePeriod time.Duration // unlimited unless the test has a deadline (to allow for interactive debugging) + ) + + if t, ok := tb.(interface { + testing.TB + Deadline() (time.Time, bool) + }); ok { + if td, ok := t.Deadline(); ok { + // Start with a minimum grace period, to allow cleanup before testing is stopped + gracePeriod = 100 * time.Millisecond + + // If time allows, increase the termination grace period to 5% of the + // test's remaining time. + testTimeout := time.Until(td) + if gp := testTimeout / 20; gp > gracePeriod { + gracePeriod = gp + } + + timeout := testTimeout - 2*gracePeriod + if cd, ok := ctx.Deadline(); !ok || time.Until(cd) > timeout { + // Either ctx doesn't have a deadline, or its deadline would expire + // after (or too close before) the test has already timed out. + // Add a shorter timeout so that the test will produce useful output. + ctx, cancelCtx = context.WithTimeout(ctx, timeout) + } + } + } + + if cancelCtx != nil { + tb.Cleanup(cancelCtx) + } + + return ctx +} diff --git a/test/pkg/uvm/uvm.go b/test/pkg/uvm/uvm.go index 2cd0a36d51..817679a7d4 100644 --- a/test/pkg/uvm/uvm.go +++ b/test/pkg/uvm/uvm.go @@ -25,6 +25,38 @@ func newCleanupFn(_ context.Context, tb testing.TB, vm *uvm.UtilityVM) CleanupFn } } +// TODO: create interface in "internal/uvm" that both [OptionsLCOW] and [OptionsWCOW] implement +// +// can't use generic interface { OptionsLCOW | OptionsWCOW } since that is a type constraint and requires +// making all calls generic as well. + +// Create creates a utility VM with the passed opts. 
+func Create(ctx context.Context, tb testing.TB, opts any) (*uvm.UtilityVM, CleanupFn) { + tb.Helper() + + switch opts := opts.(type) { + case *uvm.OptionsLCOW: + return CreateLCOW(ctx, tb, opts) + case *uvm.OptionsWCOW: + return CreateWCOW(ctx, tb, opts) + } + tb.Fatalf("unknown uVM creation options: %T", opts) + return nil, nil +} + +// CreateAndStart creates and starts a utility VM with the specified options. +// +// The cleanup function will be added to `tb.Cleanup`. +func CreateAndStart(ctx context.Context, tb testing.TB, opts any) *uvm.UtilityVM { + tb.Helper() + + vm, cleanup := Create(ctx, tb, opts) + Start(ctx, tb, vm) + tb.Cleanup(func() { cleanup(ctx) }) + + return vm +} + func Start(ctx context.Context, tb testing.TB, vm *uvm.UtilityVM) { tb.Helper() err := vm.Start(ctx) diff --git a/test/pkg/uvm/wcow.go b/test/pkg/uvm/wcow.go index bba48a8b84..02f9cabe09 100644 --- a/test/pkg/uvm/wcow.go +++ b/test/pkg/uvm/wcow.go @@ -92,9 +92,8 @@ func AddVSMB(ctx context.Context, tb testing.TB, vm *uvm.UtilityVM, path string, tb.Fatalf("failed to add vSMB share: %v", err) } - ro := options.ReadOnly tb.Cleanup(func() { - if err := s.Release(ctx); err != nil { + if err := s.Release(ctx); err != nil { tb.Fatalf("failed to remove vSMB share: %v", err) } })