
Volumemgr support for kubevirt eve #3768

Merged 1 commit on Mar 19, 2024
1 change: 1 addition & 0 deletions pkg/pillar/Dockerfile
@@ -157,6 +157,7 @@ ADD scripts/device-steps.sh \
scripts/handlezedserverconfig.sh \
scripts/veth.sh \
scripts/dhcpcd.sh \
scripts/copy-image-to-qcow.sh \
/out/opt/zededa/bin/
ADD conf/lisp.config.base /out/var/tmp/zededa/lisp.config.base

2 changes: 1 addition & 1 deletion pkg/pillar/Makefile
@@ -94,7 +94,7 @@ test: build-docker-test
--entrypoint /final/opt/gotestsum $(DOCKER_TAG) \
--jsonfile /pillar/results.json \
--junitfile /pillar/results.xml \
--raw-command -- go test -coverprofile=coverage.txt -covermode=atomic -race -json \
--raw-command -- go test -tags kubevirt -coverprofile=coverage.txt -covermode=atomic -race -json \
@milan-zededa (Contributor) commented on Mar 18, 2024:
Is there a unit test that fails without the kubevirt tag?
If so, we should put it under //go:build kubevirt.

The PR author replied:
Not that I know of. This code was provided by @christoph-zededa; I just used it.

@christoph-zededa (Contributor) replied on Mar 18, 2024:

There is no go test that fails without the kubevirt tag.

There are two reasons I put it there:

  • someone might add tests for code that is under the kubevirt build flag
  • it makes sure that the current tests still pass even with the kubevirt build flag set
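For reference, if such a kubevirt-only unit test were added later, it could be kept under the build tag so that it only compiles when the tag is set, as suggested above. A minimal hypothetical sketch (the file and test names are illustrative and not part of this PR):

//go:build kubevirt

package cas

import "testing"

// TestKubevirtOnlyBehaviour would exercise code that is only compiled with
// the kubevirt build tag; without -tags kubevirt this file is excluded.
func TestKubevirtOnlyBehaviour(t *testing.T) {
	t.Log("built and run only with: go test -tags kubevirt ./...")
}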

./...
docker run --platform linux/$(ZARCH) -w /pillar \
--entrypoint /bin/sh $(DOCKER_TAG) \
125 changes: 118 additions & 7 deletions pkg/pillar/cas/containerd.go
@@ -10,6 +10,7 @@ import (
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"

@@ -18,6 +19,7 @@ import (
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/mount"
"github.com/lf-edge/edge-containers/pkg/resolver"
"github.com/lf-edge/eve/pkg/pillar/base"
"github.com/lf-edge/eve/pkg/pillar/containerd"
"github.com/lf-edge/eve/pkg/pillar/types"
"github.com/moby/sys/mountinfo"
@@ -679,13 +681,6 @@ func (c *containerdCAS) PrepareContainerRootDir(rootPath, reference, rootBlobSha
logrus.Errorf(err.Error())
return err
}
mountpoints := clientImageSpec.Config.Volumes
execpath := clientImageSpec.Config.Entrypoint
cmd := clientImageSpec.Config.Cmd
workdir := clientImageSpec.Config.WorkingDir
unProcessedEnv := clientImageSpec.Config.Env
logrus.Infof("PrepareContainerRootDir: mountPoints %+v execpath %+v cmd %+v workdir %+v env %+v",
mountpoints, execpath, cmd, workdir, unProcessedEnv)
clientImageSpecJSON, err := getJSON(clientImageSpec)
if err != nil {
err = fmt.Errorf("PrepareContainerRootDir: Could not build json of image: %v. %v",
@@ -705,6 +700,122 @@ func (c *containerdCAS) PrepareContainerRootDir(rootPath, reference, rootBlobSha
logrus.Errorf(err.Error())
return err
}
if base.IsHVTypeKube() {
err := c.prepareContainerRootDirForKubevirt(clientImageSpec, snapshotID, rootPath)
if err != nil {
return err
}
}
return nil
}

func (c *containerdCAS) prepareContainerRootDirForKubevirt(clientImageSpec *spec.Image, snapshotID string, rootPath string) error {

// On kubevirt eve we also write the mountpoints and cmdline files to the rootPath.
// Once rootPath is populated with all necessary files, this rootPath directory will be
// converted to a PVC and passed in to domainmgr to attach to VM as rootdisk
// NOTE: For kvm eve these files are generated and passed to bootloader in pkg/pillar/containerd/oci.go:AddLoader()

mountpoints := clientImageSpec.Config.Volumes
execpath := clientImageSpec.Config.Entrypoint
cmd := clientImageSpec.Config.Cmd
workdir := clientImageSpec.Config.WorkingDir
unProcessedEnv := clientImageSpec.Config.Env
user := clientImageSpec.Config.User
logrus.Infof("PrepareContainerRootDir: mountPoints %+v execpath %+v cmd %+v workdir %+v env %+v user %+v",
mountpoints, execpath, cmd, workdir, unProcessedEnv, user)

// Mount the snapshot
// Do not unmount the rootfs in this code path, we need this containerdir to generate a qcow2->PVC from it in
// pkg/pillar/volumehandlers/csihandler.go
// Once qcow2 is successfully created, this directory will be unmounted in csihandler.go
if err := c.MountSnapshot(snapshotID, GetRoofFsPath(rootPath)); err != nil {
return fmt.Errorf("PrepareContainerRootDir error mount of snapshot: %s. %s", snapshotID, err)
}

err := c.writeKubevirtMountpointsFile(mountpoints, rootPath)
if err != nil {
return err
}

// create env manifest
envContent := ""
if workdir != "" {
envContent = fmt.Sprintf("export WORKDIR=\"%s\"\n", workdir)
} else {
envContent = fmt.Sprintf("export WORKDIR=/\n")
}
for _, e := range unProcessedEnv {
keyAndValueSlice := strings.SplitN(e, "=", 2)
if len(keyAndValueSlice) == 2 {
//handles Key=Value case
envContent = envContent + fmt.Sprintf("export %s=\"%s\"\n", keyAndValueSlice[0], keyAndValueSlice[1])
} else {
//handles Key= case
envContent = envContent + fmt.Sprintf("export %s\n", e)
}
}
if err := os.WriteFile(filepath.Join(rootPath, "environment"), []byte(envContent), 0644); err != nil {
return err
}

// create cmdline manifest
// each item needs to be independently quoted for initrd
execpathQuoted := make([]string, 0)

// This loop is just to pick the execpath and eliminate any square braces around it.
// Just pick /entrypoint.sh from [/entrypoint.sh]
for _, c := range execpath {
execpathQuoted = append(execpathQuoted, fmt.Sprintf("\"%s\"", c))
}
for _, s := range cmd {
execpathQuoted = append(execpathQuoted, fmt.Sprintf("\"%s\"", s))
}
command := strings.Join(execpathQuoted, " ")
if err := os.WriteFile(filepath.Join(rootPath, "cmdline"),
[]byte(command), 0644); err != nil {
return err
}

// Userid and GID are same
ug := "0 0"
if user != "" {
uid, converr := strconv.Atoi(user)
if converr != nil {
return converr
}
ug = fmt.Sprintf("%d %d", uid, uid)
}
if err := os.WriteFile(filepath.Join(rootPath, "ug"),
[]byte(ug), 0644); err != nil {
return err
}

err = os.MkdirAll(filepath.Join(rootPath, "modules"), 0600)
return err
}

func (*containerdCAS) writeKubevirtMountpointsFile(mountpoints map[string]struct{}, rootPath string) error {
if len(mountpoints) > 0 {
f, err := os.OpenFile(filepath.Join(rootPath, "mountPoints"), os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
err = fmt.Errorf("PrepareContainerRootDir: Exception while creating file mountpoints to %v %v",
rootPath, err)
logrus.Error(err.Error())
return err
}

defer f.Close()
for m := range mountpoints {
m += "\n"
if _, err := f.WriteString(m); err != nil {
err = fmt.Errorf("PrepareContainerRootDir: Exception while writing mountpoints to %v %v",
rootPath, err)
logrus.Error(err.Error())
return err
}
}
}
return nil
}
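For context, the manifests written by the code above end up as plain files in the container root directory (mountPoints, environment, cmdline, ug, plus an empty modules directory), which is then converted to a PVC and attached to the VM as the root disk. Below is a small, self-contained sketch of how the environment and cmdline contents are assembled for a sample image config; the sample values are assumptions for illustration and are not taken from this PR:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Sample OCI image config values (illustrative only).
	workdir := "/app"
	env := []string{"PATH=/usr/bin", "DEBUG"}
	entrypoint := []string{"/entrypoint.sh"}
	cmd := []string{"--verbose"}

	// environment manifest: WORKDIR plus one export line per env entry,
	// mirroring the Key=Value vs. bare-Key handling above.
	envContent := fmt.Sprintf("export WORKDIR=%q\n", workdir)
	for _, e := range env {
		if kv := strings.SplitN(e, "=", 2); len(kv) == 2 {
			envContent += fmt.Sprintf("export %s=%q\n", kv[0], kv[1])
		} else {
			envContent += fmt.Sprintf("export %s\n", e)
		}
	}

	// cmdline manifest: entrypoint followed by cmd, each element quoted
	// independently so the initrd can split the arguments safely.
	quoted := make([]string, 0, len(entrypoint)+len(cmd))
	for _, s := range append(entrypoint, cmd...) {
		quoted = append(quoted, fmt.Sprintf("%q", s))
	}

	fmt.Print("environment:\n" + envContent)
	fmt.Println("cmdline: " + strings.Join(quoted, " "))
}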

43 changes: 43 additions & 0 deletions pkg/pillar/cas/containerd_test.go
@@ -0,0 +1,43 @@
package cas
A Contributor commented:

This file is missing Copyright and license lines.

The author replied:
Done


import (
"os"
"path/filepath"
"strings"
"testing"
)

func TestWriteKubevirtMountpointsFile(t *testing.T) {
cas := containerdCAS{}
mountPoints := map[string]struct{}{
"/media/floppy": {},
"/media/cdrom": {},
}

dname, err := os.MkdirTemp("", "prefix")

if err != nil {
t.Fatal(err)
}

err = cas.writeKubevirtMountpointsFile(mountPoints, dname)
if err != nil {
t.Fatal(err)
}

contentBytes, err := os.ReadFile(filepath.Join(dname, "mountPoints"))

if err != nil {
t.Fatal(err)
}

content := string(contentBytes)

for mountPoint := range mountPoints {
if !strings.Contains(content, mountPoint) {
t.Fatalf("mountPoint %s is missing", mountPoint)
}
}

os.RemoveAll(filepath.Join(dname, "mountPoints"))
}
5 changes: 4 additions & 1 deletion pkg/pillar/cmd/volumemgr/handlecontent.go
@@ -10,6 +10,7 @@ import (

v1types "github.com/google/go-containerregistry/pkg/v1/types"
zconfig "github.com/lf-edge/eve-api/go/config"
"github.com/lf-edge/eve/pkg/pillar/base"
"github.com/lf-edge/eve/pkg/pillar/types"
"github.com/lf-edge/eve/pkg/pillar/utils"
"github.com/lf-edge/eve/pkg/pillar/vault"
@@ -182,6 +183,7 @@ func createContentTreeStatus(ctx *volumemgrContext, config types.ContentTreeConf
DisplayName: config.DisplayName,
State: types.INITIAL,
Blobs: []string{},
HVTypeKube: base.IsHVTypeKube(),
// LastRefCountChangeTime: time.Now(),
}
populateDatastoreFields(ctx, config, status)
@@ -325,7 +327,8 @@ func doDeleteContentTree(ctx *volumemgrContext, status *types.ContentTreeStatus)
log.Functionf("doDeleteContentTree for %v", status.ContentID)
RemoveAllBlobsFromContentTreeStatus(ctx, status, status.Blobs...)
//We create a reference when we load the blobs. We should remove that reference when we delete the contentTree.
if err := ctx.casClient.RemoveImage(status.ReferenceID()); err != nil {
refName := status.ReferenceID()
if err := ctx.casClient.RemoveImage(refName); err != nil {
log.Errorf("doDeleteContentTree: exception while deleting image %s: %s",
status.RelativeURL, err.Error())
}
3 changes: 3 additions & 0 deletions pkg/pillar/cmd/volumemgr/handlevolumeref.go
@@ -38,6 +38,7 @@ func handleVolumeRefCreate(ctxArg interface{}, key string,
VerifyOnly: config.VerifyOnly,
Target: vs.Target,
CustomMeta: vs.CustomMeta,
ReferenceName: vs.ReferenceName,
}
if vs.HasError() {
description := vs.ErrorDescription
@@ -187,6 +188,7 @@ func updateVolumeRefStatus(ctx *volumemgrContext, vs *types.VolumeStatus) {
status.Target = vs.Target
status.CustomMeta = vs.CustomMeta
status.WWN = vs.WWN
status.ReferenceName = vs.ReferenceName
if vs.HasError() {
description := vs.ErrorDescription
description.ErrorEntities = []*types.ErrorEntity{{
@@ -215,6 +217,7 @@ func updateVolumeRefStatus(ctx *volumemgrContext, vs *types.VolumeStatus) {
WWN: vs.WWN,
VerifyOnly: config.VerifyOnly,
Target: vs.Target,
ReferenceName: vs.ReferenceName,
}
if vs.HasError() {
description := vs.ErrorDescription
4 changes: 3 additions & 1 deletion pkg/pillar/cmd/volumemgr/handlework.go
@@ -235,8 +235,10 @@ func casIngestWorker(ctxPtr interface{}, w worker.Work) worker.WorkResult {
}
}

appImgName := status.ReferenceID()

// load the blobs
loadedBlobs, err := ctx.casClient.IngestBlobsAndCreateImage(status.ReferenceID(), *root, loadBlobs...)
loadedBlobs, err := ctx.casClient.IngestBlobsAndCreateImage(appImgName, *root, loadBlobs...)
// loadedBlobs are BlobStatus for the ones we loaded
for _, blob := range loadedBlobs {
d.loaded = append(d.loaded, blob.Sha256)
55 changes: 55 additions & 0 deletions pkg/pillar/cmd/volumemgr/initialvolumestatus.go
@@ -15,6 +15,7 @@ import (
"strings"

zconfig "github.com/lf-edge/eve-api/go/config"
"github.com/lf-edge/eve/pkg/pillar/kubeapi"
"github.com/lf-edge/eve/pkg/pillar/types"
"github.com/lf-edge/eve/pkg/pillar/volumehandlers"
"github.com/lf-edge/eve/pkg/pillar/zfs"
@@ -66,6 +67,28 @@ func populateExistingVolumesFormatDatasets(_ *volumemgrContext, dataset string)
log.Functionf("populateExistingVolumesFormatDatasets(%s) Done", dataset)
}

// populateExistingVolumesFormatPVC iterates over the PVCs in the namespace, takes the format
// from the name of each volume/PVC, and records it in the volumeFormat map
func populateExistingVolumesFormatPVC(_ *volumemgrContext) {

log.Functionf("populateExistingVolumesFormatPVC")
pvlist, err := kubeapi.GetPVCList(log)
if err != nil {
log.Errorf("populateExistingVolumesFormatPVC: GetPVCList failed: %v", err)
return
}
for _, pvcName := range pvlist {
tempStatus, err := getVolumeStatusByPVC(pvcName)
if err != nil {
log.Error(err)
continue
}
volumeFormat[tempStatus.Key()] = tempStatus.ContentFormat
}
log.Functionf("populateExistingVolumesFormatPVC Done")

}

// Periodic garbage collection looking at RefCount=0 files in the unknown
// Others have their delete handler.
func gcObjects(ctx *volumemgrContext, dirName string) {
@@ -117,6 +140,38 @@ func gcVolumes(ctx *volumemgrContext, locations []string) {
}
}

func getVolumeStatusByPVC(pvcName string) (*types.VolumeStatus, error) {
A Contributor commented:
How about adding a comment that this is the reverse of the PVCName function you added? That would be helpful for a reader.

var encrypted bool
var parsedFormat int32
var volumeIDAndGeneration string

volumeIDAndGeneration = pvcName
parsedFormat = int32(zconfig.Format_PVC)

generation := strings.Split(volumeIDAndGeneration, "-pvc-")
volUUID, err := uuid.FromString(generation[0])
if err != nil {
return nil, fmt.Errorf("failed to parse VolumeID: %s", err)
}
if len(generation) == 1 {
return nil, fmt.Errorf("cannot extract generation from PVC %s", pvcName)
}
// we cannot extract LocalGenerationCounter from the PVC name
// assume it is zero
generationCounter, err := strconv.ParseInt(generation[1], 10, 64)
if err != nil {
return nil, fmt.Errorf("failed to parse GenerationCounter: %s", err)
}
vs := types.VolumeStatus{
VolumeID: volUUID,
Encrypted: encrypted,
GenerationCounter: generationCounter,
ContentFormat: zconfig.Format(parsedFormat),
FileLocation: pvcName,
}
return &vs, nil
}

func getVolumeStatusByLocation(location string) (*types.VolumeStatus, error) {
var encrypted bool
var parsedFormat int32
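Regarding the review comment above: the parsing in getVolumeStatusByPVC assumes PVC names of the form <volume-UUID>-pvc-<generation counter>. A hypothetical sketch of the forward mapping it reverses is shown below; the actual PVCName helper lives elsewhere in this PR, so the name, signature, and uuid package used here are assumptions based on the parsing code:

package main

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

// pvcName builds the PVC name that getVolumeStatusByPVC parses back:
// "<volume-UUID>-pvc-<generation counter>".
func pvcName(volumeID uuid.UUID, generationCounter int64) string {
	return fmt.Sprintf("%s-pvc-%d", volumeID.String(), generationCounter)
}

func main() {
	id, err := uuid.FromString("11111111-2222-3333-4444-555555555555")
	if err != nil {
		panic(err)
	}
	// Prints: 11111111-2222-3333-4444-555555555555-pvc-0
	fmt.Println(pvcName(id, 0))
}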
10 changes: 6 additions & 4 deletions pkg/pillar/cmd/volumemgr/updatestatus.go
@@ -434,11 +434,12 @@ func doUpdateContentTree(ctx *volumemgrContext, status *types.ContentTreeStatus)
log.Functionf("doUpdateContentTree(%s) successfully loaded all blobs into CAS", status.Key())

// check if the image was created
if !lookupImageCAS(status.ReferenceID(), ctx.casClient) {
imgName := status.ReferenceID()
if !lookupImageCAS(imgName, ctx.casClient) {
log.Functionf("doUpdateContentTree(%s): image does not yet exist in CAS", status.Key())
return changed, false
}
log.Functionf("doUpdateContentTree(%s): image exists in CAS, Content Tree load is completely LOADED", status.Key())
log.Functionf("doUpdateContentTree(%s): image %v exists in CAS, Content Tree load is completely LOADED", status.Key(), imgName)
status.State = types.LOADED
status.CreateTime = time.Now()
// ContentTreeStatus.FileLocation has no meaning once everything is loaded
@@ -592,7 +593,8 @@ func doUpdateVol(ctx *volumemgrContext, status *types.VolumeStatus) (bool, bool)
status.State != types.CREATING_VOLUME &&
status.SubState == types.VolumeSubStateInitial {

_, err := ctx.casClient.GetImageHash(ctStatus.ReferenceID())
imgName := ctStatus.ReferenceID()
_, err := ctx.casClient.GetImageHash(imgName)
if err != nil {
log.Functionf("doUpdateVol(%s): waiting for image create: %s", status.Key(), err.Error())
return changed, false
@@ -614,7 +616,7 @@ func doUpdateVol(ctx *volumemgrContext, status *types.VolumeStatus) (bool, bool)
status.Key(), status.DisplayName)
return changed, false
}
status.ReferenceName = ctStatus.ReferenceID()
status.ReferenceName = imgName
status.ContentFormat = ctStatus.Format
log.Noticef("doUpdateVol(%s): setting VolumeStatus.ContentFormat by ContentTree to %s", status.Key(), status.ContentFormat)
changed = true