Reference containerDisks and kernel boot images in reproducible format during migrations #6535

Merged: 4 commits, Oct 22, 2021
96 changes: 82 additions & 14 deletions pkg/container-disk/container-disk.go
@@ -24,7 +24,9 @@ import (
"os"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"

"kubevirt.io/client-go/log"

@@ -52,6 +54,8 @@ const KernelBootVolumeName = KernelBootName + "-volume"

const ephemeralStorageOverheadSize = "50M"

var digestRegex = regexp.MustCompile(`sha256:([a-zA-Z0-9]+)`)

func GetLegacyVolumeMountDirOnHost(vmi *v1.VirtualMachineInstance) string {
return filepath.Join(mountBaseDir, string(vmi.UID))
}
@@ -204,23 +208,23 @@ func GetImage(root string, imagePath string) (string, error) {
return imagePath, nil
}

func GenerateInitContainers(vmi *v1.VirtualMachineInstance, podVolumeName string, binVolumeName string) []kubev1.Container {
return generateContainersHelper(vmi, podVolumeName, binVolumeName, true)
func GenerateInitContainers(vmi *v1.VirtualMachineInstance, imageIDs map[string]string, podVolumeName string, binVolumeName string) []kubev1.Container {
return generateContainersHelper(vmi, imageIDs, podVolumeName, binVolumeName, true)
}

func GenerateContainers(vmi *v1.VirtualMachineInstance, podVolumeName string, binVolumeName string) []kubev1.Container {
return generateContainersHelper(vmi, podVolumeName, binVolumeName, false)
func GenerateContainers(vmi *v1.VirtualMachineInstance, imageIDs map[string]string, podVolumeName string, binVolumeName string) []kubev1.Container {
return generateContainersHelper(vmi, imageIDs, podVolumeName, binVolumeName, false)
}

func GenerateKernelBootContainer(vmi *v1.VirtualMachineInstance, podVolumeName string, binVolumeName string) *kubev1.Container {
return generateKernelBootContainerHelper(vmi, podVolumeName, binVolumeName, false)
func GenerateKernelBootContainer(vmi *v1.VirtualMachineInstance, imageIDs map[string]string, podVolumeName string, binVolumeName string) *kubev1.Container {
return generateKernelBootContainerHelper(vmi, imageIDs, podVolumeName, binVolumeName, false)
}

func GenerateKernelBootInitContainer(vmi *v1.VirtualMachineInstance, podVolumeName string, binVolumeName string) *kubev1.Container {
return generateKernelBootContainerHelper(vmi, podVolumeName, binVolumeName, true)
func GenerateKernelBootInitContainer(vmi *v1.VirtualMachineInstance, imageIDs map[string]string, podVolumeName string, binVolumeName string) *kubev1.Container {
return generateKernelBootContainerHelper(vmi, imageIDs, podVolumeName, binVolumeName, true)
}

func generateKernelBootContainerHelper(vmi *v1.VirtualMachineInstance, podVolumeName string, binVolumeName string, isInit bool) *kubev1.Container {
func generateKernelBootContainerHelper(vmi *v1.VirtualMachineInstance, imageIDs map[string]string, podVolumeName string, binVolumeName string, isInit bool) *kubev1.Container {
if !util.HasKernelBootContainerImage(vmi) {
return nil
}
@@ -240,34 +244,38 @@ func generateKernelBootContainerHelper(vmi *v1.VirtualMachineInstance, podVolume
}

const fakeVolumeIdx = 0 // volume index makes no difference for kernel-boot container
return generateContainerFromVolume(vmi, podVolumeName, binVolumeName, isInit, true, &kernelBootVolume, fakeVolumeIdx)
return generateContainerFromVolume(vmi, imageIDs, podVolumeName, binVolumeName, isInit, true, &kernelBootVolume, fakeVolumeIdx)
}

// The controller uses this function to generate the container
// specs for hosting the container registry disks.
func generateContainersHelper(vmi *v1.VirtualMachineInstance, podVolumeName string, binVolumeName string, isInit bool) []kubev1.Container {
func generateContainersHelper(vmi *v1.VirtualMachineInstance, imageIDs map[string]string, podVolumeName string, binVolumeName string, isInit bool) []kubev1.Container {
var containers []kubev1.Container

// Make VirtualMachineInstance Image Wrapper Containers
for index, volume := range vmi.Spec.Volumes {
if volume.Name == KernelBootVolumeName {
continue
}
if container := generateContainerFromVolume(vmi, podVolumeName, binVolumeName, isInit, false, &volume, index); container != nil {
if container := generateContainerFromVolume(vmi, imageIDs, podVolumeName, binVolumeName, isInit, false, &volume, index); container != nil {
containers = append(containers, *container)
}
}
return containers
}

func generateContainerFromVolume(vmi *v1.VirtualMachineInstance, podVolumeName, binVolumeName string, isInit, isKernelBoot bool, volume *v1.Volume, volumeIdx int) *kubev1.Container {
func generateContainerFromVolume(vmi *v1.VirtualMachineInstance, imageIDs map[string]string, podVolumeName, binVolumeName string, isInit, isKernelBoot bool, volume *v1.Volume, volumeIdx int) *kubev1.Container {
if volume.ContainerDisk == nil {
return nil
}

volumeMountDir := GetVolumeMountDirOnGuest(vmi)
diskContainerName := fmt.Sprintf("volume%s", volume.Name)
diskContainerName := toContainerName(volume.Name)
diskContainerImage := volume.ContainerDisk.Image
if img, exists := imageIDs[volume.Name]; exists {
Review comment from a contributor:
Optional: In ExtractImageIDsFromSourcePod the returned map is initialized with empty strings "" for each volume. Maybe it would be safer to check:

Suggested change
if img, exists := imageIDs[volume.Name]; exists {
if img, _ := imageIDs[volume.Name]; len(img) > 0 {

diskContainerImage = img
}
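
To make the reviewer's point concrete, here is a minimal, self-contained sketch (illustrative only, not part of this diff, with hypothetical image names) of why an existence check alone can pick up the empty placeholder that ExtractImageIDsFromSourcePod leaves for volumes whose container status has not been read:

package main

import "fmt"

func main() {
	// Placeholder value as initialized by ExtractImageIDsFromSourcePod.
	imageIDs := map[string]string{"disk1": ""}

	specImage := "myregistry.io/mydisk:latest" // hypothetical image from the VMI spec
	diskContainerImage := specImage

	// Existence check: true even for the empty placeholder, so the spec image is overwritten with "".
	if img, exists := imageIDs["disk1"]; exists {
		diskContainerImage = img
	}
	fmt.Printf("exists check:    %q\n", diskContainerImage) // prints ""

	// Non-empty check: the empty placeholder is ignored and the spec image is kept.
	diskContainerImage = specImage
	if img := imageIDs["disk1"]; len(img) > 0 {
		diskContainerImage = img
	}
	fmt.Printf("non-empty check: %q\n", diskContainerImage) // prints "myregistry.io/mydisk:latest"
}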

resources := kubev1.ResourceRequirements{}
resources.Limits = make(kubev1.ResourceList)
resources.Requests = make(kubev1.ResourceList)
@@ -360,3 +368,63 @@ func CreateEphemeralImages(
func getContainerDiskSocketBasePath(baseDir, podUID string) string {
return fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~empty-dir/container-disks", baseDir, podUID)
}

// ExtractImageIDsFromSourcePod takes the VMI and its source pod to determine the exact images used by containerDisks and kernel boot containers,
// as recorded in the status section of the started source pod.
// It returns a map where the key is the volume name and the value is the imageID.
func ExtractImageIDsFromSourcePod(vmi *v1.VirtualMachineInstance, sourcePod *kubev1.Pod) (imageIDs map[string]string, err error) {
imageIDs = map[string]string{}
for _, volume := range vmi.Spec.Volumes {
if volume.ContainerDisk == nil {
continue
}
imageIDs[volume.Name] = ""
}

if util.HasKernelBootContainerImage(vmi) {
imageIDs[KernelBootVolumeName] = ""
}

for _, status := range sourcePod.Status.ContainerStatuses {
if !isImageVolume(status.Name) {
continue
}
key := toVolumeName(status.Name)
if _, exists := imageIDs[key]; !exists {
continue
}
imageID, err := toImageWithDigest(status.Image, status.ImageID)
if err != nil {
return nil, err
}
imageIDs[key] = imageID
}
return
}

func toImageWithDigest(image string, imageID string) (string, error) {
baseImage := image
if strings.LastIndex(image, "@sha256:") != -1 {
baseImage = strings.Split(image, "@sha256:")[0]
} else if colonIndex := strings.LastIndex(image, ":"); colonIndex > strings.LastIndex(image, "/") {
baseImage = image[:colonIndex]
}

digestMatches := digestRegex.FindStringSubmatch(imageID)
if len(digestMatches) < 2 {
return "", fmt.Errorf("failed to identify image digest for container %q with id %q", image, imageID)
}
return fmt.Sprintf("%s@sha256:%s", baseImage, digestMatches[1]), nil
}

func isImageVolume(containerName string) bool {
return strings.HasPrefix(containerName, "volume")
}

func toContainerName(volumeName string) string {
return fmt.Sprintf("volume%s", volumeName)
}

func toVolumeName(containerName string) string {
return strings.TrimPrefix(containerName, "volume")
}
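
For orientation, the migration controller is expected to call these helpers roughly as sketched below. This is an illustrative outline under assumed names (import aliases as in the file above, hypothetical volume names), not code taken from this PR:

// Sketch only: extract the digests the source pod runs with, then build the
// target pod's containers pinned to those digests.
func buildTargetPodContainers(vmi *v1.VirtualMachineInstance, sourcePod *kubev1.Pod) ([]kubev1.Container, error) {
	imageIDs, err := ExtractImageIDsFromSourcePod(vmi, sourcePod)
	if err != nil {
		// e.g. the container runtime reported an imageID without a sha256 digest
		return nil, err
	}
	// Every containerDisk (and the kernel boot container, if any) now references the
	// digest recorded in the source pod's status instead of a mutable tag.
	containers := GenerateContainers(vmi, imageIDs, "container-disks", "virt-bin-share-dir")
	if kernelBoot := GenerateKernelBootContainer(vmi, imageIDs, "container-disks", "virt-bin-share-dir"); kernelBoot != nil {
		containers = append(containers, *kernelBoot)
	}
	return containers, nil
}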
129 changes: 126 additions & 3 deletions pkg/container-disk/container-disk_test.go
@@ -25,6 +25,7 @@ import (
"os"
"os/user"
"path/filepath"
"strings"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/extensions/table"
@@ -183,7 +184,7 @@ var _ = Describe("ContainerDisk", func() {
k8sv1.ResourceMemory: resource.MustParse("64M"),
},
}
containers := GenerateContainers(vmi, "libvirt-runtime", "/var/run/libvirt")
containers := GenerateContainers(vmi, nil, "libvirt-runtime", "/var/run/libvirt")

containerResourceSpecs := []k8sv1.ResourceList{containers[0].Resources.Limits, containers[0].Resources.Requests}

@@ -195,7 +196,7 @@ var _ = Describe("ContainerDisk", func() {

vmi := v1.NewMinimalVMI("fake-vmi")
appendContainerDisk(vmi, "r0")
containers := GenerateContainers(vmi, "libvirt-runtime", "/var/run/libvirt")
containers := GenerateContainers(vmi, nil, "libvirt-runtime", "/var/run/libvirt")

expectedEphemeralStorageRequest := resource.MustParse(ephemeralStorageOverheadSize)

Expand All @@ -212,7 +213,7 @@ var _ = Describe("ContainerDisk", func() {
vmi := v1.NewMinimalVMI("fake-vmi")
appendContainerDisk(vmi, "r1")
appendContainerDisk(vmi, "r0")
containers := GenerateContainers(vmi, "libvirt-runtime", "bin-volume")
containers := GenerateContainers(vmi, nil, "libvirt-runtime", "bin-volume")
Expect(err).ToNot(HaveOccurred())

Expect(len(containers)).To(Equal(2))
@@ -262,6 +263,89 @@ var _ = Describe("ContainerDisk", func() {
})
})
})

Context("should use the right containerID", func() {
It("for a new migration pod with two containerDisks", func() {
vmi := v1.NewMinimalVMI("myvmi")
appendContainerDisk(vmi, "disk1")
appendNonContainerDisk(vmi, "disk3")
appendContainerDisk(vmi, "disk2")

pod := createMigrationSourcePod(vmi)

imageIDs, err := ExtractImageIDsFromSourcePod(vmi, pod)
Expect(err).ToNot(HaveOccurred())
Expect(imageIDs).To(HaveKeyWithValue("disk1", "someimage@sha256:0"))
Expect(imageIDs).To(HaveKeyWithValue("disk2", "someimage@sha256:1"))
Expect(imageIDs).To(HaveLen(2))

newContainers := GenerateContainers(vmi, imageIDs, "a-name", "something")
Expect(newContainers[0].Image).To(Equal("someimage@sha256:0"))
Expect(newContainers[1].Image).To(Equal("someimage@sha256:1"))
})
It("for a new migration pod with a containerDisk and a kernel image", func() {
vmi := v1.NewMinimalVMI("myvmi")
appendContainerDisk(vmi, "disk1")
appendNonContainerDisk(vmi, "disk3")

vmi.Spec.Domain.Firmware = &v1.Firmware{KernelBoot: &v1.KernelBoot{Container: &v1.KernelBootContainer{Image: "someimage:v1.2.3.4"}}}

pod := createMigrationSourcePod(vmi)

imageIDs, err := ExtractImageIDsFromSourcePod(vmi, pod)
Expect(err).ToNot(HaveOccurred())
Expect(imageIDs).To(HaveKeyWithValue("disk1", "someimage@sha256:0"))
Expect(imageIDs).To(HaveKeyWithValue("kernel-boot-volume", "someimage@sha256:bootcontainer"))
Expect(imageIDs).To(HaveLen(2))

newContainers := GenerateContainers(vmi, imageIDs, "a-name", "something")
newBootContainer := GenerateKernelBootContainer(vmi, imageIDs, "a-name", "something")
newContainers = append(newContainers, *newBootContainer)
Expect(newContainers[0].Image).To(Equal("someimage@sha256:0"))
Expect(newContainers[1].Image).To(Equal("someimage@sha256:bootcontainer"))
})

It("should fail if it can't detect a reproducible imageID", func() {
vmi := v1.NewMinimalVMI("myvmi")
appendContainerDisk(vmi, "disk1")
pod := createMigrationSourcePod(vmi)
pod.Status.ContainerStatuses[0].ImageID = "rubish"
_, err := ExtractImageIDsFromSourcePod(vmi, pod)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal(`failed to identify image digest for container "someimage:v1.2.3.4" with id "rubish"`))
})

table.DescribeTable("It should detect the image ID from", func(imageID string) {
expected := "myregistry.io/myimage@sha256:4gjffGJlg4"
res, err := toImageWithDigest("myregistry.io/myimage", imageID)
Expect(err).ToNot(HaveOccurred())
Expect(res).To(Equal(expected))
res, err = toImageWithDigest("myregistry.io/myimage:1234", imageID)
Expect(err).ToNot(HaveOccurred())
Expect(res).To(Equal(expected))
res, err = toImageWithDigest("myregistry.io/myimage:latest", imageID)
Expect(err).ToNot(HaveOccurred())
Expect(res).To(Equal(expected))
},
table.Entry("docker", "docker://sha256:4gjffGJlg4"),
table.Entry("dontainerd", "sha256:4gjffGJlg4"),
table.Entry("cri-o", "myregistry/myimage@sha256:4gjffGJlg4"),
)

table.DescribeTable("It should detect the base image from", func(given, expected string) {
res, err := toImageWithDigest(given, "docker://sha256:4gjffGJlg4")
Expect(err).ToNot(HaveOccurred())
Expect(strings.Split(res, "@sha256:")[0]).To(Equal(expected))
},
table.Entry("image with registry and no tags or shasum", "myregistry.io/myimage", "myregistry.io/myimage"),
table.Entry("image with registry and tag", "myregistry.io/myimage:latest", "myregistry.io/myimage"),
table.Entry("image with registry and shasum", "myregistry.io/myimage@sha256:123534", "myregistry.io/myimage"),
table.Entry("image with registry and no tags or shasum and custom port", "myregistry.io:5000/myimage", "myregistry.io:5000/myimage"),
table.Entry("image with registry and tag and custom port", "myregistry.io:5000/myimage:latest", "myregistry.io:5000/myimage"),
table.Entry("image with registry and shasum and custom port", "myregistry.io:5000/myimage@sha256:123534", "myregistry.io:5000/myimage"),
table.Entry("image with registry and shasum and custom port and group", "myregistry.io:5000/mygroup/myimage@sha256:123534", "myregistry.io:5000/mygroup/myimage"),
)
})
})
})

@@ -282,3 +366,42 @@ func appendContainerDisk(vmi *v1.VirtualMachineInstance, diskName string) {
},
})
}
func appendNonContainerDisk(vmi *v1.VirtualMachineInstance, diskName string) {
vmi.Spec.Domain.Devices.Disks = append(vmi.Spec.Domain.Devices.Disks, v1.Disk{
Name: diskName,
DiskDevice: v1.DiskDevice{
Disk: &v1.DiskTarget{},
},
})
vmi.Spec.Volumes = append(vmi.Spec.Volumes, v1.Volume{
Name: diskName,
VolumeSource: v1.VolumeSource{
DataVolume: &v1.DataVolumeSource{},
},
})
}

func createMigrationSourcePod(vmi *v1.VirtualMachineInstance) *k8sv1.Pod {
pod := &k8sv1.Pod{Status: k8sv1.PodStatus{}}
containers := GenerateContainers(vmi, nil, "a-name", "something")

for idx, container := range containers {
status := k8sv1.ContainerStatus{
Name: container.Name,
Image: container.Image,
ImageID: fmt.Sprintf("finalimg@sha256:%v", idx),
}
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, status)
}
bootContainer := GenerateKernelBootContainer(vmi, nil, "a-name", "something")
if bootContainer != nil {
status := k8sv1.ContainerStatus{
Name: bootContainer.Name,
Image: bootContainer.Image,
ImageID: fmt.Sprintf("finalimg@sha256:%v", "bootcontainer"),
}
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, status)
}

return pod
}
6 changes: 3 additions & 3 deletions pkg/testutils/matchers.go
@@ -82,7 +82,7 @@ loop:
}

func ExpectEvent(recorder *record.FakeRecorder, reason string) {
gomega.Expect(recorder.Events).To(gomega.Receive(gomega.ContainSubstring(reason)))
gomega.ExpectWithOffset(1, recorder.Events).To(gomega.Receive(gomega.ContainSubstring(reason)))
}

// ExpectEvents checks for given reasons in arbitrary order
@@ -103,12 +103,12 @@ func ExpectEvents(recorder *record.FakeRecorder, reasons ...string) {
filtered = append(filtered, reason)
}

gomega.Expect(found).To(gomega.BeTrue(), "Expected to match event reason '%s' with one of %v", e, reasons)
gomega.ExpectWithOffset(1, found).To(gomega.BeTrue(), "Expected to match event reason '%s' with one of %v", e, reasons)
reasons = filtered

default:
// There should be something, trigger an error
gomega.Expect(recorder.Events).To(gomega.Receive())
gomega.ExpectWithOffset(1, recorder.Events).To(gomega.Receive())
}
}
}