@@ -527,6 +527,38 @@ var _ = Describe("Replicaset", func() {
testutils.ExpectEvents(recorder, SuccessfulDeleteVirtualMachineReason, FailedDeleteVirtualMachineReason)
})
It("should back off if a sync error occurs", func() {
rs, vmi := DefaultReplicaSet(0)
vmi1 := vmi.DeepCopy()
vmi1.ObjectMeta.Name = "test1"
addReplicaSet(rs)
vmiFeeder.Add(vmi)
vmiFeeder.Add(vmi1)
// Let the first one succeed
vmiInterface.EXPECT().Delete(vmi.ObjectMeta.Name, gomock.Any()).Return(nil)
// Let the second one fail
vmiInterface.EXPECT().Delete(vmi1.ObjectMeta.Name, gomock.Any()).Return(fmt.Errorf("failure"))
// We should see the failed condition; replicas should stay at 2
rsInterface.EXPECT().Update(gomock.Any()).Do(func(obj interface{}) {
objRS := obj.(*v1.VirtualMachineInstanceReplicaSet)
Expect(objRS.Status.Replicas).To(Equal(int32(2)))
Expect(objRS.Status.Conditions).To(HaveLen(1))
cond := objRS.Status.Conditions[0]
Expect(cond.Type).To(Equal(v1.VirtualMachineInstanceReplicaSetReplicaFailure))
Expect(cond.Reason).To(Equal("FailedDelete"))
Expect(cond.Message).To(Equal("failure"))
Expect(cond.Status).To(Equal(k8sv1.ConditionTrue))
})
controller.Execute()
Expect(mockQueue.GetRateLimitedEnqueueCount()).To(Equal(1))
Expect(mockQueue.Len()).To(Equal(0))
testutils.ExpectEvents(recorder, SuccessfulDeleteVirtualMachineReason, FailedDeleteVirtualMachineReason)
})
It("should update the replica count but keep the failed state", func() {
rs, vmi := DefaultReplicaSet(3)
@@ -583,6 +615,7 @@ var _ = Describe("Replicaset", func() {
})
controller.Execute()
Expect(mockQueue.GetRateLimitedEnqueueCount()).To(Equal(0))
testutils.ExpectEvent(recorder, SuccessfulCreateVirtualMachineReason)
testutils.ExpectEvent(recorder, SuccessfulCreateVirtualMachineReason)
})
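Taken together, the queue assertions pin down the retry contract for these back-off tests: a failed delete must leave the queue drained (Len() is 0) while re-enqueueing the key exactly once through the rate limiter, so the controller retries with back-off instead of hot-looping or silently giving up.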
@@ -598,6 +598,30 @@ var _ = Describe("VirtualMachine", func() {
testutils.ExpectEvents(recorder, FailedDeleteVirtualMachineReason)
})
It("should back off if a sync error occurs", func() {
vm, vmi := DefaultVirtualMachine(false)
addVirtualMachine(vm)
vmiFeeder.Add(vmi)
vmiInterface.EXPECT().Delete(vmi.ObjectMeta.Name, gomock.Any()).Return(fmt.Errorf("failure"))
vmInterface.EXPECT().Update(gomock.Any()).Do(func(obj interface{}) {
objVM := obj.(*v1.VirtualMachine)
Expect(objVM.Status.Conditions).To(HaveLen(1))
cond := objVM.Status.Conditions[0]
Expect(cond.Type).To(Equal(v1.VirtualMachineFailure))
Expect(cond.Reason).To(Equal("FailedDelete"))
Expect(cond.Message).To(Equal("failure"))
Expect(cond.Status).To(Equal(k8sv1.ConditionTrue))
})
controller.Execute()
Expect(mockQueue.Len()).To(Equal(0))
Expect(mockQueue.GetRateLimitedEnqueueCount()).To(Equal(1))
testutils.ExpectEvents(recorder, FailedDeleteVirtualMachineReason)
})
})
})
@@ -254,7 +254,17 @@ func (c *VMIController) execute(key string) error {
if needsSync {
syncErr = c.sync(vmi, pods, dataVolumes)
}
return c.updateStatus(vmi, pods, dataVolumes, syncErr)
err = c.updateStatus(vmi, pods, dataVolumes, syncErr)
if err != nil {
return err
}
if syncErr != nil {
return syncErr
}
return nil
}
func (c *VMIController) updateStatus(vmi *virtv1.VirtualMachineInstance, pods []*k8sv1.Pod, dataVolumes []*cdiv1.DataVolume, syncErr syncError) error {
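Returning syncErr after the status update is the piece that drives the new back-off behavior: the sync error now propagates out of execute instead of being swallowed once updateStatus succeeds. A minimal sketch of the standard client-go work-queue pattern the surrounding Execute presumably follows (names and wiring are illustrative, not the exact KubeVirt code):

```go
// Sketch only: assumes c.Queue is a workqueue.RateLimitingInterface,
// as in the usual client-go controller loop.
func (c *VMIController) Execute() bool {
	key, quit := c.Queue.Get()
	if quit {
		return false
	}
	defer c.Queue.Done(key)

	if err := c.execute(key.(string)); err != nil {
		// Sync failed: re-enqueue with exponential back-off instead of
		// hot-looping; this is what the tests observe through
		// GetRateLimitedEnqueueCount.
		c.Queue.AddRateLimited(key)
	} else {
		// Sync succeeded: reset the per-key back-off counter.
		c.Queue.Forget(key)
	}
	return true
}
```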
@@ -388,6 +388,25 @@ var _ = Describe("VirtualMachineInstance watcher", func() {
testutils.ExpectEvent(recorder, FailedCreatePodReason)
})
It("should back-off if a sync error occurs", func() {
vmi := NewPendingVirtualMachine("testvmi")
addVirtualMachine(vmi)
kubeClient.Fake.PrependReactor("create", "pods", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
return true, nil, fmt.Errorf("random error")
})
vmiInterface.EXPECT().Update(gomock.Any()).Do(func(arg interface{}) {
Expect(arg.(*v1.VirtualMachineInstance).Status.Conditions[0].Reason).To(Equal("FailedCreate"))
}).Return(vmi, nil)
controller.Execute()
Expect(controller.Queue.Len()).To(Equal(0))
Expect(mockQueue.GetRateLimitedEnqueueCount()).To(Equal(1))
testutils.ExpectEvent(recorder, FailedCreatePodReason)
})
It("should remove the error condition if the sync finally succeeds", func() {
vmi := NewPendingVirtualMachine("testvmi")
vmi.Status.Conditions = []v1.VirtualMachineInstanceCondition{{Type: v1.VirtualMachineInstanceSynchronized}}
@@ -408,7 +427,7 @@ var _ = Describe("VirtualMachineInstance watcher", func() {
}).Return(vmi, nil)
controller.Execute()
Expect(mockQueue.GetRateLimitedEnqueueCount()).To(Equal(0))
testutils.ExpectEvent(recorder, SuccessfulCreatePodReason)
})
table.DescribeTable("should move the vmi to scheduling state if a pod exists", func(phase k8sv1.PodPhase, isReady bool) {
@@ -129,7 +129,7 @@ type VirtualMachineController struct {
// If the grace period has started but not expired, timeLeft represents
// the time in seconds left until the period expires.
// If the grace period has not started, timeLeft will be set to -1.
func (d *VirtualMachineController) hasGracePeriodExpired(dom *api.Domain) (hasExpired bool, timeLeft int) {
func (d *VirtualMachineController) hasGracePeriodExpired(dom *api.Domain) (hasExpired bool, timeLeft int64) {
hasExpired = false
timeLeft = 0
@@ -165,7 +165,7 @@ func (d *VirtualMachineController) hasGracePeriodExpired(dom *api.Domain) (hasEx
return
}
timeLeft = int(gracePeriod - diff)
timeLeft = int64(gracePeriod - diff)
if timeLeft < 1 {
timeLeft = 1
}
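The widening to int64 presumably aligns timeLeft with vmi.Spec.TerminationGracePeriodSeconds (an *int64 in the API), so processVmShutdown below can compare and assign the two values directly without conversions.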
@@ -335,8 +335,14 @@ func (d *VirtualMachineController) execute(key string) error {
vmi.UID = domain.Spec.Metadata.KubeVirt.UID
}
// Ignore domains from an older VMI
// Ignore domains from an older VMI but delete the watchdog file;
// by definition only one VMI with a given name can run at a time.
// Deleting the watchdog file triggers a DELETE event for the stale entry in the domain cache.
if vmiExists && domainExists && domain.Spec.Metadata.KubeVirt.UID != vmi.UID {
err := watchdog.WatchdogFileRemove(d.virtShareDir, vmi)
if err != nil {
return fmt.Errorf("failed to remove watchdog file from stale domain entry: %v", err)
}
log.Log.Object(vmi).Info("Ignoring domain from an older VMI, will be handled by its own VMI.")
return nil
}
@@ -503,6 +509,13 @@ func (d *VirtualMachineController) processVmCleanup(vmi *v1.VirtualMachineInstan
return err
}
// in case of a dirty virt-launcher shutdown, socket files may be left over
socket := cmdclient.SocketFromUID(d.virtShareDir, string(vmi.UID))
err = os.RemoveAll(socket)
if err != nil {
return err
}
d.closeLauncherClient(vmi)
return nil
}
@@ -578,6 +591,20 @@ func (d *VirtualMachineController) processVmShutdown(vmi *v1.VirtualMachineInsta
}
log.Log.Object(vmi).Infof("Signaled graceful shutdown for %s", vmi.GetObjectMeta().GetName())
// Make sure that we don't hot-loop: right after the first shutdown signal the grace period has not started yet, so timeLeft is still -1
if timeLeft == -1 {
timeLeft = 5
if vmi.Spec.TerminationGracePeriodSeconds != nil && *vmi.Spec.TerminationGracePeriodSeconds < timeLeft {
timeLeft = *vmi.Spec.TerminationGracePeriodSeconds
}
}
// In case we have a long grace period, we want to resend the graceful shutdown every 5 seconds
// That's important since a booting OS can miss ACPI signals
if timeLeft > 5 {
timeLeft = 5
}
// Requeue to re-check the pending graceful shutdown.
d.Queue.AddAfter(controller.VirtualMachineKey(vmi), time.Duration(timeLeft)*time.Second)
d.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.ShuttingDown.String(), "Signaled Graceful Shutdown")
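With a long grace period, say 30 seconds, the handler therefore re-queues itself every 5 seconds and re-sends the shutdown signal on each pass, only shortening the cadence once fewer than 5 seconds remain.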
@@ -23,19 +23,17 @@ import (
"fmt"
"io/ioutil"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"kubevirt.io/kubevirt/pkg/api/v1"
diskutils "kubevirt.io/kubevirt/pkg/ephemeral-disk-utils"
"kubevirt.io/kubevirt/pkg/log"
"kubevirt.io/kubevirt/pkg/precond"
cmdclient "kubevirt.io/kubevirt/pkg/virt-handler/cmd-client"
watchdog "kubevirt.io/kubevirt/pkg/watchdog"
"kubevirt.io/kubevirt/pkg/virt-handler/cmd-client"
"kubevirt.io/kubevirt/pkg/watchdog"
)
type OnShutdownCallback func(pid int)
@@ -53,7 +51,7 @@ type monitor struct {
}
type ProcessMonitor interface {
RunForever(startTimeout time.Duration)
RunForever(startTimeout time.Duration, stopChan chan struct{})
}
func GracefulShutdownTriggerDir(baseDir string) string {
@@ -242,7 +240,7 @@ func (mon *monitor) refresh() {
return
}
func (mon *monitor) monitorLoop(startTimeout time.Duration, signalChan chan os.Signal) {
func (mon *monitor) monitorLoop(startTimeout time.Duration, stopChan chan struct{}) {
// random value, no real rationale
rate := 1 * time.Second
@@ -262,8 +260,7 @@ func (mon *monitor) monitorLoop(startTimeout time.Duration, signalChan chan os.S
select {
case <-ticker.C:
mon.refresh()
case s := <-signalChan:
log.Log.Infof("Received signal %d.", s)
case <-stopChan:
if mon.gracePeriodStartTime != 0 {
continue
@@ -280,16 +277,9 @@ func (mon *monitor) monitorLoop(startTimeout time.Duration, signalChan chan os.S
ticker.Stop()
}
func (mon *monitor) RunForever(startTimeout time.Duration) {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt,
syscall.SIGHUP,
syscall.SIGINT,
syscall.SIGTERM,
syscall.SIGQUIT,
)
func (mon *monitor) RunForever(startTimeout time.Duration, stopChan chan struct{}) {
mon.monitorLoop(startTimeout, c)
mon.monitorLoop(startTimeout, stopChan)
}
func readProcCmdline(pathname string) ([]string, error) {
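With the signal handling removed from the monitor, the caller now owns it and translates termination signals into a single channel close. A hedged sketch of what that caller-side wiring could look like, assuming the usual os, os/signal, syscall, and time imports (the function name runMonitor and the exact signal set are illustrative):

```go
// Illustrative wiring only; virt-launcher's actual main may differ.
func runMonitor(mon ProcessMonitor, startTimeout time.Duration) {
	stopChan := make(chan struct{})
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, os.Interrupt,
		syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	go func() {
		// The first termination signal becomes a channel close;
		// monitorLoop reacts to the closed channel the same way it
		// previously reacted to the signal itself.
		<-sigChan
		close(stopChan)
	}()
	mon.RunForever(startTimeout, stopChan)
}
```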
@@ -142,10 +142,11 @@ var _ = Describe("VirtLauncher", func() {
})
It("verify start timeout works", func() {
stopChan := make(chan struct{})
done := make(chan string)
go func() {
mon.RunForever(time.Second)
mon.RunForever(time.Second, stopChan)
done <- "exit"
}()
noExitCheck := time.After(3 * time.Second)
@@ -161,17 +162,17 @@ var _ = Describe("VirtLauncher", func() {
})
It("verify monitor loop exits when signal arrives and no pid is present", func() {
signalChannel := make(chan os.Signal, 1)
stopChan := make(chan struct{})
done := make(chan string)
go func() {
mon.monitorLoop(1*time.Second, signalChannel)
mon.monitorLoop(1*time.Second, stopChan)
done <- "exit"
}()
time.Sleep(time.Second)
signalChannel <- syscall.SIGQUIT
close(stopChan)
noExitCheck := time.After(5 * time.Second)
exited := false
@@ -185,15 +186,15 @@ var _ = Describe("VirtLauncher", func() {
})
It("verify graceful shutdown trigger works", func() {
signalChannel := make(chan os.Signal, 1)
stopChan := make(chan struct{})
done := make(chan string)
StartProcess()
VerifyProcessStarted()
go func() { CleanupProcess() }()
go func() {
mon.monitorLoop(1*time.Second, signalChannel)
mon.monitorLoop(1*time.Second, stopChan)
done <- "exit"
}()
@@ -203,7 +204,7 @@ var _ = Describe("VirtLauncher", func() {
Expect(err).ToNot(HaveOccurred())
Expect(exists).To(Equal(false))
signalChannel <- syscall.SIGQUIT
close(stopChan)
time.Sleep(time.Second)
@@ -213,19 +214,19 @@ var _ = Describe("VirtLauncher", func() {
})
It("verify grace period works", func() {
signalChannel := make(chan os.Signal, 1)
stopChan := make(chan struct{})
done := make(chan string)
StartProcess()
VerifyProcessStarted()
go func() { CleanupProcess() }()
go func() {
mon.gracePeriod = 1
mon.monitorLoop(1*time.Second, signalChannel)
mon.monitorLoop(1*time.Second, stopChan)
done <- "exit"
}()
signalChannel <- syscall.SIGTERM
close(stopChan)
noExitCheck := time.After(5 * time.Second)
exited := false
@@ -8,6 +8,7 @@ import (
"github.com/libvirt/libvirt-go"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
"kubevirt.io/kubevirt/pkg/log"
@@ -75,74 +76,104 @@ func newWatchEventError(err error) watch.Event {
return watch.Event{Type: watch.Error, Object: &metav1.Status{Status: metav1.StatusFailure, Message: err.Error()}}
}
func libvirtEventCallback(d cli.VirDomain, event *libvirt.DomainEventLifecycle, client *DomainEventClient, deleteNotificationSent chan watch.Event) {
func libvirtEventCallback(c cli.Connection, domain *api.Domain, event *libvirt.DomainEventLifecycle, client *DomainEventClient, events chan watch.Event) {
// check for reconnects, and emit an error to force a resync
if event == nil {
client.SendDomainEvent(newWatchEventError(fmt.Errorf("Libvirt reconnect")))
return
}
domain, err := util.NewDomain(d)
if err != nil {
log.Log.Reason(err).Error("Could not create the Domain.")
client.SendDomainEvent(newWatchEventError(err))
return
}
// No matter which event, try to fetch the domain xml
// and the state. If we get an IsNotFound error, that
// means that the VirtualMachineInstance was removed.
spec, err := util.GetDomainSpec(d)
d, err := c.LookupDomainByName(util.DomainFromNamespaceName(domain.ObjectMeta.Namespace, domain.ObjectMeta.Name))
if err != nil {
if !domainerrors.IsNotFound(err) {
log.Log.Reason(err).Error("Could not fetch the Domain specification.")
client.SendDomainEvent(newWatchEventError(err))
return
}
} else {
domain.Spec = *spec
domain.ObjectMeta.UID = spec.Metadata.KubeVirt.UID
}
status, reason, err := d.GetState()
if err != nil {
if !domainerrors.IsNotFound(err) {
log.Log.Reason(err).Error("Could not fetch the Domain state.")
log.Log.Reason(err).Error("Could not fetch the Domain.")
client.SendDomainEvent(newWatchEventError(err))
return
}
domain.SetState(api.NoState, api.ReasonNonExistent)
} else {
domain.SetState(util.ConvState(status), util.ConvReason(status, reason))
defer d.Free()
// No matter which event, try to fetch the domain xml
// and the state. If we get an IsNotFound error, that
// means that the VirtualMachineInstance was removed.
status, reason, err := d.GetState()
if err != nil {
if !domainerrors.IsNotFound(err) {
log.Log.Reason(err).Error("Could not fetch the Domain state.")
client.SendDomainEvent(newWatchEventError(err))
return
}
domain.SetState(api.NoState, api.ReasonNonExistent)
} else {
domain.SetState(util.ConvState(status), util.ConvReason(status, reason))
}
spec, err := util.GetDomainSpec(status, d)
if err != nil {
if !domainerrors.IsNotFound(err) {
log.Log.Reason(err).Error("Could not fetch the Domain specification.")
client.SendDomainEvent(newWatchEventError(err))
return
}
} else {
domain.Spec = *spec
domain.ObjectMeta.UID = spec.Metadata.KubeVirt.UID
}
log.Log.Infof("libvirt domain status: %v:%v", status, reason)
log.Log.Infof("kubevirt domain status: %v:%v", domain.Status.Status, domain.Status.Reason)
}
log.Log.Infof("domain status: %v:%v", status, reason)
switch domain.Status.Reason {
case api.ReasonNonExistent:
event := watch.Event{Type: watch.Deleted, Object: domain}
client.SendDomainEvent(event)
deleteNotificationSent <- event
events <- event
default:
if event.Event == libvirt.DOMAIN_EVENT_DEFINED && libvirt.DomainEventDefinedDetailType(event.Detail) == libvirt.DOMAIN_EVENT_DEFINED_ADDED {
client.SendDomainEvent(watch.Event{Type: watch.Added, Object: domain})
event := watch.Event{Type: watch.Added, Object: domain}
client.SendDomainEvent(event)
events <- event
} else {
client.SendDomainEvent(watch.Event{Type: watch.Modified, Object: domain})
}
}
}
func StartNotifier(virtShareDir string, domainConn cli.Connection, deleteNotificationSent chan watch.Event) error {
func StartNotifier(virtShareDir string, domainConn cli.Connection, deleteNotificationSent chan watch.Event, vmiUID types.UID) error {
type LibvirtEvent struct {
Domain string
Event *libvirt.DomainEventLifecycle
}
eventChan := make(chan LibvirtEvent, 10)
// Run the event processing logic in a separate goroutine so we never block libvirt
go func() {
for event := range eventChan {
// TODO don't make a client every single time
client, err := NewDomainEventClient(virtShareDir)
if err != nil {
log.Log.Reason(err).Error("Unable to create domain event notify client")
continue
}
libvirtEventCallback(domainConn, util.NewDomainFromName(event.Domain, vmiUID), event.Event, client, deleteNotificationSent)
log.Log.Info("processed event")
}
}()
entrypointCallback := func(c *libvirt.Connect, d *libvirt.Domain, event *libvirt.DomainEventLifecycle) {
log.Log.Infof("Libvirt event %d with reason %d received", event.Event, event.Detail)
// TODO don't make a client every single time
client, err := NewDomainEventClient(virtShareDir)
name, err := d.GetName()
if err != nil {
log.Log.Reason(err).Error("Unable to create domain event notify client")
return
log.Log.Reason(err).Info("Could not determine name of libvirt domain in event callback.")
}
select {
case eventChan <- LibvirtEvent{Event: event, Domain: name}:
default:
log.Log.Infof("Libvirt event channel is full, dropping event.")
}
libvirtEventCallback(d, event, client, deleteNotificationSent)
log.Log.Info("processed event")
}
err := domainConn.DomainEventLifecycleRegister(entrypointCallback)
if err != nil {
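The buffered eventChan plus the select/default in the callback is the load-shedding piece of this refactor: libvirt's event loop is never blocked by a slow notify client, and if the ten-slot buffer fills up, events are dropped with a log line rather than stalling the callback.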
@@ -37,6 +37,7 @@ import (
notifyserver "kubevirt.io/kubevirt/pkg/virt-handler/notify-server"
"kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
"kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/cli"
"kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/util"
)
var _ = Describe("Domain notify", func() {
@@ -49,11 +50,14 @@ var _ = Describe("Domain notify", func() {
var client *DomainEventClient
var mockDomain *cli.MockVirDomain
var mockCon *cli.MockConnection
var ctrl *gomock.Controller
BeforeEach(func() {
ctrl = gomock.NewController(GinkgoT())
mockCon = cli.NewMockConnection(ctrl)
mockDomain = cli.NewMockVirDomain(ctrl)
mockCon.EXPECT().LookupDomainByName(gomock.Any()).Return(mockDomain, nil).AnyTimes()
stop = make(chan struct{})
eventChan = make(chan watch.Event, 100)
@@ -86,11 +90,14 @@ var _ = Describe("Domain notify", func() {
Expect(err).ToNot(HaveOccurred())
mockDomain.EXPECT().GetState().Return(state, -1, nil)
mockDomain.EXPECT().GetName().Return("test", nil)
mockDomain.EXPECT().GetXMLDesc(gomock.Eq(libvirt.DOMAIN_XML_MIGRATABLE)).Return(string(x), nil)
mockDomain.EXPECT().Free()
mockDomain.EXPECT().GetName().Return("test", nil).AnyTimes()
if state == libvirt.DOMAIN_RUNNING {
mockDomain.EXPECT().GetXMLDesc(gomock.Eq(libvirt.DOMAIN_XML_MIGRATABLE)).Return(string(x), nil)
}
mockDomain.EXPECT().GetXMLDesc(gomock.Eq(libvirt.DOMAIN_XML_INACTIVE)).Return(string(x), nil)
libvirtEventCallback(mockDomain, &libvirt.DomainEventLifecycle{Event: event}, client, deleteNotificationSent)
libvirtEventCallback(mockCon, util.NewDomainFromName("test", "1234"), &libvirt.DomainEventLifecycle{Event: event}, client, deleteNotificationSent)
timedOut := false
timeout := time.After(2 * time.Second)
@@ -116,11 +123,12 @@ var _ = Describe("Domain notify", func() {
It("should receive a delete event when a VirtualMachineInstance is undefined",
func() {
mockDomain.EXPECT().GetXMLDesc(gomock.Eq(libvirt.DOMAIN_XML_MIGRATABLE)).Return("", libvirt.Error{Code: libvirt.ERR_NO_DOMAIN})
mockDomain.EXPECT().Free()
mockDomain.EXPECT().GetXMLDesc(gomock.Eq(libvirt.DOMAIN_XML_INACTIVE)).Return("", libvirt.Error{Code: libvirt.ERR_NO_DOMAIN})
mockDomain.EXPECT().GetState().Return(libvirt.DOMAIN_NOSTATE, -1, libvirt.Error{Code: libvirt.ERR_NO_DOMAIN})
mockDomain.EXPECT().GetName().Return("test", nil)
mockDomain.EXPECT().GetName().Return("test", nil).AnyTimes()
libvirtEventCallback(mockDomain, &libvirt.DomainEventLifecycle{Event: libvirt.DOMAIN_EVENT_UNDEFINED}, client, deleteNotificationSent)
libvirtEventCallback(mockCon, util.NewDomainFromName("test", "1234"), &libvirt.DomainEventLifecycle{Event: libvirt.DOMAIN_EVENT_UNDEFINED}, client, deleteNotificationSent)
timedOut := false
timeout := time.After(2 * time.Second)
@@ -182,7 +182,7 @@ func createSocket(socketPath string) (net.Listener, error) {
func RunServer(socketPath string,
domainManager virtwrap.DomainManager,
stopChan chan struct{},
options *ServerOptions) error {
options *ServerOptions) (chan struct{}, error) {
useEmulation := false
if options != nil {
@@ -196,23 +196,26 @@ func RunServer(socketPath string,
rpcServer.Register(server)
sock, err := createSocket(socketPath)
if err != nil {
return err
return nil, err
}
done := make(chan struct{})
go func() {
select {
case <-stopChan:
sock.Close()
os.Remove(socketPath)
log.Log.Info("closing cmd server socket")
close(done)
}
}()
go func() {
rpcServer.Accept(sock)
}()
return nil
return done, nil
}
func (s *Launcher) Ping(args *cmdclient.Args, reply *cmdclient.Reply) error {
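Callers can now distinguish "stop requested" from "socket actually closed". A sketch of the intended usage; the surrounding code and the cmdserver package alias are hypothetical, only the RunServer signature comes from this patch:

```go
done, err := cmdserver.RunServer(socketPath, domainManager, stopChan, options)
if err != nil {
	return err
}
// ... serve until shutdown is requested ...
close(stopChan) // asks the server goroutine to close and remove the socket
<-done          // blocks until the socket is really gone
```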
@@ -203,7 +203,7 @@ func (l *LibvirtDomainManager) SyncVMI(vmi *v1.VirtualMachineInstance, useEmulat
// TODO Suspend, Pause, ..., for now we only support reaching the running state
// TODO for migration and error detection we also need the state change reason
// TODO blocked state
if cli.IsDown(domState) {
if cli.IsDown(domState) && !vmi.IsRunning() && !vmi.IsFinal() {
err = dom.Create()
if err != nil {
logger.Reason(err).Error("Starting the VirtualMachineInstance failed.")
@@ -239,7 +239,11 @@ func (l *LibvirtDomainManager) SyncVMI(vmi *v1.VirtualMachineInstance, useEmulat
}
func (l *LibvirtDomainManager) getDomainSpec(dom cli.VirDomain) (*api.DomainSpec, error) {
return util.GetDomainSpec(dom)
state, _, err := dom.GetState()
if err != nil {
return nil, err
}
return util.GetDomainSpec(state, dom)
}
func (l *LibvirtDomainManager) SignalShutdownVMI(vmi *v1.VirtualMachineInstance) error {
@@ -369,7 +373,7 @@ func (l *LibvirtDomainManager) ListAllDomains() ([]*api.Domain, error) {
}
return list, err
}
spec, err := util.GetDomainSpec(dom)
spec, err := l.getDomainSpec(dom)
if err != nil {
if domainerrors.IsNotFound(err) {
continue
@@ -165,11 +165,13 @@ var _ = Describe("Manager", func() {
table.DescribeTable("on successful list all domains",
func(state libvirt.DomainState, kubevirtState api.LifeCycle, libvirtReason int, kubevirtReason api.StateChangeReason) {
mockDomain.EXPECT().GetState().Return(state, libvirtReason, nil)
mockDomain.EXPECT().GetState().Return(state, libvirtReason, nil).AnyTimes()
mockDomain.EXPECT().GetName().Return("test", nil)
x, err := xml.Marshal(api.NewMinimalDomainSpec("test"))
Expect(err).To(BeNil())
mockDomain.EXPECT().GetXMLDesc(gomock.Eq(libvirt.DOMAIN_XML_MIGRATABLE)).Return(string(x), nil)
if !cli.IsDown(state) {
mockDomain.EXPECT().GetXMLDesc(gomock.Eq(libvirt.DOMAIN_XML_MIGRATABLE)).Return(string(x), nil)
}
mockDomain.EXPECT().GetXMLDesc(gomock.Eq(libvirt.DOMAIN_XML_INACTIVE)).Return(string(x), nil)
mockConn.EXPECT().ListAllDomains(gomock.Eq(libvirt.CONNECT_LIST_DOMAINS_ACTIVE|libvirt.CONNECT_LIST_DOMAINS_INACTIVE)).Return([]cli.VirDomain{mockDomain}, nil)
@@ -13,6 +13,8 @@ import (
"github.com/libvirt/libvirt-go"
k8sv1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"kubevirt.io/kubevirt/pkg/api/v1"
"kubevirt.io/kubevirt/pkg/log"
"kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
@@ -102,24 +104,30 @@ func SetDomainSpec(virConn cli.Connection, vmi *v1.VirtualMachineInstance, wante
return dom, nil
}
func GetDomainSpec(dom cli.VirDomain) (*api.DomainSpec, error) {
spec, err := GetDomainSpecWithFlags(dom, libvirt.DOMAIN_XML_MIGRATABLE)
if err != nil {
return nil, err
}
func GetDomainSpec(status libvirt.DomainState, dom cli.VirDomain) (*api.DomainSpec, error) {
var spec *api.DomainSpec
inactiveSpec, err := GetDomainSpecWithFlags(dom, libvirt.DOMAIN_XML_INACTIVE)
if err != nil {
return nil, err
}
spec = inactiveSpec
// libvirt (the whole server) sometimes blocks indefinitely if a guest shutdown was performed
// and we ask it for the migratable xml immediately after the successful shutdown.
if !cli.IsDown(status) {
spec, err = GetDomainSpecWithFlags(dom, libvirt.DOMAIN_XML_MIGRATABLE)
if err != nil {
return nil, err
}
}
if !reflect.DeepEqual(spec.Metadata, inactiveSpec.Metadata) {
// Metadata is updated in the offline config only. As a result,
// we have to merge updates to the metadata into the domain spec.
metadata := &inactiveSpec.Metadata
metadata.DeepCopyInto(&spec.Metadata)
}
return spec, nil
}
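Note the asymmetry: the inactive XML is always fetched, since that is where libvirt keeps the KubeVirt metadata, while the migratable XML is only requested for domains that are up, sidestepping the post-shutdown hang described above; the metadata from the inactive config is then merged over the live spec.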
@@ -225,8 +233,11 @@ func SplitVMINamespaceKey(domainName string) (namespace, name string) {
// VMINamespaceKeyFunc constructs the domain name with a namespace prefix, e.g.
// namespace_name.
func VMINamespaceKeyFunc(vmi *v1.VirtualMachineInstance) string {
domName := fmt.Sprintf("%s_%s", vmi.GetObjectMeta().GetNamespace(), vmi.GetObjectMeta().GetName())
return domName
return DomainFromNamespaceName(vmi.Namespace, vmi.Name)
}
func DomainFromNamespaceName(namespace, name string) string {
return fmt.Sprintf("%s_%s", namespace, name)
}
func NewDomain(dom cli.VirDomain) (*api.Domain, error) {
@@ -242,6 +253,15 @@ func NewDomain(dom cli.VirDomain) (*api.Domain, error) {
return domain, nil
}
func NewDomainFromName(name string, vmiUID types.UID) *api.Domain {
namespace, name := SplitVMINamespaceKey(name)
domain := api.NewDomainReferenceFromName(namespace, name)
domain.Spec.Metadata.KubeVirt.UID = vmiUID
domain.GetObjectMeta().SetUID(domain.Spec.Metadata.KubeVirt.UID)
return domain
}
func SetupLibvirt() error {
// TODO: setting permissions and owners is not part of device plugins.
@@ -632,7 +632,7 @@ var _ = Describe("VMIlifecycle", func() {
By("Deleting the VirtualMachineInstance")
_, err = virtClient.RestClient().Delete().Resource("virtualmachineinstances").Namespace(vmi.GetObjectMeta().GetNamespace()).Name(vmi.GetObjectMeta().GetName()).Do().Get()
Expect(err).To(BeNil())
tests.NewObjectEventWatcher(vmi).SinceWatchedObjectResourceVersion().WaitFor(tests.NormalEvent, v1.Deleted)
tests.NewObjectEventWatcher(vmi).Timeout(60*time.Second).SinceWatchedObjectResourceVersion().WaitFor(tests.NormalEvent, v1.Deleted)
tests.WaitForVirtualMachineToDisappearWithTimeout(vmi, 120)
// Check if the stop event was logged
@@ -1034,7 +1034,8 @@ var _ = Describe("VMIlifecycle", func() {
// Delete the VirtualMachineInstance and wait for the confirmation of the delete
By("Deleting the VirtualMachineInstance")
Expect(virtClient.VirtualMachineInstance(vmi.Namespace).Delete(obj.Name, &metav1.DeleteOptions{})).To(Succeed())
tests.NewObjectEventWatcher(obj).SinceWatchedObjectResourceVersion().WaitFor(tests.NormalEvent, v1.Deleted)
event := tests.NewObjectEventWatcher(obj).SinceWatchedObjectResourceVersion().Timeout(60*time.Second).WaitFor(tests.NormalEvent, v1.Deleted)
Expect(event).ToNot(BeNil())
// Check if the graceful shutdown was logged
By("Checking that virt-handler logs VirtualMachineInstance graceful shutdown")
View
@@ -468,7 +468,7 @@ func GetTestTemplateFedora() *Template {
}
func newTemplateFedoraWithDockerTag(dockerTag string) *Template {
vm := getBaseVM("", map[string]string{"kubevirt-vm": "vm-${NAME}"})
vm := getBaseVM("", map[string]string{"kubevirt-vm": "vm-${NAME}", "kubevirt.io/os": "fedora27"})
addRegistryDisk(&vm.Spec.Template.Spec, fmt.Sprintf("%s/%s:%s", DockerPrefix, imageFedora, dockerTag), busVirtio)
addNoCloudDiskWitUserData(&vm.Spec.Template.Spec, "#cloud-config\npassword: fedora\nchpasswd: { expire: False }")
@@ -489,7 +489,7 @@ func newTemplateFedoraWithDockerTag(dockerTag string) *Template {
}
func GetTemplateRHEL7() *Template {
vm := getBaseVM("", map[string]string{"kubevirt-vm": "vm-${NAME}"})
vm := getBaseVM("", map[string]string{"kubevirt-vm": "vm-${NAME}", "kubevirt.io/os": "rhel-7.4"})
addPVCDisk(&vm.Spec.Template.Spec, "linux-vm-pvc-${NAME}", busVirtio, "disk0", "disk0-pvc")
pvc := getPVCForTemplate("linux-vm-pvc-${NAME}")
@@ -500,7 +500,7 @@ func GetTemplateRHEL7() *Template {
}
func GetTestTemplateRHEL7() *Template {
vm := getBaseVM("", map[string]string{"kubevirt-vm": "vm-${NAME}"})
vm := getBaseVM("", map[string]string{"kubevirt-vm": "vm-${NAME}", "kubevirt.io/os": "rhel-7.4"})
addEphemeralPVCDisk(&vm.Spec.Template.Spec, "disk-rhel", busSata, "pvcdisk", "pvcvolume")
return newTemplateForRHEL7VM(vm)
@@ -524,7 +524,7 @@ func newTemplateForRHEL7VM(vm *v1.VirtualMachine) *Template {
}
func GetTemplateWindows() *Template {
vm := getBaseVM("", map[string]string{"kubevirt-vm": "vm-${NAME}"})
vm := getBaseVM("", map[string]string{"kubevirt-vm": "vm-${NAME}", "kubevirt.io/os": "win2k12r2"})
windows := GetVMIWindows()
vm.Spec.Template.Spec = windows.Spec
vm.Spec.Template.ObjectMeta.Annotations = windows.ObjectMeta.Annotations