View
@@ -58,6 +58,8 @@ spec:
type: string
devices:
properties:
autoattachGraphicsDevice:
type: boolean
autoattachPodInterface:
type: boolean
disks:
@@ -214,6 +216,8 @@ spec:
properties:
limits:
type: object
overcommitGuestOverhead:
type: boolean
requests:
type: object
required:
@@ -239,6 +243,9 @@ spec:
terminationGracePeriodSeconds:
format: int64
type: integer
tolerations:
items: {}
type: array
volumes:
items:
properties:
View
@@ -212,6 +212,15 @@ func (in *Devices) DeepCopyInto(out *Devices) {
**out = **in
}
}
if in.AutoattachGraphicsDevice != nil {
in, out := &in.AutoattachGraphicsDevice, &out.AutoattachGraphicsDevice
if *in == nil {
*out = nil
} else {
*out = new(bool)
**out = **in
}
}
return
}
@@ -1660,6 +1669,13 @@ func (in *VirtualMachineInstanceSpec) DeepCopyInto(out *VirtualMachineInstanceSp
(*in).DeepCopyInto(*out)
}
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]core_v1.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.TerminationGracePeriodSeconds != nil {
in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
if *in == nil {
View
@@ -71,7 +71,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
},
"model": {
SchemaProps: spec.SchemaProps{
Description: "Model specifies the CPU model inside the VMI. List of available models https://github.com/libvirt/libvirt/blob/master/src/cpu/cpu_map.xml. You also can specify special cases like \"host-passthrough\" to get the same CPU as the node and \"host-model\" to get CPU closest to the node one. You can find more information under https://libvirt.org/formatdomain.html#elementsCPU. Defaults to host-model.",
Description: "Model specifies the CPU model inside the VMI. List of available models https://github.com/libvirt/libvirt/blob/master/src/cpu/cpu_map.xml. It is possible to specify special cases like \"host-passthrough\" to get the same CPU as the node and \"host-model\" to get CPU closest to the node one. For more information see https://libvirt.org/formatdomain.html#elementsCPU. Defaults to host-model.",
Type: []string{"string"},
Format: "",
},
@@ -220,6 +220,13 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
Format: "",
},
},
"autoattachGraphicsDevice": {
SchemaProps: spec.SchemaProps{
Description: "Whether to attach the default graphics device or not. VNC will not be available if set to false. Defaults to true.",
Type: []string{"boolean"},
Format: "",
},
},
},
},
},
@@ -1151,6 +1158,13 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
},
},
},
"overcommitGuestOverhead": {
SchemaProps: spec.SchemaProps{
Description: "Don't ask the scheduler to take the guest-management overhead into account. Instead put the overhead only into the requested memory limits. This can lead to crashes if all memory is in use on a node. Defaults to false.",
Type: []string{"boolean"},
Format: "",
},
},
},
},
},
@@ -1774,6 +1788,19 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
Ref: ref("k8s.io/api/core/v1.Affinity"),
},
},
"tolerations": {
SchemaProps: spec.SchemaProps{
Description: "If toleration is specified, obey all the toleration rules.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Toleration"),
},
},
},
},
},
"terminationGracePeriodSeconds": {
SchemaProps: spec.SchemaProps{
Description: "Grace period observed after signalling a VirtualMachineInstance to stop after which the VirtualMachineInstance is force terminated.",
@@ -1826,7 +1853,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
},
},
Dependencies: []string{
"k8s.io/api/core/v1.Affinity", "kubevirt.io/kubevirt/pkg/api/v1.DomainSpec", "kubevirt.io/kubevirt/pkg/api/v1.Network", "kubevirt.io/kubevirt/pkg/api/v1.Volume"},
"k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Toleration", "kubevirt.io/kubevirt/pkg/api/v1.DomainSpec", "kubevirt.io/kubevirt/pkg/api/v1.Network", "kubevirt.io/kubevirt/pkg/api/v1.Volume"},
},
"kubevirt.io/kubevirt/pkg/api/v1.VirtualMachineInstanceStatus": {
Schema: spec.Schema{
View
@@ -114,6 +114,10 @@ type ResourceRequirements struct {
// Valid resource keys are "memory" and "cpu".
// +optional
Limits v1.ResourceList `json:"limits,omitempty"`
// Don't ask the scheduler to take the guest-management overhead into account. Instead
// put the overhead only into the requested memory limits. This can lead to crashes if
// all memory is in use on a node. Defaults to false.
OvercommitGuestOverhead bool `json:"overcommitGuestOverhead,omitempty"`
}
// CPU allows specifying the CPU topology.
@@ -125,9 +129,9 @@ type CPU struct {
Cores uint32 `json:"cores,omitempty"`
// Model specifies the CPU model inside the VMI.
// List of available models https://github.com/libvirt/libvirt/blob/master/src/cpu/cpu_map.xml.
// You also can specify special cases like "host-passthrough" to get the same CPU as the node
// It is possible to specify special cases like "host-passthrough" to get the same CPU as the node
// and "host-model" to get CPU closest to the node one.
// You can find more information under https://libvirt.org/formatdomain.html#elementsCPU.
// For more information see https://libvirt.org/formatdomain.html#elementsCPU.
// Defaults to host-model.
// +optional
Model string `json:"model,omitempty"`
@@ -176,6 +180,9 @@ type Devices struct {
Interfaces []Interface `json:"interfaces,omitempty"`
// Whether to attach a pod network interface. Defaults to true.
AutoattachPodInterface *bool `json:"autoattachPodInterface,omitempty"`
// Whether to attach the default graphics device or not.
// VNC will not be available if set to false. Defaults to true.
AutoattachGraphicsDevice *bool `json:"autoattachGraphicsDevice,omitempty"`
}
// ---
View
@@ -39,16 +39,17 @@ func (DomainPresetSpec) SwaggerDoc() map[string]string {
func (ResourceRequirements) SwaggerDoc() map[string]string {
return map[string]string{
"requests": "Requests is a description of the initial vmi resources.\nValid resource keys are \"memory\" and \"cpu\".\n+optional",
"limits": "Limits describes the maximum amount of compute resources allowed.\nValid resource keys are \"memory\" and \"cpu\".\n+optional",
"requests": "Requests is a description of the initial vmi resources.\nValid resource keys are \"memory\" and \"cpu\".\n+optional",
"limits": "Limits describes the maximum amount of compute resources allowed.\nValid resource keys are \"memory\" and \"cpu\".\n+optional",
"overcommitGuestOverhead": "Don't ask the scheduler to take the guest-management overhead into account. Instead\nput the overhead only into the requested memory limits. This can lead to crashes if\nall memory is in use on a node. Defaults to false.",
}
}
func (CPU) SwaggerDoc() map[string]string {
return map[string]string{
"": "CPU allows specifying the CPU topology.",
"cores": "Cores specifies the number of cores inside the vmi.\nMust be a value greater or equal 1.",
"model": "Model specifies the CPU model inside the VMI.\nList of available models https://github.com/libvirt/libvirt/blob/master/src/cpu/cpu_map.xml.\nYou also can specify special cases like \"host-passthrough\" to get the same CPU as the node\nand \"host-model\" to get CPU closest to the node one.\nYou can find more information under https://libvirt.org/formatdomain.html#elementsCPU.\nDefaults to host-model.\n+optional",
"model": "Model specifies the CPU model inside the VMI.\nList of available models https://github.com/libvirt/libvirt/blob/master/src/cpu/cpu_map.xml.\nIt is possible to specify special cases like \"host-passthrough\" to get the same CPU as the node\nand \"host-model\" to get CPU closest to the node one.\nFor more information see https://libvirt.org/formatdomain.html#elementsCPU.\nDefaults to host-model.\n+optional",
}
}
@@ -80,10 +81,11 @@ func (Firmware) SwaggerDoc() map[string]string {
func (Devices) SwaggerDoc() map[string]string {
return map[string]string{
"disks": "Disks describes disks, cdroms, floppy and luns which are connected to the vmi.",
"watchdog": "Watchdog describes a watchdog device which can be added to the vmi.",
"interfaces": "Interfaces describe network interfaces which are added to the vm.",
"autoattachPodInterface": "Whether to attach a pod network interface. Defaults to true.",
"disks": "Disks describes disks, cdroms, floppy and luns which are connected to the vmi.",
"watchdog": "Watchdog describes a watchdog device which can be added to the vmi.",
"interfaces": "Interfaces describe network interfaces which are added to the vm.",
"autoattachPodInterface": "Whether to attach a pod network interface. Defaults to true.",
"autoattachGraphicsDevice": "Whether to attach the default graphics device or not.\nVNC will not be available if set to false. Defaults to true.",
}
}
View
@@ -140,6 +140,8 @@ type VirtualMachineInstanceSpec struct {
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// If affinity is specifies, obey all the affinity rules
Affinity *k8sv1.Affinity `json:"affinity,omitempty"`
// If toleration is specified, obey all the toleration rules.
Tolerations []k8sv1.Toleration `json:"tolerations,omitempty"`
// Grace period observed after signalling a VirtualMachineInstance to stop after which the VirtualMachineInstance is force terminated.
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
// List of volumes that can be mounted by disks belonging to the vmi.
@@ -317,12 +319,29 @@ const (
)
const (
AppLabel string = "kubevirt.io"
DomainLabel string = "kubevirt.io/domain"
CreatedByAnnotation string = "kubevirt.io/created-by"
OwnedByAnnotation string = "kubevirt.io/owned-by"
NodeNameLabel string = "kubevirt.io/nodeName"
NodeSchedulable string = "kubevirt.io/schedulable"
// This label marks resources that belong to KubeVirt. An optional value
// may indicate which specific KubeVirt component a resource belongs to.
AppLabel string = "kubevirt.io"
// This label is used to match virtual machine instances represented as
// libvirt XML domains with their pods. Among other things, the label is
// used to detect virtual machines with dead pods. Used on Pod.
DomainLabel string = "kubevirt.io/domain"
// This annotation is used to match virtual machine instance IDs with pods.
// Similar to kubevirt.io/domain. Used on Pod.
CreatedByAnnotation string = "kubevirt.io/created-by"
// This annotation defines which KubeVirt component owns the resource. Used
// on Pod.
OwnedByAnnotation string = "kubevirt.io/owned-by"
// This label describes which cluster node runs the virtual machine
// instance. Needed because with CRDs we can't use field selectors. Used on
// VirtualMachineInstance.
NodeNameLabel string = "kubevirt.io/nodeName"
// This label declares whether a particular node is available for
// scheduling virtual machine instances on it. Used on Node.
NodeSchedulable string = "kubevirt.io/schedulable"
// This annotation is regularly updated by virt-handler to help determine
// if a particular node is alive and hence should be available for new
// virtual machine instance scheduling. Used on Node.
VirtHandlerHeartbeat string = "kubevirt.io/heartbeat"
VirtualMachineInstanceFinalizer string = "foregroundDeleteVirtualMachine"
View
@@ -22,6 +22,7 @@ func (VirtualMachineInstanceSpec) SwaggerDoc() map[string]string {
"domain": "Specification of the desired behavior of the VirtualMachineInstance on the host.",
"nodeSelector": "NodeSelector is a selector which must be true for the vmi to fit on a node.\nSelector which must match a node's labels for the vmi to be scheduled on that node.\nMore info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\n+optional",
"affinity": "If affinity is specifies, obey all the affinity rules",
"tolerations": "If toleration is specified, obey all the toleration rules.",
"terminationGracePeriodSeconds": "Grace period observed after signalling a VirtualMachineInstance to stop after which the VirtualMachineInstance is force terminated.",
"volumes": "List of volumes that can be mounted by disks belonging to the vmi.",
"hostname": "Specifies the hostname of the vmi\nIf not specified, the hostname will be set to the name of the vmi, if dhcp or cloud-init is configured properly.\n+optional",
View
@@ -65,6 +65,9 @@ type KubeInformerFactory interface {
// Watches for ConfigMap objects
ConfigMap() cache.SharedIndexInformer
// Watches for LimitRange objects
LimitRanges() cache.SharedIndexInformer
}
type kubeInformerFactory struct {
@@ -182,6 +185,14 @@ func (f *kubeInformerFactory) ConfigMap() cache.SharedIndexInformer {
})
}
// LimitRanges returns a shared informer that watches LimitRange objects
// in all namespaces, indexed by namespace. The informer is created
// lazily and cached under the "limitrangeInformer" key.
func (f *kubeInformerFactory) LimitRanges() cache.SharedIndexInformer {
	return f.getInformer("limitrangeInformer", func() cache.SharedIndexInformer {
		listWatch := cache.NewListWatchFromClient(
			f.clientSet.CoreV1().RESTClient(),
			"limitranges",
			k8sv1.NamespaceAll,
			fields.Everything(),
		)
		return cache.NewSharedIndexInformer(
			listWatch,
			&k8sv1.LimitRange{},
			f.defaultResync,
			cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		)
	})
}
// resyncPeriod computes the time interval a shared informer waits before resyncing with the api server
func resyncPeriod(minResyncPeriod time.Duration) time.Duration {
factor := rand.Float64() + 1
View
@@ -4,6 +4,8 @@
package kubecli
import (
time "time"
gomock "github.com/golang/mock/gomock"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
@@ -684,15 +686,15 @@ func (_mr *_MockVirtualMachineInstanceInterfaceRecorder) Patch(arg0, arg1, arg2
return _mr.mock.ctrl.RecordCall(_mr.mock, "Patch", _s...)
}
func (_m *MockVirtualMachineInstanceInterface) SerialConsole(name string) (StreamInterface, error) {
ret := _m.ctrl.Call(_m, "SerialConsole", name)
func (_m *MockVirtualMachineInstanceInterface) SerialConsole(name string, timeout time.Duration) (StreamInterface, error) {
ret := _m.ctrl.Call(_m, "SerialConsole", name, timeout)
ret0, _ := ret[0].(StreamInterface)
ret1, _ := ret[1].(error)
return ret0, ret1
}
func (_mr *_MockVirtualMachineInstanceInterfaceRecorder) SerialConsole(arg0 interface{}) *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "SerialConsole", arg0)
func (_mr *_MockVirtualMachineInstanceInterfaceRecorder) SerialConsole(arg0, arg1 interface{}) *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "SerialConsole", arg0, arg1)
}
func (_m *MockVirtualMachineInstanceInterface) VNC(name string) (StreamInterface, error) {
View
@@ -27,6 +27,7 @@ package kubecli
import (
"io"
"time"
k8smetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
@@ -74,7 +75,7 @@ type VirtualMachineInstanceInterface interface {
Update(*v1.VirtualMachineInstance) (*v1.VirtualMachineInstance, error)
Delete(name string, options *k8smetav1.DeleteOptions) error
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VirtualMachineInstance, err error)
SerialConsole(name string) (StreamInterface, error)
SerialConsole(name string, timeout time.Duration) (StreamInterface, error)
VNC(name string) (StreamInterface, error)
}
View
@@ -25,6 +25,7 @@ import (
"io"
"net/http"
"net/url"
"time"
"github.com/gorilla/websocket"
@@ -245,8 +246,59 @@ func (ws *wsStreamer) Stream(options StreamOptions) error {
func (v *vmis) VNC(name string) (StreamInterface, error) {
return v.asyncSubresourceHelper(name, "vnc")
}
func (v *vmis) SerialConsole(name string) (StreamInterface, error) {
return v.asyncSubresourceHelper(name, "console")
// connectionStruct carries the result of an asynchronous connection
// attempt from the worker goroutine back to SerialConsole.
type connectionStruct struct {
	con StreamInterface
	err error
}

// SerialConsole connects to the serial console of the named
// VirtualMachineInstance. While the api server answers with
// http.StatusBadRequest (the VMI is not ready for a console connection
// yet) the attempt is retried once per second, until the given timeout
// expires. On timeout an error is returned; any other error from the
// subresource request is returned immediately.
func (v *vmis) SerialConsole(name string, timeout time.Duration) (StreamInterface, error) {
	// Buffered so the worker goroutine can always deliver its result and
	// exit even after the caller has timed out and stopped receiving
	// (the original unbuffered channel leaked the goroutine on timeout).
	connectionChan := make(chan connectionStruct, 1)
	// done tells the worker to stop retrying; closed when we return.
	// This replaces the original unsynchronized "isWaiting" bool, whose
	// cross-goroutine read/write was a data race.
	done := make(chan struct{})
	defer close(done)

	go func() {
		for {
			con, err := v.asyncSubresourceHelper(name, "console")
			if err == nil {
				connectionChan <- connectionStruct{con: con}
				return
			}
			asyncSubresourceError, ok := err.(*AsyncSubresourceError)
			if !ok || asyncSubresourceError.GetStatusCode() != http.StatusBadRequest {
				// Not a retryable condition; report it to the caller.
				connectionChan <- connectionStruct{err: err}
				return
			}
			// Sleep between retries to prevent denial of service on the
			// api server, but wake up immediately if the caller gave up.
			select {
			case <-done:
				return
			case <-time.After(1 * time.Second):
			}
		}
	}()

	// time.After instead of time.Tick: a ticker created by time.Tick is
	// never stopped and leaks.
	select {
	case <-time.After(timeout):
		return nil, fmt.Errorf("Timeout trying to connect to the virtual machine instance")
	case conStruct := <-connectionChan:
		return conStruct.con, conStruct.err
	}
}
// AsyncSubresourceError is an error from an asynchronous subresource
// request that also carries the HTTP status code returned by the api
// server, so callers can tell retryable conditions (e.g. SerialConsole
// retries on http.StatusBadRequest) apart from fatal ones.
type AsyncSubresourceError struct {
	err        string
	StatusCode int
}

// Error implements the error interface.
func (a *AsyncSubresourceError) Error() string {
	return a.err
}

// GetStatusCode returns the HTTP status code associated with the error.
func (a *AsyncSubresourceError) GetStatusCode() int {
	return a.StatusCode
}
func (v *vmis) asyncSubresourceHelper(name string, resource string) (StreamInterface, error) {
@@ -276,22 +328,22 @@ func (v *vmis) asyncSubresourceHelper(name string, resource string) (StreamInter
response, err := wrappedRoundTripper.RoundTrip(req)
if err != nil {
errChan <- err
errChan <- &AsyncSubresourceError{err: err.Error(), StatusCode: response.StatusCode}
return
}
if response != nil {
switch response.StatusCode {
case http.StatusOK:
case http.StatusNotFound:
err = fmt.Errorf("Virtual Machine not found.")
err = &AsyncSubresourceError{err: "Virtual Machine not found.", StatusCode: response.StatusCode}
case http.StatusInternalServerError:
err = fmt.Errorf("Websocket failed due to internal server error.")
err = &AsyncSubresourceError{err: "Websocket failed due to internal server error.", StatusCode: response.StatusCode}
default:
err = fmt.Errorf("Websocket failed with http status: %s", response.Status)
err = &AsyncSubresourceError{err: fmt.Sprintf("Websocket failed with http status: %s", response.Status), StatusCode: response.StatusCode}
}
} else {
err = fmt.Errorf("no response received")
err = &AsyncSubresourceError{err: "no response received"}
}
errChan <- err
}()
View
@@ -33,6 +33,7 @@ import (
flag "github.com/spf13/pflag"
"github.com/go-kit/kit/log"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/cache"
@@ -226,6 +227,24 @@ func (l FilteredLogger) Object(obj LoggableObject) *FilteredLogger {
return &l
}
// ObjectRef returns a copy of the logger enriched with identifying
// key/value pairs (namespace, name, kind, uid) taken from the given
// object reference. A nil reference yields an unmodified copy. The
// namespace key is omitted when the reference has no namespace.
func (l FilteredLogger) ObjectRef(obj *v1.ObjectReference) *FilteredLogger {
	if obj == nil {
		return &l
	}
	params := []interface{}{}
	if ns := obj.Namespace; ns != "" {
		params = append(params, "namespace", ns)
	}
	params = append(params,
		"name", obj.Name,
		"kind", obj.Kind,
		"uid", obj.UID,
	)
	l.With(params...)
	return &l
}
func (l *FilteredLogger) With(obj ...interface{}) *FilteredLogger {
l.logContext = l.logContext.With(obj...)
return l
View
@@ -29,6 +29,8 @@ import (
v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sv1 "k8s.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/api/v1"
)
@@ -287,6 +289,31 @@ func TestObject(t *testing.T) {
tearDown()
}
// TestObjectRef verifies that FilteredLogger.ObjectRef appends the
// object reference's namespace, name, kind and uid as key/value pairs
// after the standard log fields.
func TestObjectRef(t *testing.T) {
	setUp()
	log := MakeLogger(MockLogger{})
	log.SetLogLevel(INFO)
	vmRef := &k8sv1.ObjectReference{
		Kind:      "test",
		Name:      "test",
		Namespace: "test",
		UID:       "test",
	}
	log.ObjectRef(vmRef).Log("test", "message")
	// The mock logger records a flattened key/value list; keys sit at even
	// indices and values at odd ones: 0/1 level, 2/3 timestamp, 4/5 pos,
	// 6/7 component, then the ObjectRef fields 8/9 namespace, 10/11 name,
	// 12/13 kind, 14/15 uid.
	logEntry := logParams[0].([]interface{})
	assert(t, logEntry[0].(string) == "level", "Logged line did not have level entry")
	assert(t, logEntry[1].(string) == logLevelNames[INFO], "Logged line was not of level INFO")
	assert(t, logEntry[2].(string) == "timestamp", "Logged line is not expected format")
	assert(t, logEntry[4].(string) == "pos", "Logged line was not pos")
	assert(t, logEntry[6].(string) == "component", "Logged line is not expected format")
	assert(t, logEntry[7].(string) == "test", "Component was not logged")
	assert(t, logEntry[8].(string) == "namespace", "Logged line did not contain object namespace")
	assert(t, logEntry[10].(string) == "name", "Logged line did not contain object name")
	assert(t, logEntry[12].(string) == "kind", "Logged line did not contain object kind")
	assert(t, logEntry[14].(string) == "uid", "Logged line did not contain UUID")
	tearDown()
}
func TestError(t *testing.T) {
setUp()
log := MakeLogger(MockLogger{})
View
@@ -47,12 +47,9 @@ type SubresourceAPIApp struct {
VirtCli kubecli.KubevirtClient
}
func (app *SubresourceAPIApp) requestHandler(request *restful.Request, response *restful.Response, cmd []string) {
func (app *SubresourceAPIApp) requestHandler(request *restful.Request, response *restful.Response, vmi *v1.VirtualMachineInstance, cmd []string) {
vmiName := request.PathParameter("name")
namespace := request.PathParameter("namespace")
podName, httpStatusCode, err := app.remoteExecInfo(vmiName, namespace)
podName, httpStatusCode, err := app.remoteExecInfo(vmi)
if err != nil {
log.Log.Reason(err).Error("Failed to gather remote exec info for subresource request.")
response.WriteError(httpStatusCode, err)
@@ -84,7 +81,7 @@ func (app *SubresourceAPIApp) requestHandler(request *restful.Request, response
httpResponseChan := make(chan int)
copyErr := make(chan error)
go func() {
httpCode, err := remoteExecHelper(podName, namespace, cmd, inReader, outWriter)
httpCode, err := remoteExecHelper(podName, vmi.Namespace, cmd, inReader, outWriter)
log.Log.Errorf("%v", err)
httpResponseChan <- httpCode
}()
@@ -118,17 +115,39 @@ func (app *SubresourceAPIApp) VNCRequestHandler(request *restful.Request, respon
vmiName := request.PathParameter("name")
namespace := request.PathParameter("namespace")
cmd := []string{"/usr/share/kubevirt/virt-launcher/sock-connector", fmt.Sprintf("/var/run/kubevirt-private/%s/%s/virt-%s", namespace, vmiName, "vnc")}
app.requestHandler(request, response, cmd)
vmi, code, err := app.fetchVirtualMachineInstance(vmiName, namespace)
if err != nil {
log.Log.Reason(err).Error("Failed to gather remote exec info for subresource request.")
response.WriteError(code, err)
return
}
// If there are no graphics devices present, we can't proceed
if vmi.Spec.Domain.Devices.AutoattachGraphicsDevice != nil && *vmi.Spec.Domain.Devices.AutoattachGraphicsDevice == false {
err := fmt.Errorf("No graphics devices are present.")
log.Log.Reason(err).Error("Can't establish VNC connection.")
response.WriteError(http.StatusBadRequest, err)
return
}
cmd := []string{"/usr/share/kubevirt/virt-launcher/sock-connector", fmt.Sprintf("/var/run/kubevirt-private/%s/virt-%s", vmi.GetUID(), "vnc")}
app.requestHandler(request, response, vmi, cmd)
}
func (app *SubresourceAPIApp) ConsoleRequestHandler(request *restful.Request, response *restful.Response) {
vmiName := request.PathParameter("name")
namespace := request.PathParameter("namespace")
cmd := []string{"/usr/share/kubevirt/virt-launcher/sock-connector", fmt.Sprintf("/var/run/kubevirt-private/%s/%s/virt-%s", namespace, vmiName, "serial0")}
vmi, code, err := app.fetchVirtualMachineInstance(vmiName, namespace)
if err != nil {
log.Log.Reason(err).Error("Failed to gather remote exec info for subresource request.")
response.WriteError(code, err)
return
}
app.requestHandler(request, response, cmd)
cmd := []string{"/usr/share/kubevirt/virt-launcher/sock-connector", fmt.Sprintf("/var/run/kubevirt-private/%s/virt-%s", vmi.GetUID(), "serial0")}
app.requestHandler(request, response, vmi, cmd)
}
func (app *SubresourceAPIApp) findPod(namespace string, name string) (string, error) {
@@ -150,22 +169,26 @@ func (app *SubresourceAPIApp) findPod(namespace string, name string) (string, er
return podList.Items[0].ObjectMeta.Name, nil
}
func (app *SubresourceAPIApp) remoteExecInfo(name string, namespace string) (string, int, error) {
podName := ""
func (app *SubresourceAPIApp) fetchVirtualMachineInstance(name string, namespace string) (*v1.VirtualMachineInstance, int, error) {
vmi, err := app.VirtCli.VirtualMachineInstance(namespace).Get(name, &k8smetav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return "", http.StatusNotFound, goerror.New(fmt.Sprintf("VirtualMachineInstance %s in namespace %s not found.", name, namespace))
return nil, http.StatusNotFound, goerror.New(fmt.Sprintf("VirtualMachineInstance %s in namespace %s not found.", name, namespace))
}
return podName, http.StatusInternalServerError, err
return nil, http.StatusInternalServerError, err
}
return vmi, 0, nil
}
func (app *SubresourceAPIApp) remoteExecInfo(vmi *v1.VirtualMachineInstance) (string, int, error) {
podName := ""
if vmi.IsRunning() == false {
return podName, http.StatusBadRequest, goerror.New(fmt.Sprintf("Unable to connect to VirtualMachineInstance because phase is %s instead of %s", vmi.Status.Phase, v1.Running))
}
podName, err = app.findPod(namespace, name)
podName, err := app.findPod(vmi.Namespace, vmi.Name)
if err != nil {
return podName, http.StatusBadRequest, fmt.Errorf("unable to find matching pod for remote execution: %v", err)
}
View
@@ -67,17 +67,13 @@ var _ = Describe("VirtualMachineInstance Subresources", func() {
podList.Items = append(podList.Items, *pod)
server.AppendHandlers(
ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", "/apis/kubevirt.io/v1alpha2/namespaces/default/virtualmachineinstances/testvmi"),
ghttp.RespondWithJSONEncoded(http.StatusOK, vmi),
),
ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", "/api/v1/namespaces/default/pods"),
ghttp.RespondWithJSONEncoded(http.StatusOK, podList),
),
)
podName, httpStatusCode, err := app.remoteExecInfo("testvmi", "default")
podName, httpStatusCode, err := app.remoteExecInfo(vmi)
Expect(err).ToNot(HaveOccurred())
Expect(podName).To(Equal("madeup-name"))
@@ -90,14 +86,7 @@ var _ = Describe("VirtualMachineInstance Subresources", func() {
vmi.Status.Phase = v1.Succeeded
vmi.ObjectMeta.SetUID(uuid.NewUUID())
server.AppendHandlers(
ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", "/apis/kubevirt.io/v1alpha2/namespaces/default/virtualmachineinstances/testvmi"),
ghttp.RespondWithJSONEncoded(http.StatusOK, vmi),
),
)
_, httpStatusCode, err := app.remoteExecInfo("testvmi", "default")
_, httpStatusCode, err := app.remoteExecInfo(vmi)
Expect(err).To(HaveOccurred())
Expect(httpStatusCode).To(Equal(http.StatusBadRequest))
@@ -113,17 +102,13 @@ var _ = Describe("VirtualMachineInstance Subresources", func() {
podList.Items = []k8sv1.Pod{}
server.AppendHandlers(
ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", "/apis/kubevirt.io/v1alpha2/namespaces/default/virtualmachineinstances/testvmi"),
ghttp.RespondWithJSONEncoded(http.StatusOK, vmi),
),
ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", "/api/v1/namespaces/default/pods"),
ghttp.RespondWithJSONEncoded(http.StatusOK, podList),
),
)
_, httpStatusCode, err := app.remoteExecInfo("testvmi", "default")
_, httpStatusCode, err := app.remoteExecInfo(vmi)
Expect(err).To(HaveOccurred())
Expect(httpStatusCode).To(Equal(http.StatusBadRequest))
View
@@ -198,7 +198,9 @@ func (t *templateService) RenderLaunchManifest(vmi *v1.VirtualMachineInstance) (
} else {
// Add overhead memory
memoryRequest := resources.Requests[k8sv1.ResourceMemory]
memoryRequest.Add(*memoryOverhead)
if !vmi.Spec.Domain.Resources.OvercommitGuestOverhead {
memoryRequest.Add(*memoryOverhead)
}
resources.Requests[k8sv1.ResourceMemory] = memoryRequest
if memoryLimit, ok := resources.Limits[k8sv1.ResourceMemory]; ok {
@@ -229,6 +231,7 @@ func (t *templateService) RenderLaunchManifest(vmi *v1.VirtualMachineInstance) (
command := []string{"/usr/share/kubevirt/virt-launcher/entrypoint.sh",
"--qemu-timeout", "5m",
"--name", domain,
"--uid", string(vmi.UID),
"--namespace", namespace,
"--kubevirt-share-dir", t.virtShareDir,
"--readiness-file", "/tmp/healthy",
@@ -390,6 +393,12 @@ func (t *templateService) RenderLaunchManifest(vmi *v1.VirtualMachineInstance) (
}
}
if vmi.Spec.Tolerations != nil {
pod.Spec.Tolerations = []k8sv1.Toleration{}
for _, v := range vmi.Spec.Tolerations {
pod.Spec.Tolerations = append(pod.Spec.Tolerations, v)
}
}
return &pod, nil
}
@@ -437,7 +446,9 @@ func getMemoryOverhead(domain v1.DomainSpec) *resource.Quantity {
overhead.Add(resource.MustParse("8Mi"))
// Add video RAM overhead
overhead.Add(resource.MustParse("16Mi"))
if domain.Devices.AutoattachGraphicsDevice == nil || *domain.Devices.AutoattachGraphicsDevice == true {
overhead.Add(resource.MustParse("16Mi"))
}
return overhead
}
View
@@ -72,6 +72,7 @@ var _ = Describe("Template", func() {
Expect(pod.Spec.Containers[0].Command).To(Equal([]string{"/usr/share/kubevirt/virt-launcher/entrypoint.sh",
"--qemu-timeout", "5m",
"--name", "testvmi",
"--uid", "1234",
"--namespace", "testns",
"--kubevirt-share-dir", "/var/run/kubevirt",
"--readiness-file", "/tmp/healthy",
@@ -115,6 +116,7 @@ var _ = Describe("Template", func() {
Expect(pod.Spec.Containers[0].Command).To(Equal([]string{"/usr/share/kubevirt/virt-launcher/entrypoint.sh",
"--qemu-timeout", "5m",
"--name", "testvmi",
"--uid", "1234",
"--namespace", "default",
"--kubevirt-share-dir", "/var/run/kubevirt",
"--readiness-file", "/tmp/healthy",
@@ -175,6 +177,24 @@ var _ = Describe("Template", func() {
Expect(pod.Spec.Affinity).To(BeEquivalentTo(&kubev1.Affinity{PodAntiAffinity: &podAntiAffinity}))
})
It("should add tolerations to pod", func() {
podToleration := kubev1.Toleration{Key: "test"}
vm := v1.VirtualMachineInstance{
ObjectMeta: metav1.ObjectMeta{Name: "testvm", Namespace: "default", UID: "1234"},
Spec: v1.VirtualMachineInstanceSpec{
Tolerations: []kubev1.Toleration{
{
Key: podToleration.Key,
},
},
Domain: v1.DomainSpec{},
},
}
pod, err := svc.RenderLaunchManifest(&vm)
Expect(err).ToNot(HaveOccurred())
Expect(pod.Spec.Tolerations).To(BeEquivalentTo([]kubev1.Toleration{{Key: podToleration.Key}}))
})
It("should use the hostname and subdomain if specified on the vm", func() {
vmi := v1.VirtualMachineInstance{
ObjectMeta: metav1.ObjectMeta{Name: "testvm",
@@ -266,6 +286,35 @@ var _ = Describe("Template", func() {
Expect(pod.Spec.Containers[0].Resources.Requests.Memory().String()).To(Equal("1099507557"))
Expect(pod.Spec.Containers[0].Resources.Limits.Memory().String()).To(Equal("2099507557"))
})
It("should overcommit guest overhead if selected, by only adding the overhead to memory limits", func() {
vmi := v1.VirtualMachineInstance{
ObjectMeta: metav1.ObjectMeta{
Name: "testvmi",
Namespace: "default",
UID: "1234",
},
Spec: v1.VirtualMachineInstanceSpec{
Domain: v1.DomainSpec{
Resources: v1.ResourceRequirements{
OvercommitGuestOverhead: true,
Requests: kubev1.ResourceList{
kubev1.ResourceMemory: resource.MustParse("1G"),
},
Limits: kubev1.ResourceList{
kubev1.ResourceMemory: resource.MustParse("2G"),
},
},
},
},
}
pod, err := svc.RenderLaunchManifest(&vmi)
Expect(err).ToNot(HaveOccurred())
Expect(pod.Spec.Containers[0].Resources.Requests.Memory().String()).To(Equal("1G"))
Expect(pod.Spec.Containers[0].Resources.Limits.Memory().String()).To(Equal("2099507557"))
})
It("should not add unset resources", func() {
vmi := v1.VirtualMachineInstance{
@@ -297,6 +346,40 @@ var _ = Describe("Template", func() {
// Limits for KVM and TUN devices should be requested.
Expect(pod.Spec.Containers[0].Resources.Limits).ToNot(BeNil())
})
// Verifies that the 16Mi video RAM overhead is only added to the pod's
// memory request while the graphics device is attached, i.e. when
// AutoattachGraphicsDevice is nil (default) or true, and skipped when
// it is explicitly false.
// Fix: test description misspelled "autoattachGraphicsDevicse".
table.DescribeTable("should check autoattachGraphicsDevice", func(autoAttach *bool, memory int) {
	vmi := v1.VirtualMachineInstance{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "testvmi",
			Namespace: "default",
			UID:       "1234",
		},
		Spec: v1.VirtualMachineInstanceSpec{
			Domain: v1.DomainSpec{
				CPU: &v1.CPU{Cores: 3},
				Resources: v1.ResourceRequirements{
					Requests: kubev1.ResourceList{
						kubev1.ResourceCPU:    resource.MustParse("1m"),
						kubev1.ResourceMemory: resource.MustParse("64M"),
					},
				},
			},
		},
	}
	vmi.Spec.Domain.Devices = v1.Devices{
		AutoattachGraphicsDevice: autoAttach,
	}
	pod, err := svc.RenderLaunchManifest(&vmi)
	Expect(err).ToNot(HaveOccurred())
	// Request = 64M guest memory + fixed overhead; the graphics device
	// contributes the difference between the 179 and 162 expectations.
	Expect(pod.Spec.Containers[0].Resources.Requests.Memory().ToDec().ScaledValue(resource.Mega)).To(Equal(int64(memory)))
},
	table.Entry("and consider graphics overhead if it is not set", nil, 179),
	table.Entry("and consider graphics overhead if it is set to true", True(), 179),
	table.Entry("and not consider graphics overhead if it is set to false", False(), 162),
)
})
Context("with hugepages constraints", func() {
@@ -612,3 +695,13 @@ func MakeFakeConfigMapWatcher(configMaps []kubev1.ConfigMap) *cache.ListWatch {
}
return cmListWatch
}
// True returns a pointer to a freshly allocated bool holding true,
// convenient for populating optional *bool spec fields in tests.
func True() *bool {
	val := true
	return &val
}
// False returns a pointer to a freshly allocated bool holding false.
func False() *bool {
	val := false
	return &val
}
View
@@ -99,6 +99,8 @@ type VirtControllerApp struct {
vmController *VMController
vmInformer cache.SharedIndexInformer
limitrangeInformer cache.SharedIndexInformer
LeaderElection leaderelectionconfig.Configuration
launcherImage string
@@ -158,6 +160,7 @@ func Execute() {
app.configMapCache = app.configMapInformer.GetStore()
app.vmInformer = app.informerFactory.VirtualMachine()
app.limitrangeInformer = app.informerFactory.LimitRanges()
app.initCommon()
app.initReplicaSet()
@@ -252,7 +255,7 @@ func (vca *VirtControllerApp) initCommon() {
}
vca.templateService = services.NewTemplateService(vca.launcherImage, vca.virtShareDir, vca.imagePullSecret, vca.configMapCache)
vca.vmiController = NewVMIController(vca.templateService, vca.vmiInformer, vca.podInformer, vca.vmiRecorder, vca.clientSet, vca.configMapInformer)
vca.vmiPresetController = NewVirtualMachinePresetController(vca.vmiPresetInformer, vca.vmiInformer, vca.vmiPresetQueue, vca.vmiPresetCache, vca.clientSet, vca.vmiPresetRecorder)
vca.vmiPresetController = NewVirtualMachinePresetController(vca.vmiPresetInformer, vca.vmiInformer, vca.vmiPresetQueue, vca.vmiPresetCache, vca.clientSet, vca.vmiPresetRecorder, vca.limitrangeInformer)
vca.nodeController = NewNodeController(vca.clientSet, vca.nodeInformer, vca.vmiInformer, nil)
}
View
@@ -26,6 +26,7 @@ import (
"time"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
k8smetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
@@ -42,25 +43,27 @@ import (
)
type VirtualMachinePresetController struct {
vmiPresetInformer cache.SharedIndexInformer
vmiInitInformer cache.SharedIndexInformer
clientset kubecli.KubevirtClient
queue workqueue.RateLimitingInterface
recorder record.EventRecorder
store cache.Store
vmiPresetInformer cache.SharedIndexInformer
vmiInitInformer cache.SharedIndexInformer
clientset kubecli.KubevirtClient
queue workqueue.RateLimitingInterface
recorder record.EventRecorder
store cache.Store
limitrangeInformer cache.SharedIndexInformer
}
const initializerMarking = "presets.virtualmachines." + kubev1.GroupName + "/presets-applied"
const exclusionMarking = "virtualmachineinstancepresets.admission.kubevirt.io/exclude"
func NewVirtualMachinePresetController(vmiPresetInformer cache.SharedIndexInformer, vmiInitInformer cache.SharedIndexInformer, queue workqueue.RateLimitingInterface, vmiInitCache cache.Store, clientset kubecli.KubevirtClient, recorder record.EventRecorder) *VirtualMachinePresetController {
func NewVirtualMachinePresetController(vmiPresetInformer cache.SharedIndexInformer, vmiInitInformer cache.SharedIndexInformer, queue workqueue.RateLimitingInterface, vmiInitCache cache.Store, clientset kubecli.KubevirtClient, recorder record.EventRecorder, limitrangeInformer cache.SharedIndexInformer) *VirtualMachinePresetController {
vmii := VirtualMachinePresetController{
vmiPresetInformer: vmiPresetInformer,
vmiInitInformer: vmiInitInformer,
clientset: clientset,
queue: queue,
recorder: recorder,
store: vmiInitCache,
vmiPresetInformer: vmiPresetInformer,
vmiInitInformer: vmiInitInformer,
clientset: clientset,
queue: queue,
recorder: recorder,
store: vmiInitCache,
limitrangeInformer: limitrangeInformer,
}
return &vmii
}
@@ -156,6 +159,9 @@ func (c *VirtualMachinePresetController) initializeVirtualMachine(vmi *kubev1.Vi
} else {
logger.Object(vmi).V(4).Info("Setting default values on VirtualMachine")
kubev1.SetObjectDefaults_VirtualMachineInstance(vmi)
// handle namespace limitrange default values
applyNamespaceLimitRangeValues(vmi, c.limitrangeInformer)
}
} else {
logger.Object(vmi).Infof("VirtualMachineInstance is excluded from VirtualMachinePresets")
@@ -172,6 +178,53 @@ func (c *VirtualMachinePresetController) initializeVirtualMachine(vmi *kubev1.Vi
return nil
}
// applyNamespaceLimitRangeValues fills in the VMI's memory limit from the
// namespace LimitRange defaults when the VMI spec does not set one itself.
// Lookup errors are ignored (best effort), matching the controller's
// defaulting semantics.
func applyNamespaceLimitRangeValues(vmi *kubev1.VirtualMachineInstance, limitrangeInformer cache.SharedIndexInformer) {
	if limits := vmi.Spec.Domain.Resources.Limits; limits != nil {
		if _, hasMemory := limits[k8sv1.ResourceMemory]; hasMemory {
			// An explicit memory limit on the VMI always wins.
			return
		}
	}
	namespaceMemLimit, err := getNamespaceLimits(vmi.Namespace, limitrangeInformer)
	if err != nil || namespaceMemLimit.IsZero() {
		// No usable namespace default found.
		return
	}
	if vmi.Spec.Domain.Resources.Limits == nil {
		vmi.Spec.Domain.Resources.Limits = make(k8sv1.ResourceList)
	}
	vmi.Spec.Domain.Resources.Limits[k8sv1.ResourceMemory] = *namespaceMemLimit
}
// getNamespaceLimits returns the smallest container-level default memory
// limit among all LimitRange objects in the given namespace. When the
// namespace defines no such limit the returned quantity is zero, so callers
// must check IsZero before applying it. Only LimitTypeContainer entries are
// considered, since the launcher pod is constrained at container scope.
func getNamespaceLimits(namespace string, limitrangeInformer cache.SharedIndexInformer) (*resource.Quantity, error) {
	finalLimit := &resource.Quantity{Format: resource.BinarySI}
	// Multiple LimitRange objects may constrain the same resource in a
	// namespace; the effective default is the minimum across all of them.
	limits, err := limitrangeInformer.GetIndexer().ByIndex(cache.NamespaceIndex, namespace)
	if err != nil {
		return nil, err
	}
	for _, obj := range limits {
		limitRange := obj.(*k8sv1.LimitRange)
		for _, item := range limitRange.Spec.Limits {
			if item.Type != k8sv1.LimitTypeContainer {
				continue
			}
			mem := item.Default.Memory()
			if mem.IsZero() {
				continue
			}
			// Take the first memory default seen, then keep the minimum.
			// (The original expressed this as `IsZero() != (Cmp < 0)`,
			// an XOR that is equivalent for non-negative quantities but
			// needlessly hard to read.)
			if finalLimit.IsZero() || mem.Cmp(*finalLimit) < 0 {
				finalLimit = mem
			}
		}
	}
	return finalLimit, nil
}
// listPresets returns all VirtualMachinePresets by namespace
func listPresets(vmiPresetInformer cache.SharedIndexInformer, namespace string) ([]kubev1.VirtualMachineInstancePreset, error) {
indexer := vmiPresetInformer.GetIndexer()
View
@@ -765,6 +765,7 @@ var _ = Describe("VirtualMachineInstance Initializer", func() {
var recorder *record.FakeRecorder
var vmiPresetQueue *testutils.MockWorkQueue
var resourcesInformer cache.SharedIndexInformer
flavorKey := fmt.Sprintf("%s/flavor", v1.GroupName)
presetFlavor := "test-case"
@@ -784,7 +785,8 @@ var _ = Describe("VirtualMachineInstance Initializer", func() {
recorder = record.NewFakeRecorder(100)
vmiPresetController = NewVirtualMachinePresetController(vmiPresetInformer, vmiInformer, vmiPresetQueue, vmiInitCache, virtClient, recorder)
resourcesInformer, _ = testutils.NewFakeInformerFor(&k8sv1.LimitRangeList{})
vmiPresetController = NewVirtualMachinePresetController(vmiPresetInformer, vmiInformer, vmiPresetQueue, vmiInitCache, virtClient, recorder, resourcesInformer)
// create a reference preset
selector := k8smetav1.LabelSelector{MatchLabels: map[string]string{flavorKey: presetFlavor}}
@@ -1011,6 +1013,71 @@ var _ = Describe("VirtualMachineInstance Initializer", func() {
err := vmiPresetController.initializeVirtualMachine(vmi)
Expect(err).ToNot(HaveOccurred())
})
It("should handle namespace resource defaults", func() {
	vmi := v1.NewMinimalVMI("testvmi")
	vmi.Spec = v1.VirtualMachineInstanceSpec{
		Domain: v1.DomainSpec{
			Devices: v1.Devices{
				Disks: []v1.Disk{
					{Name: "testdisk"},
				},
			},
		},
	}

	// Builds a LimitRange carrying a container-level default memory limit.
	newLimitRange := func(name, memory string) *k8sv1.LimitRange {
		return &k8sv1.LimitRange{
			ObjectMeta: k8smetav1.ObjectMeta{Name: name, Namespace: "default"},
			Spec: k8sv1.LimitRangeSpec{
				Limits: []k8sv1.LimitRangeItem{
					{
						Type: k8sv1.LimitTypeContainer,
						Default: k8sv1.ResourceList{
							k8sv1.ResourceMemory: resource.MustParse(memory),
						},
					},
				},
			},
		}
	}

	By("verifying that namespace limitrange defaults are set when vm memory limits are not set")
	indexer := resourcesInformer.GetIndexer()
	indexer.Add(newLimitRange("abc", "256Mi"))
	applyNamespaceLimitRangeValues(vmi, resourcesInformer)
	Expect(vmi.Spec.Domain.Resources.Limits.Memory().String()).To(Equal("256Mi"))

	By("setting the minimal namespace memory limit value")
	// Drop the limits applied above so the next pass starts from a clean spec.
	vmi.Spec.Domain.Resources = v1.ResourceRequirements{}
	indexer.Add(newLimitRange("abc1", "128Mi"))
	applyNamespaceLimitRangeValues(vmi, resourcesInformer)
	Expect(vmi.Spec.Domain.Resources.Limits.Memory().String()).To(Equal("128Mi"))

	By("verifying that namespace limits are not set when vm spec provides limits")
	vmi.Spec.Domain.Resources = v1.ResourceRequirements{
		Limits: k8sv1.ResourceList{
			k8sv1.ResourceMemory: resource.MustParse("2G"),
		},
	}
	applyNamespaceLimitRangeValues(vmi, resourcesInformer)
	Expect(vmi.Spec.Domain.Resources.Limits.Memory().String()).To(Equal("2G"))
})
})
})
View
@@ -208,7 +208,7 @@ func (c *VMIController) execute(key string) error {
return nil
}
// If neddsSync is true (expectations fulfilled) we can make save assumptions if virt-handler or virt-controller owns the pod
// If needsSync is true (expectations fulfilled) we can make save assumptions if virt-handler or virt-controller owns the pod
needsSync := c.podExpectations.SatisfiedExpectations(key) && c.handoverExpectations.SatisfiedExpectations(key)
var syncErr syncError = nil
@@ -315,7 +315,16 @@ func isPodReady(pod *k8sv1.Pod) bool {
}
func isPodDownOrGoingDown(pod *k8sv1.Pod) bool {
return podIsDown(pod) || pod.DeletionTimestamp != nil
return podIsDown(pod) || isComputeContainerDown(pod) || pod.DeletionTimestamp != nil
}
// isComputeContainerDown reports whether the pod's "compute" container has
// terminated. Pods without a "compute" container status report false.
func isComputeContainerDown(pod *k8sv1.Pod) bool {
	for i := range pod.Status.ContainerStatuses {
		status := &pod.Status.ContainerStatuses[i]
		if status.Name != "compute" {
			continue
		}
		return status.State.Terminated != nil
	}
	return false
}
func podIsDown(pod *k8sv1.Pod) bool {
View
@@ -538,6 +538,19 @@ var _ = Describe("VirtualMachineInstance watcher", func() {
controller.Execute()
})
It("should update the virtual machine to failed if compute container is terminated while still scheduling", func() {
	vmi := NewPendingVirtualMachine("testvmi")
	pod := NewPodForVirtualMachine(vmi, k8sv1.PodPending)
	// A pending pod whose "compute" container already terminated must push
	// the VMI to the failed phase even before scheduling completes.
	terminated := k8sv1.ContainerStatus{
		Name:  "compute",
		State: k8sv1.ContainerState{Terminated: &k8sv1.ContainerStateTerminated{}},
	}
	pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, terminated)
	vmi.Status.Phase = v1.Scheduling

	addVirtualMachine(vmi)
	podFeeder.Add(pod)

	shouldExpectVirtualMachineFailedState(vmi)

	controller.Execute()
})
table.DescribeTable("should remove the finalizer if no pod is present and the vmi is in ", func(phase v1.VirtualMachineInstancePhase) {
vmi := NewPendingVirtualMachine("testvmi")
vmi.Status.Phase = phase
@@ -625,7 +638,7 @@ func NewPodForVirtualMachine(vmi *v1.VirtualMachineInstance, phase k8sv1.PodPhas
Status: k8sv1.PodStatus{
Phase: phase,
ContainerStatuses: []k8sv1.ContainerStatus{
{Ready: true},
{Ready: true, Name: "test"},
},
},
}
View
@@ -58,6 +58,7 @@ type LauncherClient interface {
SyncVirtualMachine(vmi *v1.VirtualMachineInstance) error
ShutdownVirtualMachine(vmi *v1.VirtualMachineInstance) error
KillVirtualMachine(vmi *v1.VirtualMachineInstance) error
DeleteDomain(vmi *v1.VirtualMachineInstance) error
GetDomain() (*api.Domain, bool, error)
Ping() error
Close()
@@ -162,6 +163,17 @@ func (c *VirtLauncherClient) KillVirtualMachine(vmi *v1.VirtualMachineInstance)
return err
}
// DeleteDomain asks the virt-launcher to remove the libvirt domain backing
// the given VMI via the "Launcher.Delete" RPC. Only the error is relevant;
// the reply payload is discarded.
func (c *VirtLauncherClient) DeleteDomain(vmi *v1.VirtualMachineInstance) error {
	_, err := c.genericSendCmd(&Args{VMI: vmi}, "Launcher.Delete")
	return err
}
func (c *VirtLauncherClient) GetDomain() (*api.Domain, bool, error) {
domain := &api.Domain{}
cmd := "Launcher.GetDomain"
View
@@ -61,6 +61,16 @@ func (_mr *_MockLauncherClientRecorder) KillVirtualMachine(arg0 interface{}) *go
return _mr.mock.ctrl.RecordCall(_mr.mock, "KillVirtualMachine", arg0)
}
// DeleteDomain mocks the LauncherClient.DeleteDomain RPC wrapper.
// NOTE(review): gomock-generated code — regenerate with mockgen rather
// than editing by hand.
func (_m *MockLauncherClient) DeleteDomain(vmi *v1.VirtualMachineInstance) error {
	ret := _m.ctrl.Call(_m, "DeleteDomain", vmi)
	ret0, _ := ret[0].(error)
	return ret0
}
// DeleteDomain records an expected call to DeleteDomain on the mock.
func (_mr *_MockLauncherClientRecorder) DeleteDomain(arg0 interface{}) *gomock.Call {
	return _mr.mock.ctrl.RecordCall(_mr.mock, "DeleteDomain", arg0)
}
func (_m *MockLauncherClient) GetDomain() (*api.Domain, bool, error) {
ret := _m.ctrl.Call(_m, "GetDomain")
ret0, _ := ret[0].(*api.Domain)
View
@@ -296,8 +296,10 @@ func (d *VirtualMachineController) getDomainFromCache(key string) (domain *api.D
func (d *VirtualMachineController) execute(key string) error {
// set to true when domain needs to be shutdown and removed from libvirt.
shouldShutdownAndDelete := false
// set to true when domain needs to be shutdown.
shouldShutdown := false
// set to true when domain needs to be removed from libvirt.
shouldDelete := false
// optimization. set to true when processing already deleted domain.
shouldCleanUp := false
// set to true when VirtualMachineInstance is active or about to become active.
@@ -308,27 +310,53 @@ func (d *VirtualMachineController) execute(key string) error {
return err
}
log.Log.V(3).Infof("Processing vmi %v, existing: %v\n", vmi.Name, vmiExists)
if vmiExists {
log.Log.V(3).Infof("vmi is in phase: %v\n", vmi.Status.Phase)
}
domain, domainExists, err := d.getDomainFromCache(key)
if err != nil {
return err
}
// Ignore domains from an older VMI
if vmiExists && domainExists && domain.Spec.Metadata.KubeVirt.UID != vmi.UID {
log.Log.Object(vmi).Info("Ignoring domain from an older VMI, will be handled by its own VMI.")
return nil
}
log.Log.V(3).Infof("Domain: existing: %v\n", domainExists)
if domainExists {
log.Log.V(3).Infof("Domain status: %v, reason: %v\n", domain.Status.Status, domain.Status.Reason)
}
domainAlive := domainExists &&
domain.Status.Status != api.Shutoff &&
domain.Status.Status != api.Crashed &&
domain.Status.Status != ""
// Determine if gracefulShutdown has been triggered by virt-launcher
gracefulShutdown, err := virtlauncher.VmHasGracefulShutdownTrigger(d.virtShareDir, vmi)
if err != nil {
return err
} else if gracefulShutdown && vmi.IsRunning() {
log.Log.Object(vmi).V(3).Info("Shutting down due to graceful shutdown signal.")
shouldShutdownAndDelete = true
shouldShutdown = true
}
// Determine removal of VirtualMachineInstance from cache should result in deletion.
if !vmiExists {
if domainExists {
// The VirtualMachineInstance is deleted on the cluster,
// then continue with processing the deletion on the host.
if domainAlive {
// The VirtualMachineInstance is deleted on the cluster, and domain is alive,
// then shut down the domain.
log.Log.Object(vmi).V(3).Info("Shutting down domain for deleted VirtualMachineInstance object.")
shouldShutdown = true
} else if domainExists {
// The VirtualMachineInstance is deleted on the cluster, and domain is not alive
// then delete the domain.
log.Log.Object(vmi).V(3).Info("Shutting down domain for deleted VirtualMachineInstance object.")
shouldShutdownAndDelete = true
shouldDelete = true
} else {
// If neither the domain nor the vmi object exist locally,
// then ensure any remaining local ephemeral data is cleaned up.
@@ -338,9 +366,12 @@ func (d *VirtualMachineController) execute(key string) error {
// Determine if VirtualMachineInstance is being deleted.
if vmiExists && vmi.ObjectMeta.DeletionTimestamp != nil {
if vmi.IsRunning() || domainExists {
if vmi.IsRunning() || domainAlive {
log.Log.Object(vmi).V(3).Info("Shutting down domain for VirtualMachineInstance with deletion timestamp.")
shouldShutdownAndDelete = true
shouldShutdown = true
} else if domainExists {
log.Log.Object(vmi).V(3).Info("Deleting domain for VirtualMachineInstance with deletion timestamp.")
shouldDelete = true
} else {
shouldCleanUp = true
}
@@ -350,7 +381,7 @@ func (d *VirtualMachineController) execute(key string) error {
// shutting down naturally (guest internal invoked shutdown)
if domainExists && vmiExists && vmi.IsFinal() {
log.Log.Object(vmi).V(3).Info("Removing domain and ephemeral data for finalized vmi.")
shouldShutdownAndDelete = true
shouldDelete = true
}
// Determine if an active (or about to be active) VirtualMachineInstance should be updated.
@@ -366,22 +397,18 @@ func (d *VirtualMachineController) execute(key string) error {
}
}
// If for instance an orphan delete was performed on a vmi, a pod can still be in terminating state,
// make sure that we don't perform an update and instead try to make sure that the pod goes definitely away
if vmiExists && domainExists && domain.Spec.Metadata.KubeVirt.UID != vmi.UID {
log.Log.Object(vmi).Errorf("Libvirt domain seems to be from a wrong VirtualMachineInstance instance. That should never happen. Manual intervention required.")
return nil
}
var syncErr error
// Process the VirtualMachineInstance update in this order.
// * Shutdown and Deletion due to VirtualMachineInstance deletion, process stopping, graceful shutdown trigger, etc...
// * Cleanup of already shutdown and Deleted VMIs
// * Update due to spec change and initial start flow.
if shouldShutdownAndDelete {
if shouldShutdown {
log.Log.Object(vmi).V(3).Info("Processing shutdown.")
syncErr = d.processVmShutdown(vmi, domain)
} else if shouldDelete {
log.Log.Object(vmi).V(3).Info("Processing deletion.")
syncErr = d.processVmDelete(vmi, domain)
} else if shouldCleanUp {
log.Log.Object(vmi).V(3).Info("Processing local ephemeral data cleanup for shutdown domain.")
syncErr = d.processVmCleanup(vmi)
@@ -500,46 +527,67 @@ func (d *VirtualMachineController) getLauncherClient(vmi *v1.VirtualMachineInsta
func (d *VirtualMachineController) processVmShutdown(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
clientDisconnected := false
client, err := d.getLauncherClient(vmi)
// Only attempt to shutdown/destroy if we still have a connection established with the pod.
client, err := d.getVerifiedLauncherClient(vmi)
if err != nil {
clientDisconnected = true
}
// verify connectivity before processing shutdown.
// It's possible the pod has already been torn down along with the VirtualMachineInstance.
if clientDisconnected == false {
err := client.Ping()
if cmdclient.IsDisconnected(err) {
clientDisconnected = true
} else if err != nil {
return err
}
return err
}
// Only attempt to gracefully terminate if we still have a
// connection established with the pod.
// If the pod has been torn down, we know the VirtualMachineInstance has been destroyed.
if clientDisconnected == false {
// Only attempt to gracefully shutdown if the domain has the ACPI feature enabled
if isACPIEnabled(vmi, domain) {
expired, timeLeft := d.hasGracePeriodExpired(domain)
if expired == false {
err = client.ShutdownVirtualMachine(vmi)
if err != nil && !cmdclient.IsDisconnected(err) {
// Only report err if it wasn't the result of a disconnect.
return err
if !expired {
if domain.Status.Status != api.Shutdown {
err = client.ShutdownVirtualMachine(vmi)
if err != nil && !cmdclient.IsDisconnected(err) {
// Only report err if it wasn't the result of a disconnect.
return err
}
log.Log.Object(vmi).Infof("Signaled graceful shutdown for %s", vmi.GetObjectMeta().GetName())
// pending graceful shutdown.
d.Queue.AddAfter(controller.VirtualMachineKey(vmi), time.Duration(timeLeft)*time.Second)
d.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.ShuttingDown.String(), "Signaled Graceful Shutdown")
} else {
log.Log.V(4).Object(vmi).Infof("%s is already shutting down.", vmi.GetObjectMeta().GetName())
}
log.Log.Object(vmi).Infof("Signaled graceful shutdown for %s", vmi.GetObjectMeta().GetName())
// pending graceful shutdown.
d.Queue.AddAfter(controller.VirtualMachineKey(vmi), time.Duration(timeLeft)*time.Second)
d.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.ShuttingDown.String(), "Signaled Graceful Shutdown")
return nil
}
log.Log.Object(vmi).Infof("Grace period expired, killing deleted VirtualMachineInstance %s", vmi.GetObjectMeta().GetName())
} else {
log.Log.Object(vmi).Infof("ACPI feature not available, killing deleted VirtualMachineInstance %s", vmi.GetObjectMeta().GetName())
}
log.Log.Object(vmi).Infof("grace period expired, killing deleted VirtualMachineInstance %s", vmi.GetObjectMeta().GetName())
err = client.KillVirtualMachine(vmi)
if err != nil && !cmdclient.IsDisconnected(err) {
// Only report err if it wasn't the result of a disconnect.
//
// Both virt-launcher and virt-handler are trying to destroy
// the VirtualMachineInstance at the same time. It's possible the client may get
// disconnected during the kill request, which shouldn't be
// considered an error.
return err
}
d.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Deleted.String(), "VirtualMachineInstance stopping")
return nil
}
func (d *VirtualMachineController) processVmDelete(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
err = client.KillVirtualMachine(vmi)
// Only attempt to shutdown/destroy if we still have a connection established with the pod.
client, err := d.getVerifiedLauncherClient(vmi)
// If the pod has been torn down, we know the VirtualMachineInstance is down.
if err == nil {
log.Log.Object(vmi).Infof("Signaled deletion for %s", vmi.GetObjectMeta().GetName())
// pending deletion.
d.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Deleted.String(), "Signaled Deletion")
err = client.DeleteDomain(vmi)
if err != nil && !cmdclient.IsDisconnected(err) {
// Only report err if it wasn't the result of a disconnect.
//
@@ -550,12 +598,23 @@ func (d *VirtualMachineController) processVmShutdown(vmi *v1.VirtualMachineInsta
return err
}
}
d.recorder.Event(vmi, k8sv1.EventTypeNormal, v1.Deleted.String(), "VirtualMachineInstance stopping")
return d.processVmCleanup(vmi)
return nil
}
// getVerifiedLauncherClient obtains the launcher client for the VMI and
// confirms the connection is still usable with a Ping round-trip — the pod
// may already have been torn down along with the VirtualMachineInstance.
// Note that the client is returned even when Ping fails, so callers that
// only inspect the error keep their existing behavior.
func (d *VirtualMachineController) getVerifiedLauncherClient(vmi *v1.VirtualMachineInstance) (cmdclient.LauncherClient, error) {
	client, err := d.getLauncherClient(vmi)
	if err != nil {
		return client, err
	}
	return client, client.Ping()
}
func (d *VirtualMachineController) processVmUpdate(origVMI *v1.VirtualMachineInstance) error {
vmi := origVMI.DeepCopy()
@@ -618,12 +677,21 @@ func (d *VirtualMachineController) calculateVmPhaseForStatusReason(domain *api.D
return v1.Failed, nil
}
} else {
switch domain.Status.Status {
case api.Shutoff, api.Crashed:
switch domain.Status.Reason {
case api.ReasonCrashed, api.ReasonPanicked:
return v1.Failed, nil
case api.ReasonShutdown, api.ReasonDestroyed, api.ReasonSaved, api.ReasonFromSnapshot:
case api.ReasonDestroyed:
// When ACPI is available, the domain was tried to be shutdown,
// and destroyed means that the domain was destroyed after the graceperiod expired.
// Without ACPI a destroyed domain is ok.
if isACPIEnabled(vmi, domain) {
return v1.Failed, nil
}
return v1.Succeeded, nil
case api.ReasonShutdown, api.ReasonSaved, api.ReasonFromSnapshot:
return v1.Succeeded, nil
}
case api.Running, api.Paused, api.Blocked, api.PMSuspended:
@@ -711,3 +779,10 @@ func (d *VirtualMachineController) heartBeat(interval time.Duration, stopCh chan
}, interval, 1.2, true, stopCh)
}
}
// isACPIEnabled reports whether a graceful (ACPI-signal based) shutdown can
// be attempted for the given VMI/domain pair: the domain must expose the
// ACPI feature and the VMI must allow a non-zero termination grace period
// (a zero grace period means "kill immediately", so graceful shutdown does
// not apply). A nil grace period counts as non-zero — the default applies.
//
// Bug fix: the previous implementation compared the pointer against the
// address of a local zero value (`TerminationGracePeriodSeconds != &zero`),
// which is always true for distinct objects, silently disabling the
// grace-period check. Compare the pointed-to value instead.
func isACPIEnabled(vmi *v1.VirtualMachineInstance, domain *api.Domain) bool {
	gracePeriod := vmi.Spec.TerminationGracePeriodSeconds
	if gracePeriod != nil && *gracePeriod == 0 {
		return false
	}
	return domain.Spec.Features != nil &&
		domain.Spec.Features.ACPI != nil
}
View
@@ -131,6 +131,9 @@ var _ = Describe("VirtualMachineInstance", func() {
initGracePeriodHelper := func(gracePeriod int64, vmi *v1.VirtualMachineInstance, dom *api.Domain) {
vmi.Spec.TerminationGracePeriodSeconds = &gracePeriod
dom.Spec.Features = &api.Features{
ACPI: &api.FeatureEnabled{},
}
dom.Spec.Metadata.KubeVirt.GracePeriod.DeletionGracePeriodSeconds = gracePeriod
}
@@ -141,8 +144,7 @@ var _ = Describe("VirtualMachineInstance", func() {
domainFeeder.Add(domain)
client.EXPECT().Ping()
client.EXPECT().KillVirtualMachine(v1.NewVMIReferenceFromName("testvmi"))
client.EXPECT().Close()
client.EXPECT().DeleteDomain(v1.NewVMIReferenceFromName("testvmi"))
controller.Execute()
})
@@ -153,7 +155,6 @@ var _ = Describe("VirtualMachineInstance", func() {
client.EXPECT().Ping()
client.EXPECT().KillVirtualMachine(v1.NewVMIReferenceFromName("testvmi"))
client.EXPECT().Close()
controller.Execute()
})
@@ -212,7 +213,6 @@ var _ = Describe("VirtualMachineInstance", func() {
client.EXPECT().Ping()
client.EXPECT().KillVirtualMachine(v1.NewVMIReferenceFromName("testvmi"))
client.EXPECT().Close()
domainFeeder.Add(domain)
controller.Execute()
@@ -229,7 +229,6 @@ var _ = Describe("VirtualMachineInstance", func() {
client.EXPECT().Ping()
client.EXPECT().KillVirtualMachine(v1.NewVMIReferenceFromName("testvmi"))
client.EXPECT().Close()
domainFeeder.Add(domain)
controller.Execute()
}, 3)
View
@@ -500,20 +500,32 @@ func Convert_v1_VirtualMachine_To_api_Domain(vmi *v1.VirtualMachineInstance, dom
},
Source: &SerialSource{
Mode: "bind",
Path: fmt.Sprintf("/var/run/kubevirt-private/%s/%s/virt-serial%d", vmi.ObjectMeta.Namespace, vmi.ObjectMeta.Name, serialPort),
Path: fmt.Sprintf("/var/run/kubevirt-private/%s/virt-serial%d", vmi.ObjectMeta.UID, serialPort),
},
},
}
// Add mandatory vnc device
domain.Spec.Devices.Graphics = []Graphics{
{
Listen: &GraphicsListen{
Type: "socket",
Socket: fmt.Sprintf("/var/run/kubevirt-private/%s/%s/virt-vnc", vmi.ObjectMeta.Namespace, vmi.ObjectMeta.Name),
if vmi.Spec.Domain.Devices.AutoattachGraphicsDevice == nil || *vmi.Spec.Domain.Devices.AutoattachGraphicsDevice == true {
var heads uint = 1
var vram uint = 16384
domain.Spec.Devices.Video = []Video{
{
Model: VideoModel{
Type: "vga",
Heads: &heads,
VRam: &vram,
},
},
Type: "vnc",
},
}
domain.Spec.Devices.Graphics = []Graphics{
{
Listen: &GraphicsListen{
Type: "socket",
Socket: fmt.Sprintf("/var/run/kubevirt-private/%s/virt-vnc", vmi.ObjectMeta.UID),
},
Type: "vnc",
},
}
}
getInterfaceType := func(iface *v1.Interface) string {
View
@@ -319,12 +319,14 @@ var _ = Describe("Converter", func() {
<model type="virtio"></model>
<alias name="default"></alias>
</interface>
<controller type="usb" index="0" model="none"></controller>
<video>
<model type="vga" heads="1" vram="16384"></model>
</video>
<graphics type="vnc">
<listen type="socket" socket="/var/run/kubevirt-private/mynamespace/testvmi/virt-vnc"></listen>
<listen type="socket" socket="/var/run/kubevirt-private/f4686d2c-6e8d-4335-b8fd-81bee22f4814/virt-vnc"></listen>
</graphics>
<memballoon model="none"></memballoon>
<disk device="disk" type="file">
<source file="/var/run/kubevirt-private/vmi-disks/myvolume/disk.img"></source>
<target bus="virtio" dev="vda"></target>
@@ -381,7 +383,7 @@ var _ = Describe("Converter", func() {
</disk>
<serial type="unix">
<target port="0"></target>
<source mode="bind" path="/var/run/kubevirt-private/mynamespace/testvmi/virt-serial0"></source>
<source mode="bind" path="/var/run/kubevirt-private/f4686d2c-6e8d-4335-b8fd-81bee22f4814/virt-serial0"></source>
</serial>
<console type="pty">
<target type="serial" port="0"></target>
@@ -745,6 +747,42 @@ var _ = Describe("Converter", func() {
Expect(err).To(BeNil())
})
})
Context("graphics and video device", func() {
	// AutoattachGraphicsDevice unset or true must yield exactly one video
	// and one graphics device in the converted domain; false must suppress
	// both. Fix: the test name had a typo ("Devicse").
	table.DescribeTable("should check autoattachGraphicsDevice", func(autoAttach *bool, devices int) {
		vmi := v1.VirtualMachineInstance{
			ObjectMeta: k8smeta.ObjectMeta{
				Name:      "testvmi",
				Namespace: "default",
				UID:       "1234",
			},
			Spec: v1.VirtualMachineInstanceSpec{
				Domain: v1.DomainSpec{
					CPU: &v1.CPU{Cores: 3},
					Resources: v1.ResourceRequirements{
						Requests: k8sv1.ResourceList{
							k8sv1.ResourceCPU:    resource.MustParse("1m"),
							k8sv1.ResourceMemory: resource.MustParse("64M"),
						},
					},
				},
			},
		}
		vmi.Spec.Domain.Devices = v1.Devices{
			AutoattachGraphicsDevice: autoAttach,
		}
		domain := vmiToDomain(&vmi, &ConverterContext{UseEmulation: true})
		Expect(domain.Spec.Devices.Video).To(HaveLen(devices))
		Expect(domain.Spec.Devices.Graphics).To(HaveLen(devices))
	},
		table.Entry("and add the graphics and video device if it is not set", nil, 1),
		table.Entry("and add the graphics and video device if it is set to true", True(), 1),
		table.Entry("and not add the graphics and video device if it is set to false", False(), 0),
	)
})
})
func diskToDiskXML(disk *v1.Disk) string {
@@ -782,3 +820,13 @@ func xmlToDomainSpec(data string) *DomainSpec {
func vmiToDomainXMLToDomainSpec(vmi *v1.VirtualMachineInstance, c *ConverterContext) *DomainSpec {
return xmlToDomainSpec(vmiToDomainXML(vmi, c))
}
// True returns the address of a freshly allocated bool holding true
// (test helper for optional *bool spec fields).
func True() *bool {
	v := new(bool)
	*v = true
	return v
}
// False returns the address of a freshly allocated bool holding false.
func False() *bool {
	// new(bool) already yields a pointer to the zero value, i.e. false.
	return new(bool)
}
View
@@ -462,6 +462,22 @@ func (in *ConsoleTarget) DeepCopy() *ConsoleTarget {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): deepcopy-gen generated code — regenerate rather than edit.
func (in *Controller) DeepCopyInto(out *Controller) {
	// Controller contains only value-type (string) fields, so a shallow
	// struct assignment is already a complete deep copy.
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Controller.
func (in *Controller) DeepCopy() *Controller {
	if in == nil {
		return nil
	}
	out := new(Controller)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConverterContext) DeepCopyInto(out *ConverterContext) {
*out = *in
@@ -516,6 +532,11 @@ func (in *Devices) DeepCopyInto(out *Devices) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Controllers != nil {
in, out := &in.Controllers, &out.Controllers
*out = make([]Controller, len(*in))
copy(*out, *in)
}
if in.Video != nil {
in, out := &in.Video, &out.Video
*out = make([]Video, len(*in))
View
@@ -12,19 +12,18 @@ const (
)
func SetDefaults_Devices(devices *Devices) {
// Use vga as video device, since it is better than cirrus
// and does not require guest drivers
var heads uint = 1
var vram uint = 16384
devices.Video = []Video{
// Set default controllers, "none" means that controller disabled
devices.Controllers = []Controller{
{
Model: VideoModel{
Type: "vga",
Heads: &heads,
VRam: &vram,
},
Type: "usb",
Index: "0",
Model: "none",
},
}
// Set default memballoon, "none" means that controller disabled
devices.Ballooning = &Ballooning{
Model: "none",
}
}
View
@@ -238,18 +238,30 @@ type HugePage struct {
}
type Devices struct {
Emulator string `xml:"emulator,omitempty"`
Interfaces []Interface `xml:"interface"`
Channels []Channel `xml:"channel"`
Video []Video `xml:"video"`
Graphics []Graphics `xml:"graphics"`
Ballooning *Ballooning `xml:"memballoon,omitempty"`
Disks []Disk `xml:"disk"`
Serials []Serial `xml:"serial"`
Consoles []Console `xml:"console"`
Watchdog *Watchdog `xml:"watchdog,omitempty"`
Emulator string `xml:"emulator,omitempty"`
Interfaces []Interface `xml:"interface"`
Channels []Channel `xml:"channel"`
Controllers []Controller `xml:"controller,omitempty"`
Video []Video `xml:"video"`
Graphics []Graphics `xml:"graphics"`
Ballooning *Ballooning `xml:"memballoon,omitempty"`
Disks []Disk `xml:"disk"`
Serials []Serial `xml:"serial"`
Consoles []Console `xml:"console"`
Watchdog *Watchdog `xml:"watchdog,omitempty"`
}
// BEGIN Controller -----------------------------
// Controller represents the libvirt controller element https://libvirt.org/formatdomain.html#elementsControllers
type Controller struct {
	Type  string `xml:"type,attr"`            // controller kind, e.g. "usb"
	Index string `xml:"index,attr"`           // bus index; libvirt serializes it as a string attribute
	Model string `xml:"model,attr,omitempty"` // controller model; "none" disables the controller
}
// END Controller -----------------------------
// BEGIN Disk -----------------------------
type Disk struct {
View
@@ -21,6 +21,7 @@ package api
import (
"encoding/xml"
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -40,9 +41,11 @@ var exampleXML = `<domain type="kvm" xmlns:qemu="http://libvirt.org/schemas/doma
<baseBoard></baseBoard>
</sysinfo>
<devices>
<controller type="usb" index="0" model="none"></controller>
<video>
<model type="vga" heads="1" vram="16384"></model>
</video>
<memballoon model="none"></memballoon>
<disk device="disk" type="network">
<source protocol="iscsi" name="iqn.2013-07.com.example:iscsi-nopool/2">
<host name="example.com" port="3260"></host>
@@ -176,6 +179,7 @@ var _ = Describe("Schema", func() {
// Serializes the example domain spec and compares it against the
// reference XML document declared near the top of the file.
It("Marshal into xml", func() {
	buf, err := xml.MarshalIndent(exampleDomain.Spec, "", " ")
	Expect(err).To(BeNil())
	// NOTE(review): leftover debug output — `go vet` also flags the
	// non-constant format string passed to Printf. Remove this line
	// together with the "fmt" import once confirmed it is unneeded.
	fmt.Printf(string(buf))
	Expect(string(buf)).To(Equal(exampleXML))
})
View
@@ -210,24 +210,34 @@ func (_mr *_MockVirDomainRecorder) Resume() *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "Resume")
}
func (_m *MockVirDomain) Destroy() error {
ret := _m.ctrl.Call(_m, "Destroy")
func (_m *MockVirDomain) DestroyFlags(flags libvirt_go.DomainDestroyFlags) error {
ret := _m.ctrl.Call(_m, "DestroyFlags", flags)
ret0, _ := ret[0].(error)
return ret0
}
func (_mr *_MockVirDomainRecorder) Destroy() *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "Destroy")
func (_mr *_MockVirDomainRecorder) DestroyFlags(arg0 interface{}) *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "DestroyFlags", arg0)
}
func (_m *MockVirDomain) Shutdown() error {
ret := _m.ctrl.Call(_m, "Shutdown")
func (_m *MockVirDomain) ShutdownFlags(flags libvirt_go.DomainShutdownFlags) error {
ret := _m.ctrl.Call(_m, "ShutdownFlags", flags)
ret0, _ := ret[0].(error)
return ret0
}
func (_mr *_MockVirDomainRecorder) Shutdown() *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "Shutdown")
func (_mr *_MockVirDomainRecorder) ShutdownFlags(arg0 interface{}) *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "ShutdownFlags", arg0)
}
func (_m *MockVirDomain) Undefine() error {
ret := _m.ctrl.Call(_m, "Undefine")
ret0, _ := ret[0].(error)
return ret0
}
func (_mr *_MockVirDomainRecorder) Undefine() *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "Undefine")
}
func (_m *MockVirDomain) GetName() (string, error) {
@@ -263,16 +273,6 @@ func (_mr *_MockVirDomainRecorder) GetXMLDesc(arg0 interface{}) *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "GetXMLDesc", arg0)
}
func (_m *MockVirDomain) Undefine() error {
ret := _m.ctrl.Call(_m, "Undefine")
ret0, _ := ret[0].(error)
return ret0
}
func (_mr *_MockVirDomainRecorder) Undefine() *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "Undefine")
}
func (_m *MockVirDomain) OpenConsole(devname string, stream *libvirt_go.Stream, flags libvirt_go.DomainConsoleFlags) error {
ret := _m.ctrl.Call(_m, "OpenConsole", devname, stream, flags)
ret0, _ := ret[0].(error)
View
@@ -96,10 +96,10 @@ func (l *LibvirtConnection) NewStream(flags libvirt.StreamFlags) (Stream, error)
if err := l.reconnectIfNecessary(); err != nil {
return nil, err
}
defer l.checkConnectionLost()
s, err := l.Connect.NewStream(flags)
if err != nil {
l.checkConnectionLost(err)
return nil, err
}
return &VirStream{Stream: s}, nil
@@ -114,40 +114,41 @@ func (l *LibvirtConnection) DomainEventLifecycleRegister(callback libvirt.Domain
if err = l.reconnectIfNecessary(); err != nil {
return
}
defer l.checkConnectionLost()
l.callbacks = append(l.callbacks, callback)
_, err = l.Connect.DomainEventLifecycleRegister(nil, callback)
l.checkConnectionLost(err)
return
}
func (l *LibvirtConnection) LookupDomainByName(name string) (dom VirDomain, err error) {
if err = l.reconnectIfNecessary(); err != nil {
return
}
defer l.checkConnectionLost()
return l.Connect.LookupDomainByName(name)
domain, err := l.Connect.LookupDomainByName(name)
l.checkConnectionLost(err)
return domain, err
}
func (l *LibvirtConnection) DomainDefineXML(xml string) (dom VirDomain, err error) {
if err = l.reconnectIfNecessary(); err != nil {
return
}
defer l.checkConnectionLost()
dom, err = l.Connect.DomainDefineXML(xml)
l.checkConnectionLost(err)
return
}
func (l *LibvirtConnection) ListAllDomains(flags libvirt.ConnectListAllDomainsFlags) ([]VirDomain, error) {
if err := l.reconnectIfNecessary(); err != nil {
return nil, err
}
defer l.checkConnectionLost()
virDoms, err := l.Connect.ListAllDomains(flags)
if err != nil {
l.checkConnectionLost(err)
return nil, err
}
doms := make([]VirDomain, len(virDoms))
@@ -184,7 +185,7 @@ func (l *LibvirtConnection) installWatchdog(checkInterval time.Duration) {
l.reconnectLock.Unlock()
} else {
// Do the usual error check to determine if the connection is lost
l.checkConnectionLost()
l.checkConnectionLost(err)
}
}
}
@@ -214,16 +215,20 @@ func (l *LibvirtConnection) reconnectIfNecessary() (err error) {
return nil
}
func (l *LibvirtConnection) checkConnectionLost() {
func (l *LibvirtConnection) checkConnectionLost(err error) {
l.reconnectLock.Lock()
defer l.reconnectLock.Unlock()
err := libvirt.GetLastError()
if errors.IsOk(err) {
return
}
switch err.Code {
libvirtError, ok := err.(libvirt.Error)
if !ok {
return
}
switch libvirtError.Code {
case
libvirt.ERR_INTERNAL_ERROR,
libvirt.ERR_INVALID_CONN,
@@ -233,20 +238,20 @@ func (l *LibvirtConnection) checkConnectionLost() {
libvirt.ERR_SYSTEM_ERROR,
libvirt.ERR_RPC:
l.alive = false
log.Log.With("code", err.Code).Reason(err).Error("Connection to libvirt lost.")
log.Log.With("code", libvirtError.Code).Reason(libvirtError).Error("Connection to libvirt lost.")
}
}
type VirDomain interface {
GetState() (libvirt.DomainState, int, error)
Create() error
Resume() error
Destroy() error
Shutdown() error
DestroyFlags(flags libvirt.DomainDestroyFlags) error
ShutdownFlags(flags libvirt.DomainShutdownFlags) error
Undefine() error
GetName() (string, error)
GetUUIDString() (string, error)
GetXMLDesc(flags libvirt.DomainXMLFlags) (string, error)
Undefine() error
OpenConsole(devname string, stream *libvirt.Stream, flags libvirt.DomainConsoleFlags) error
Free() error
}
View
@@ -119,6 +119,28 @@ func (s *Launcher) Shutdown(args *cmdclient.Args, reply *cmdclient.Reply) error
return nil
}
func (s *Launcher) Delete(args *cmdclient.Args, reply *cmdclient.Reply) error {
reply.Success = true
vmi, err := getVmfromClientArgs(args)
if err != nil {
reply.Success = false
reply.Message = err.Error()
return nil
}
err = s.domainManager.DeleteVMI(vmi)
if err != nil {
log.Log.Object(vmi).Reason(err).Errorf("Failed to signal deletion for vmi")
reply.Success = false
reply.Message = err.Error()
return nil
}
log.Log.Object(vmi).Info("Signaled vmi deletion")
return nil
}
func (s *Launcher) GetDomain(args *cmdclient.Args, reply *cmdclient.Reply) error {
reply.Success = true
View
@@ -52,6 +52,16 @@ func (_mr *_MockDomainManagerRecorder) KillVMI(arg0 interface{}) *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "KillVMI", arg0)
}
func (_m *MockDomainManager) DeleteVMI(_param0 *v1.VirtualMachineInstance) error {
ret := _m.ctrl.Call(_m, "DeleteVMI", _param0)
ret0, _ := ret[0].(error)
return ret0
}
func (_mr *_MockDomainManagerRecorder) DeleteVMI(arg0 interface{}) *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "DeleteVMI", arg0)
}
func (_m *MockDomainManager) SignalShutdownVMI(_param0 *v1.VirtualMachineInstance) error {
ret := _m.ctrl.Call(_m, "SignalShutdownVMI", _param0)
ret0, _ := ret[0].(error)
View
@@ -51,6 +51,7 @@ import (
type DomainManager interface {
SyncVMI(*v1.VirtualMachineInstance, bool) (*api.DomainSpec, error)
KillVMI(*v1.VirtualMachineInstance) error
DeleteVMI(*v1.VirtualMachineInstance) error
SignalShutdownVMI(*v1.VirtualMachineInstance) error
ListAllDomains() ([]*api.Domain, error)
}
@@ -246,7 +247,7 @@ func (l *LibvirtDomainManager) SignalShutdownVMI(vmi *v1.VirtualMachineInstance)
}
if domSpec.Metadata.KubeVirt.GracePeriod.DeletionTimestamp == nil {
err = dom.Shutdown()
err = dom.ShutdownFlags(libvirt.DOMAIN_SHUTDOWN_ACPI_POWER_BTN)
if err != nil {
log.Log.Object(vmi).Reason(err).Error("Signalling graceful shutdown failed.")
return err
@@ -289,8 +290,8 @@ func (l *LibvirtDomainManager) KillVMI(vmi *v1.VirtualMachineInstance) error {
return err
}
if domState == libvirt.DOMAIN_RUNNING || domState == libvirt.DOMAIN_PAUSED {
err = dom.Destroy()
if domState == libvirt.DOMAIN_RUNNING || domState == libvirt.DOMAIN_PAUSED || domState == libvirt.DOMAIN_SHUTDOWN {
err = dom.DestroyFlags(libvirt.DOMAIN_DESTROY_GRACEFUL)
if err != nil {
if domainerrors.IsNotFound(err) {
return nil
@@ -299,14 +300,30 @@ func (l *LibvirtDomainManager) KillVMI(vmi *v1.VirtualMachineInstance) error {
return err
}
log.Log.Object(vmi).Info("Domain stopped.")
return nil
}
err = dom.Undefine()
log.Log.Object(vmi).Info("Domain not running or paused, nothing to do.")
return nil
}
func (l *LibvirtDomainManager) DeleteVMI(vmi *v1.VirtualMachineInstance) error {
domName := api.VMINamespaceKeyFunc(vmi)
dom, err := l.virConn.LookupDomainByName(domName)
if err != nil {
// If the domain does not exist, we are done
if domainerrors.IsNotFound(err) {
return nil
} else {
log.Log.Object(vmi).Reason(err).Error("Getting the domain failed.")
return err
}
log.Log.Object(vmi).Reason(err).Error("Undefining the domain state failed.")
}
defer dom.Free()
err = dom.Undefine()
if err != nil {
log.Log.Object(vmi).Reason(err).Error("Undefining the domain failed.")
return err
}
log.Log.Object(vmi).Info("Domain undefined.")
View
@@ -139,27 +139,24 @@ var _ = Describe("Manager", func() {
table.DescribeTable("should try to undefine a VirtualMachineInstance in state",
func(state libvirt.DomainState) {
mockConn.EXPECT().LookupDomainByName(testDomainName).Return(mockDomain, nil)
mockDomain.EXPECT().GetState().Return(state, 1, nil)
mockDomain.EXPECT().Undefine().Return(nil)
manager, _ := NewLibvirtDomainManager(mockConn)
err := manager.KillVMI(newVMI(testNamespace, testVmName))
err := manager.DeleteVMI(newVMI(testNamespace, testVmName))
Expect(err).To(BeNil())
},
table.Entry("crashed", libvirt.DOMAIN_CRASHED),
table.Entry("shutdown", libvirt.DOMAIN_SHUTDOWN),
table.Entry("shutoff", libvirt.DOMAIN_SHUTOFF),
table.Entry("unknown", libvirt.DOMAIN_NOSTATE),
)
table.DescribeTable("should try to destroy and undefine a VirtualMachineInstance in state",
table.DescribeTable("should try to destroy a VirtualMachineInstance in state",
func(state libvirt.DomainState) {
mockConn.EXPECT().LookupDomainByName(testDomainName).Return(mockDomain, nil)
mockDomain.EXPECT().GetState().Return(state, 1, nil)
mockDomain.EXPECT().Destroy().Return(nil)
mockDomain.EXPECT().Undefine().Return(nil)
mockDomain.EXPECT().DestroyFlags(libvirt.DOMAIN_DESTROY_GRACEFUL).Return(nil)
manager, _ := NewLibvirtDomainManager(mockConn)
err := manager.KillVMI(newVMI(testNamespace, testVmName))
Expect(err).To(BeNil())
},
table.Entry("shuttingDown", libvirt.DOMAIN_SHUTDOWN),
table.Entry("running", libvirt.DOMAIN_RUNNING),
table.Entry("paused", libvirt.DOMAIN_PAUSED),
)
View
@@ -24,6 +24,7 @@ import (
"io"
"os"
"os/signal"
"time"
"golang.org/x/crypto/ssh/terminal"
@@ -35,6 +36,8 @@ import (
"kubevirt.io/kubevirt/pkg/virtctl/templates"
)
var timeout int
func NewCommand(clientConfig clientcmd.ClientConfig) *cobra.Command {
cmd := &cobra.Command{
Use: "console (vmi)",
@@ -46,6 +49,8 @@ func NewCommand(clientConfig clientcmd.ClientConfig) *cobra.Command {
return c.Run(cmd, args)
},
}
cmd.Flags().IntVar(&timeout, "timeout", 5, "The number of minutes to wait for the virtual machine instance to be ready.")
cmd.SetUsageTemplate(templates.UsageTemplate())
return cmd
}
@@ -55,8 +60,11 @@ type Console struct {
}
func usage() string {
usage := "# Connect to the console on VirtualMachineInstance 'myvmi':\n"
usage += "virtctl console myvmi"
usage := `# Connect to the console on VirtualMachineInstance 'myvmi':
virtctl console myvmi
# Configure one minute timeout (default 5 minutes)
virtctl console --timeout=1 myvmi`
return usage
}
@@ -73,15 +81,6 @@ func (c *Console) Run(cmd *cobra.Command, args []string) error {
return err
}
state, err := terminal.MakeRaw(int(os.Stdin.Fd()))
if err != nil {
return fmt.Errorf("Make raw terminal failed: %s", err)
}
fmt.Fprint(os.Stderr, "Escape sequence is ^]\n")
in := os.Stdin
out := os.Stdout
stdinReader, stdinWriter := io.Pipe()
stdoutReader, stdoutWriter := io.Pipe()
@@ -93,10 +92,16 @@ func (c *Console) Run(cmd *cobra.Command, args []string) error {
writeStop := make(chan error)
readStop := make(chan error)
// Wait until the virtual machine is in running phase, user interrupt or timeout
runningChan := make(chan error)
waitInterrupt := make(chan os.Signal, 1)
signal.Notify(waitInterrupt, os.Interrupt)
go func() {
con, err := virtCli.VirtualMachineInstance(namespace).SerialConsole(vmi)
con, err := virtCli.VirtualMachineInstance(namespace).SerialConsole(vmi, time.Duration(timeout)*time.Minute)
runningChan <- err
if err != nil {
resChan <- err
return
}
@@ -106,6 +111,26 @@ func (c *Console) Run(cmd *cobra.Command, args []string) error {
})
}()
select {
case <-waitInterrupt:
// Make a new line in the terminal
fmt.Println()
return nil
case err = <-runningChan:
if err != nil {
return err
}
}
state, err := terminal.MakeRaw(int(os.Stdin.Fd()))
if err != nil {
return fmt.Errorf("Make raw terminal failed: %s", err)
}
fmt.Fprint(os.Stderr, "Successfully connected to ", vmi, " console. The escape sequence is ^]\n")
in := os.Stdin
out := os.Stdout
go func() {
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
View
@@ -31,7 +31,7 @@ var loadBalancerIP string
var port int32
var nodePort int32
var strProtocol string
var intTargetPort int
var strTargetPort string
var strServiceType string
var portName string
var namespace string
@@ -41,8 +41,8 @@ func NewExposeCommand(clientConfig clientcmd.ClientConfig) *cobra.Command {
cmd := &cobra.Command{
Use: "expose TYPE NAME",
Short: "Expose a virtual machine as a new service.",
Long: `Looks up a virtual machine, offline virtual machine or virtual machine replica set by name and use its selector as the selector for a new service on the specified port.
A virtual machine replica set will be exposed as a service only if its selector is convertible to a selector that service supports, i.e. when the selector contains only the matchLabels component.
Long: `Looks up a virtual machine instance, virtual machine or virtual machine instance replica set by name and use its selector as the selector for a new service on the specified port.
A virtual machine instance replica set will be exposed as a service only if its selector is convertible to a selector that service supports, i.e. when the selector contains only the matchLabels component.
Note that if no port is specified via --port and the exposed resource has multiple ports, all will be re-used by the new service.
Also if no labels are specified, the new service will re-use the labels from the resource it exposes.
@@ -66,7 +66,7 @@ virtualmachineinstance (vmi), virtualmachine (vm), virtualmachineinstancereplica
cmd.Flags().Int32Var(&port, "port", 0, "The port that the service should serve on")
cmd.MarkFlagRequired("port")
cmd.Flags().StringVar(&strProtocol, "protocol", "TCP", "The network protocol for the service to be created.")
cmd.Flags().IntVar(&intTargetPort, "target-port", 0, "Name or number for the port on the VM that the service should direct traffic to. Optional.")
cmd.Flags().StringVar(&strTargetPort, "target-port", "", "Name or number for the port on the VM that the service should direct traffic to. Optional.")
cmd.Flags().Int32Var(&nodePort, "node-port", 0, "Port used to expose the service on each node in a cluster.")
cmd.Flags().StringVar(&strServiceType, "type", "ClusterIP", "Type for this service: ClusterIP, NodePort, or LoadBalancer.")
cmd.Flags().StringVar(&portName, "port-name", "", "Name of the port. Optional.")
@@ -76,14 +76,14 @@ virtualmachineinstance (vmi), virtualmachine (vm), virtualmachineinstancereplica
}
func usage() string {
usage := ` # Expose SSH to a virtual machine called 'myvm' as a node port (5555) of the cluster:
virtctl expose vm myvm --port=5555 --target-port=22 --name=myvm-ssh --type=NodePort")`
usage := ` # Expose SSH to a virtual machine instance called 'myvm' as a node port (5555) of the cluster:
virtctl expose vmi myvm --port=5555 --target-port=22 --name=myvm-ssh --type=NodePort")`
return usage
}
// executing the "expose" command
func (o *Command) RunE(cmd *cobra.Command, args []string) error {
// first argument is type of VM: VM, offline VM or replica set VM
// first argument is type of VM: VMI, VM or VMIRS
vmType := strings.ToLower(args[0])
// second argument must be name of the VM
vmName := args[1]
@@ -94,7 +94,7 @@ func (o *Command) RunE(cmd *cobra.Command, args []string) error {
var serviceType v1.ServiceType
// convert from integer to the IntOrString type
targetPort = intstr.FromInt(intTargetPort)
targetPort = intstr.Parse(strTargetPort)
// convert from string to the protocol enum
switch strProtocol {
@@ -139,30 +139,30 @@ func (o *Command) RunE(cmd *cobra.Command, args []string) error {
switch vmType {
case "vmi", "vmis", "virtualmachineinstance", "virtualmachineinstances":
// get the VM
vm, err := virtClient.VirtualMachineInstance(namespace).Get(vmName, &options)
vmi, err := virtClient.VirtualMachineInstance(namespace).Get(vmName, &options)
if err != nil {
return fmt.Errorf("error fetching VirtualMachineInstance: %v", err)
}
serviceSelector = vm.ObjectMeta.Labels
serviceSelector = vmi.ObjectMeta.Labels
// remove unwanted labels
delete(serviceSelector, "kubevirt.io/nodeName")
case "vm", "vms", "virtualmachine", "virtualmachines":
// get the offline VM
ovm, err := virtClient.VirtualMachine(namespace).Get(vmName, &options)
vm, err := virtClient.VirtualMachine(namespace).Get(vmName, &options)
if err != nil {
return fmt.Errorf("error fetching OfflineVirtual: %v", err)
}
serviceSelector = ovm.Spec.Template.ObjectMeta.Labels
serviceSelector = vm.Spec.Template.ObjectMeta.Labels
case "vmirs", "vmirss", "virtualmachineinstancereplicaset", "virtualmachineinstancereplicasets":
// get the VM replica set
vmrs, err := virtClient.ReplicaSet(namespace).Get(vmName, options)
vmirs, err := virtClient.ReplicaSet(namespace).Get(vmName, options)
if err != nil {
return fmt.Errorf("error fetching VirtualMachineInstance ReplicaSet: %v", err)
}
if len(vmrs.Spec.Selector.MatchExpressions) > 0 {
if len(vmirs.Spec.Selector.MatchExpressions) > 0 {
return fmt.Errorf("cannot expose VirtualMachineInstance ReplicaSet with match expressions")
}
serviceSelector = vmrs.Spec.Selector.MatchLabels
serviceSelector = vmirs.Spec.Selector.MatchLabels
default:
return fmt.Errorf("unsupported resource type: %s", vmType)
}
View
@@ -202,5 +202,12 @@ var _ = Describe("Expose", func() {
Expect(cmd()).NotTo(BeNil())
})
})
Context("With string target-port", func() {
It("should succeed", func() {
err := tests.NewRepeatableVirtctlCommand(expose.COMMAND_EXPOSE, "vmi", vmName, "--name", "my-service",
"--port", "9999", "--target-port", "http")
Expect(err()).To(BeNil())
})
})
})
})
View
@@ -30,6 +30,8 @@ import (
"kubevirt.io/kubevirt/pkg/api/v1"
"kubevirt.io/kubevirt/pkg/kubecli"
"kubevirt.io/kubevirt/tests"
k8sv1 "k8s.io/api/core/v1"
)
var _ = Describe("Console", func() {
@@ -47,10 +49,9 @@ var _ = Describe("Console", func() {
By("Creating a new VirtualMachineInstance")
Expect(virtClient.RestClient().Post().Resource("virtualmachineinstances").Namespace(tests.NamespaceTestDefault).Body(vmi).Do().Error()).To(Succeed())
tests.WaitForSuccessfulVMIStart(vmi)
By("Expecting the VirtualMachineInstance console")
expecter, _, err := tests.NewConsoleExpecter(virtClient, vmi, 10*time.Second)
expecter, _, err := tests.NewConsoleExpecter(virtClient, vmi, 30*time.Second)
Expect(err).ToNot(HaveOccurred())
defer expecter.Close()
@@ -88,7 +89,6 @@ var _ = Describe("Console", func() {
By("Creating a new VirtualMachineInstance")
Expect(virtClient.RestClient().Post().Resource("virtualmachineinstances").Namespace(tests.NamespaceTestDefault).Body(vmi).Do().Error()).To(Succeed())
tests.WaitForSuccessfulVMIStart(vmi)
for i := 0; i < 5; i++ {
By("Checking that the console output equals to expected one")
@@ -100,6 +100,61 @@ var _ = Describe("Console", func() {
Expect(err).ToNot(HaveOccurred())
}
}, 220)
It("should wait until the virtual machine is in running state and return a stream interface", func() {
vmi := tests.NewRandomVMIWithEphemeralDisk(tests.RegistryDiskFor(tests.RegistryDiskAlpine))
By("Creating a new VirtualMachineInstance")
Expect(virtClient.RestClient().Post().Resource("virtualmachineinstances").Namespace(tests.NamespaceTestDefault).Body(vmi).Do().Error()).To(Succeed())
_, err := virtClient.VirtualMachineInstance(vmi.Namespace).SerialConsole(vmi.Name, 30*time.Second)
Expect(err).ToNot(HaveOccurred())
}, 220)
It("should fail waiting for the virtual machine instance to be running", func() {
vmi := tests.NewRandomVMIWithEphemeralDisk(tests.RegistryDiskFor(tests.RegistryDiskAlpine))
vmi.Spec.Affinity = &k8sv1.Affinity{
NodeAffinity: &k8sv1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &k8sv1.NodeSelector{
NodeSelectorTerms: []k8sv1.NodeSelectorTerm{
{
MatchExpressions: []k8sv1.NodeSelectorRequirement{
{Key: "kubernetes.io/hostname", Operator: k8sv1.NodeSelectorOpIn, Values: []string{"notexist"}},
},
},
},
},
},
}
By("Creating a new VirtualMachineInstance")
Expect(virtClient.RestClient().Post().Resource("virtualmachineinstances").Namespace(tests.NamespaceTestDefault).Body(vmi).Do().Error()).To(Succeed())
_, err := virtClient.VirtualMachineInstance(vmi.Namespace).SerialConsole(vmi.Name, 30*time.Second)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("Timeout trying to connect to the virtual machine instance"))
}, 180)
It("should fail waiting for the expecter", func() {
vmi := tests.NewRandomVMIWithEphemeralDisk(tests.RegistryDiskFor(tests.RegistryDiskAlpine))
vmi.Spec.Affinity = &k8sv1.Affinity{
NodeAffinity: &k8sv1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &k8sv1.NodeSelector{
NodeSelectorTerms: []k8sv1.NodeSelectorTerm{
{
MatchExpressions: []k8sv1.NodeSelectorRequirement{
{Key: "kubernetes.io/hostname", Operator: k8sv1.NodeSelectorOpIn, Values: []string{"notexist"}},
},
},
},
},
},
}
By("Creating a new VirtualMachineInstance")
Expect(virtClient.RestClient().Post().Resource("virtualmachineinstances").Namespace(tests.NamespaceTestDefault).Body(vmi).Do().Error()).To(Succeed())
By("Expecting the VirtualMachineInstance console")
_, _, err := tests.NewConsoleExpecter(virtClient, vmi, 30*time.Second)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("Timeout trying to connect to the virtual machine instance"))
}, 180)
})
})
})
Oops, something went wrong.