View
@@ -58,8 +58,12 @@ spec:
cores:
format: int64
type: integer
model:
type: string
devices:
properties:
autoattachGraphicsDevice:
type: boolean
disks:
items:
properties:
@@ -194,6 +198,8 @@ spec:
properties:
limits:
type: object
overcommitGuestOverhead:
type: boolean
requests:
type: object
required:
View
@@ -1,6 +1,6 @@
# Virtual Machine Presets
apiVersion: kubevirt.io/v1alpha2
kind: VirtualMachinePreset
kind: VirtualMachineInstancePreset
metadata:
name: windows-server-2012r2
selector:
View
@@ -5,6 +5,7 @@ metadata:
name: kubevirt.io:admin
labels:
kubevirt.io: ""
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups:
- subresources.kubevirt.io
@@ -47,6 +48,7 @@ metadata:
name: kubevirt.io:edit
labels:
kubevirt.io: ""
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rules:
- apiGroups:
- subresources.kubevirt.io
@@ -77,6 +79,7 @@ metadata:
name: kubevirt.io:view
labels:
kubevirt.io: ""
rbac.authorization.k8s.io/aggregate-to-view: "true"
rules:
- apiGroups:
- kubevirt.io
@@ -391,7 +394,7 @@ spec:
image: {{.DockerPrefix}}/virt-api:{{.DockerTag}}
imagePullPolicy: IfNotPresent
command:
- "/virt-api"
- "virt-api"
- "--port"
- "8443"
- "--subresources-only"
@@ -428,7 +431,7 @@ spec:
image: {{.DockerPrefix}}/virt-controller:{{.DockerTag}}
imagePullPolicy: IfNotPresent
command:
- "/virt-controller"
- "virt-controller"
- "--launcher-image"
- "{{.DockerPrefix}}/virt-launcher:{{.DockerTag}}"
- "--port"
@@ -480,7 +483,7 @@ spec:
image: {{.DockerPrefix}}/virt-handler:{{.DockerTag}}
imagePullPolicy: IfNotPresent
command:
- "/virt-handler"
- "virt-handler"
- "-v"
- "3"
- "--hostname-override"
View
@@ -246,6 +246,15 @@ func (in *Devices) DeepCopyInto(out *Devices) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.AutoattachGraphicsDevice != nil {
in, out := &in.AutoattachGraphicsDevice, &out.AutoattachGraphicsDevice
if *in == nil {
*out = nil
} else {
*out = new(bool)
**out = **in
}
}
return
}
View
@@ -98,6 +98,13 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
Format: "int64",
},
},
"model": {
SchemaProps: spec.SchemaProps{
Description: "Model specifies the CPU model inside the VMI. List of available models https://github.com/libvirt/libvirt/blob/master/src/cpu/cpu_map.xml. You also can specify special cases like \"host-passthrough\" to get the same CPU as the node and \"host-model\" to get CPU closest to the node one. You can find more information under https://libvirt.org/formatdomain.html#elementsCPU. Defaults to host-model.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
@@ -235,6 +242,13 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
},
},
},
"autoattachGraphicsDevice": {
SchemaProps: spec.SchemaProps{
Description: "Wheater to attach the default graphics device or not. VNC will not be available if set to false. Defaults to true.",
Type: []string{"boolean"},
Format: "",
},
},
},
},
},
@@ -1016,6 +1030,13 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
},
},
},
"overcommitGuestOverhead": {
SchemaProps: spec.SchemaProps{
Description: "Don't ask the scheduler to take the guest-management overhead into account. Instead put the overhead only into the requested memory limits. This can lead to crashes if all memory is in use on a node. Defaults to false.",
Type: []string{"boolean"},
Format: "",
},
},
},
},
},
View
@@ -86,6 +86,10 @@ type ResourceRequirements struct {
// Valid resource keys are "memory" and "cpu".
// +optional
Limits v1.ResourceList `json:"limits,omitempty"`
// Don't ask the scheduler to take the guest-management overhead into account. Instead
// put the overhead only into the requested memory limits. This can lead to crashes if
// all memory is in use on a node. Defaults to false.
OvercommitGuestOverhead bool `json:"overcommitGuestOverhead,omitempty"`
}
// CPU allow specifying the CPU topology
@@ -95,6 +99,14 @@ type CPU struct {
// Cores specifies the number of cores inside the vmi.
// Must be a value greater or equal 1.
Cores uint32 `json:"cores,omitempty"`
// Model specifies the CPU model inside the VMI.
// List of available models https://github.com/libvirt/libvirt/blob/master/src/cpu/cpu_map.xml.
// You also can specify special cases like "host-passthrough" to get the same CPU as the node
// and "host-model" to get CPU closest to the node one.
// You can find more information under https://libvirt.org/formatdomain.html#elementsCPU.
// Defaults to host-model.
// +optional
Model string `json:"model,omitempty"`
}
// Memory allow specifying the VirtualMachineInstance memory features
@@ -138,6 +150,9 @@ type Devices struct {
Watchdog *Watchdog `json:"watchdog,omitempty"`
// Interfaces describe network interfaces which are added to the vm
Interfaces []Interface `json:"interfaces,omitempty"`
// Wheater to attach the default graphics device or not.
// VNC will not be available if set to false. Defaults to true.
AutoattachGraphicsDevice *bool `json:"autoattachGraphicsDevice,omitempty"`
}
// ---
View
@@ -26,15 +26,17 @@ func (DomainSpec) SwaggerDoc() map[string]string {
func (ResourceRequirements) SwaggerDoc() map[string]string {
return map[string]string{
"requests": "Requests is a description of the initial vmi resources.\nValid resource keys are \"memory\" and \"cpu\".\n+optional",
"limits": "Limits describes the maximum amount of compute resources allowed.\nValid resource keys are \"memory\" and \"cpu\".\n+optional",
"requests": "Requests is a description of the initial vmi resources.\nValid resource keys are \"memory\" and \"cpu\".\n+optional",
"limits": "Limits describes the maximum amount of compute resources allowed.\nValid resource keys are \"memory\" and \"cpu\".\n+optional",
"overcommitGuestOverhead": "Don't ask the scheduler to take the guest-management overhead into account. Instead\nput the overhead only into the requested memory limits. This can lead to crashes if\nall memory is in use on a node. Defaults to false.",
}
}
func (CPU) SwaggerDoc() map[string]string {
return map[string]string{
"": "CPU allow specifying the CPU topology",
"cores": "Cores specifies the number of cores inside the vmi.\nMust be a value greater or equal 1.",
"model": "Model specifies the CPU model inside the VMI.\nList of available models https://github.com/libvirt/libvirt/blob/master/src/cpu/cpu_map.xml.\nYou also can specify special cases like \"host-passthrough\" to get the same CPU as the node\nand \"host-model\" to get CPU closest to the node one.\nYou can find more information under https://libvirt.org/formatdomain.html#elementsCPU.\nDefaults to host-model.\n+optional",
}
}
@@ -66,9 +68,10 @@ func (Firmware) SwaggerDoc() map[string]string {
func (Devices) SwaggerDoc() map[string]string {
return map[string]string{
"disks": "Disks describes disks, cdroms, floppy and luns which are connected to the vmi",
"watchdog": "Watchdog describes a watchdog device which can be added to the vmi",
"interfaces": "Interfaces describe network interfaces which are added to the vm",
"disks": "Disks describes disks, cdroms, floppy and luns which are connected to the vmi",
"watchdog": "Watchdog describes a watchdog device which can be added to the vmi",
"interfaces": "Interfaces describe network interfaces which are added to the vm",
"autoattachGraphicsDevice": "Wheater to attach the default graphics device or not.\nVNC will not be available if set to false. Defaults to true.",
}
}
View
@@ -44,7 +44,8 @@ var exampleJSON = `{
}
},
"cpu": {
"cores": 3
"cores": 3,
"model": "Conroe"
},
"machine": {
"type": "q35"
@@ -296,6 +297,7 @@ var _ = Describe("Schema", func() {
}
exampleVMI.Spec.Domain.CPU = &CPU{
Cores: 3,
Model: "Conroe",
}
exampleVMI.Spec.Domain.Devices.Interfaces = []Interface{
Interface{
View
@@ -47,12 +47,9 @@ type SubresourceAPIApp struct {
VirtCli kubecli.KubevirtClient
}
func (app *SubresourceAPIApp) requestHandler(request *restful.Request, response *restful.Response, cmd []string) {
func (app *SubresourceAPIApp) requestHandler(request *restful.Request, response *restful.Response, vmi *v1.VirtualMachineInstance, cmd []string) {
vmiName := request.PathParameter("name")
namespace := request.PathParameter("namespace")
podName, httpStatusCode, err := app.remoteExecInfo(vmiName, namespace)
podName, httpStatusCode, err := app.remoteExecInfo(vmi)
if err != nil {
log.Log.Reason(err).Error("Failed to gather remote exec info for subresource request.")
response.WriteError(httpStatusCode, err)
@@ -81,7 +78,7 @@ func (app *SubresourceAPIApp) requestHandler(request *restful.Request, response
httpResponseChan := make(chan int)
copyErr := make(chan error)
go func() {
httpCode, err := remoteExecHelper(podName, namespace, cmd, inReader, outWriter)
httpCode, err := remoteExecHelper(podName, vmi.Namespace, cmd, inReader, outWriter)
log.Log.Errorf("%v", err)
httpResponseChan <- httpCode
}()
@@ -114,18 +111,39 @@ func (app *SubresourceAPIApp) VNCRequestHandler(request *restful.Request, respon
vmiName := request.PathParameter("name")
namespace := request.PathParameter("namespace")
cmd := []string{"/usr/share/kubevirt/virt-launcher/sock-connector", fmt.Sprintf("/var/run/kubevirt-private/%s/%s/virt-%s", namespace, vmiName, "vnc")}
vmi, code, err := app.fetchVirtualMachineInstance(vmiName, namespace)
if err != nil {
log.Log.Reason(err).Error("Failed to gather remote exec info for subresource request.")
response.WriteError(code, err)
return
}
// If there are no graphics devices present, we can't proceed
if vmi.Spec.Domain.Devices.AutoattachGraphicsDevice != nil && *vmi.Spec.Domain.Devices.AutoattachGraphicsDevice == false {
err := fmt.Errorf("No graphics devices are present.")
log.Log.Reason(err).Error("Can't establish VNC connection.")
response.WriteError(http.StatusBadRequest, err)
return
}
cmd := []string{"/sock-connector", fmt.Sprintf("/var/run/kubevirt-private/%s/%s/virt-%s", namespace, vmiName, "vnc")}
app.requestHandler(request, response, cmd)
app.requestHandler(request, response, vmi, cmd)
}
func (app *SubresourceAPIApp) ConsoleRequestHandler(request *restful.Request, response *restful.Response) {
vmiName := request.PathParameter("name")
namespace := request.PathParameter("namespace")
cmd := []string{"/usr/share/kubevirt/virt-launcher/sock-connector", fmt.Sprintf("/var/run/kubevirt-private/%s/%s/virt-%s", namespace, vmiName, "serial0")}
cmd := []string{"/sock-connector", fmt.Sprintf("/var/run/kubevirt-private/%s/%s/virt-%s", namespace, vmiName, "serial0")}
vmi, code, err := app.fetchVirtualMachineInstance(vmiName, namespace)
if err != nil {
log.Log.Reason(err).Error("Failed to gather remote exec info for subresource request.")
response.WriteError(code, err)
return
}
app.requestHandler(request, response, cmd)
app.requestHandler(request, response, vmi, cmd)
}
func (app *SubresourceAPIApp) findPod(namespace string, name string) (string, error) {
@@ -147,22 +165,26 @@ func (app *SubresourceAPIApp) findPod(namespace string, name string) (string, er
return podList.Items[0].ObjectMeta.Name, nil
}
func (app *SubresourceAPIApp) remoteExecInfo(name string, namespace string) (string, int, error) {
podName := ""
func (app *SubresourceAPIApp) fetchVirtualMachineInstance(name string, namespace string) (*v1.VirtualMachineInstance, int, error) {
vmi, err := app.VirtCli.VirtualMachineInstance(namespace).Get(name, &k8smetav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return "", http.StatusNotFound, goerror.New(fmt.Sprintf("VirtualMachineInstance %s in namespace %s not found.", name, namespace))
return nil, http.StatusNotFound, goerror.New(fmt.Sprintf("VirtualMachineInstance %s in namespace %s not found.", name, namespace))
}
return podName, http.StatusInternalServerError, err
return nil, http.StatusInternalServerError, err
}
return vmi, 0, nil
}
func (app *SubresourceAPIApp) remoteExecInfo(vmi *v1.VirtualMachineInstance) (string, int, error) {
podName := ""
if vmi.IsRunning() == false {
return podName, http.StatusBadRequest, goerror.New(fmt.Sprintf("Unable to connect to VirtualMachineInstance because phase is %s instead of %s", vmi.Status.Phase, v1.Running))
}
podName, err = app.findPod(namespace, name)
podName, err := app.findPod(vmi.Namespace, vmi.Name)
if err != nil {
return podName, http.StatusBadRequest, fmt.Errorf("unable to find matching pod for remote execution: %v", err)
}
View
@@ -67,17 +67,13 @@ var _ = Describe("VirtualMachineInstance Subresources", func() {
podList.Items = append(podList.Items, *pod)
server.AppendHandlers(
ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", "/apis/kubevirt.io/v1alpha2/namespaces/default/virtualmachineinstances/testvmi"),
ghttp.RespondWithJSONEncoded(http.StatusOK, vmi),
),
ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", "/api/v1/namespaces/default/pods"),
ghttp.RespondWithJSONEncoded(http.StatusOK, podList),
),
)
podName, httpStatusCode, err := app.remoteExecInfo("testvmi", "default")
podName, httpStatusCode, err := app.remoteExecInfo(vmi)
Expect(err).ToNot(HaveOccurred())
Expect(podName).To(Equal("madeup-name"))
@@ -90,14 +86,7 @@ var _ = Describe("VirtualMachineInstance Subresources", func() {
vmi.Status.Phase = v1.Succeeded
vmi.ObjectMeta.SetUID(uuid.NewUUID())
server.AppendHandlers(
ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", "/apis/kubevirt.io/v1alpha2/namespaces/default/virtualmachineinstances/testvmi"),
ghttp.RespondWithJSONEncoded(http.StatusOK, vmi),
),
)
_, httpStatusCode, err := app.remoteExecInfo("testvmi", "default")
_, httpStatusCode, err := app.remoteExecInfo(vmi)
Expect(err).To(HaveOccurred())
Expect(httpStatusCode).To(Equal(http.StatusBadRequest))
@@ -113,17 +102,13 @@ var _ = Describe("VirtualMachineInstance Subresources", func() {
podList.Items = []k8sv1.Pod{}
server.AppendHandlers(
ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", "/apis/kubevirt.io/v1alpha2/namespaces/default/virtualmachineinstances/testvmi"),
ghttp.RespondWithJSONEncoded(http.StatusOK, vmi),
),
ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", "/api/v1/namespaces/default/pods"),
ghttp.RespondWithJSONEncoded(http.StatusOK, podList),
),
)
_, httpStatusCode, err := app.remoteExecInfo("testvmi", "default")
_, httpStatusCode, err := app.remoteExecInfo(vmi)
Expect(err).To(HaveOccurred())
Expect(httpStatusCode).To(Equal(http.StatusBadRequest))
View
@@ -195,7 +195,9 @@ func (t *templateService) RenderLaunchManifest(vmi *v1.VirtualMachineInstance) (
} else {
// Add overhead memory
memoryRequest := resources.Requests[k8sv1.ResourceMemory]
memoryRequest.Add(*memoryOverhead)
if !vmi.Spec.Domain.Resources.OvercommitGuestOverhead {
memoryRequest.Add(*memoryOverhead)
}
resources.Requests[k8sv1.ResourceMemory] = memoryRequest
if memoryLimit, ok := resources.Limits[k8sv1.ResourceMemory]; ok {
@@ -204,7 +206,7 @@ func (t *templateService) RenderLaunchManifest(vmi *v1.VirtualMachineInstance) (
}
}
command := []string{"/entrypoint.sh",
command := []string{"/usr/share/kubevirt/virt-launcher/entrypoint.sh",
"--qemu-timeout", "5m",
"--name", domain,
"--namespace", namespace,
@@ -379,7 +381,9 @@ func getMemoryOverhead(domain v1.DomainSpec) *resource.Quantity {
overhead.Add(resource.MustParse("8Mi"))
// Add video RAM overhead
overhead.Add(resource.MustParse("16Mi"))
if domain.Devices.AutoattachGraphicsDevice == nil || *domain.Devices.AutoattachGraphicsDevice == true {
overhead.Add(resource.MustParse("16Mi"))
}
return overhead
}
View
@@ -64,7 +64,7 @@ var _ = Describe("Template", func() {
Expect(pod.Spec.NodeSelector).To(Equal(map[string]string{
v1.NodeSchedulable: "true",
}))
Expect(pod.Spec.Containers[0].Command).To(Equal([]string{"/entrypoint.sh",
Expect(pod.Spec.Containers[0].Command).To(Equal([]string{"/usr/share/kubevirt/virt-launcher/entrypoint.sh",
"--qemu-timeout", "5m",
"--name", "testvmi",
"--namespace", "testns",
@@ -99,7 +99,7 @@ var _ = Describe("Template", func() {
"kubernetes.io/hostname": "master",
v1.NodeSchedulable: "true",
}))
Expect(pod.Spec.Containers[0].Command).To(Equal([]string{"/entrypoint.sh",
Expect(pod.Spec.Containers[0].Command).To(Equal([]string{"/usr/share/kubevirt/virt-launcher/entrypoint.sh",
"--qemu-timeout", "5m",
"--name", "testvmi",
"--namespace", "default",
@@ -247,6 +247,35 @@ var _ = Describe("Template", func() {
Expect(pod.Spec.Containers[0].Resources.Requests.Memory().String()).To(Equal("1099507557"))
Expect(pod.Spec.Containers[0].Resources.Limits.Memory().String()).To(Equal("2099507557"))
})
It("should overcommit guest overhead if selected, by only adding the overhead to memory limits", func() {
vmi := v1.VirtualMachineInstance{
ObjectMeta: metav1.ObjectMeta{
Name: "testvmi",
Namespace: "default",
UID: "1234",
},
Spec: v1.VirtualMachineInstanceSpec{
Domain: v1.DomainSpec{
Resources: v1.ResourceRequirements{
OvercommitGuestOverhead: true,
Requests: kubev1.ResourceList{
kubev1.ResourceMemory: resource.MustParse("1G"),
},
Limits: kubev1.ResourceList{
kubev1.ResourceMemory: resource.MustParse("2G"),
},
},
},
},
}
pod, err := svc.RenderLaunchManifest(&vmi)
Expect(err).ToNot(HaveOccurred())
Expect(pod.Spec.Containers[0].Resources.Requests.Memory().String()).To(Equal("1G"))
Expect(pod.Spec.Containers[0].Resources.Limits.Memory().String()).To(Equal("2099507557"))
})
It("should not add unset resources", func() {
vmi := v1.VirtualMachineInstance{
@@ -276,6 +305,40 @@ var _ = Describe("Template", func() {
Expect(pod.Spec.Containers[0].Resources.Requests.Memory().ToDec().ScaledValue(resource.Mega)).To(Equal(int64(179)))
Expect(pod.Spec.Containers[0].Resources.Limits).To(BeNil())
})
table.DescribeTable("should check autoattachGraphicsDevicse", func(autoAttach *bool, memory int) {
vmi := v1.VirtualMachineInstance{
ObjectMeta: metav1.ObjectMeta{
Name: "testvmi",
Namespace: "default",
UID: "1234",
},
Spec: v1.VirtualMachineInstanceSpec{
Domain: v1.DomainSpec{
CPU: &v1.CPU{Cores: 3},
Resources: v1.ResourceRequirements{
Requests: kubev1.ResourceList{
kubev1.ResourceCPU: resource.MustParse("1m"),
kubev1.ResourceMemory: resource.MustParse("64M"),
},
},
},
},
}
vmi.Spec.Domain.Devices = v1.Devices{
AutoattachGraphicsDevice: autoAttach,
}
pod, err := svc.RenderLaunchManifest(&vmi)
Expect(err).ToNot(HaveOccurred())
Expect(pod.Spec.Containers[0].Resources.Requests.Memory().ToDec().ScaledValue(resource.Mega)).To(Equal(int64(memory)))
},
table.Entry("and consider graphics overhead if it is not set", nil, 179),
table.Entry("and consider graphics overhead if it is set to true", True(), 179),
table.Entry("and not consider graphics overhead if it is set to false", False(), 162),
)
})
Context("with hugepages constraints", func() {
@@ -508,3 +571,13 @@ func MakeFakeConfigMapWatcher(configMaps []kubev1.ConfigMap) *cache.ListWatch {
}
return cmListWatch
}
func True() *bool {
b := true
return &b
}
func False() *bool {
b := false
return &b
}
View
@@ -7,7 +7,6 @@ import (
"github.com/golang/mock/gomock"
. "github.com/onsi/ginkgo"
// "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
k8sv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
View
@@ -43,7 +43,7 @@ type OnShutdownCallback func(pid int)
type monitor struct {
timeout time.Duration
pid int
commandPrefix string
cmdlineMatchStr string
start time.Time
isDone bool
gracePeriod int
@@ -167,12 +167,12 @@ func InitializeSharedDirectories(baseDir string) error {
return nil
}
func NewProcessMonitor(commandPrefix string,
func NewProcessMonitor(cmdlineMatchStr string,
gracefulShutdownTriggerFile string,
gracePeriod int,
shutdownCallback OnShutdownCallback) ProcessMonitor {
return &monitor{
commandPrefix: commandPrefix,
cmdlineMatchStr: cmdlineMatchStr,
gracePeriod: gracePeriod,
gracefulShutdownTriggerFile: gracefulShutdownTriggerFile,
shutdownCallback: shutdownCallback,
@@ -195,31 +195,31 @@ func (mon *monitor) refresh() {
return
}
log.Log.V(4).Infof("Refreshing. CommandPrefix %s pid %d", mon.commandPrefix, mon.pid)
log.Log.V(4).Infof("Refreshing. CommandPrefix %s pid %d", mon.cmdlineMatchStr, mon.pid)
expired := mon.isGracePeriodExpired()
// is the process there?
if mon.pid == 0 {
var err error
mon.pid, err = findPid(mon.commandPrefix)
mon.pid, err = findPid(mon.cmdlineMatchStr)
if err != nil {
log.Log.Infof("Still missing PID for %s, %v", mon.commandPrefix, err)
log.Log.Infof("Still missing PID for %s, %v", mon.cmdlineMatchStr, err)
// check to see if we've timed out looking for the process
elapsed := time.Since(mon.start)
if mon.timeout > 0 && elapsed >= mon.timeout {
log.Log.Infof("%s not found after timeout", mon.commandPrefix)
log.Log.Infof("%s not found after timeout", mon.cmdlineMatchStr)
mon.isDone = true
} else if expired {
log.Log.Infof("%s not found after grace period expired", mon.commandPrefix)
log.Log.Infof("%s not found after grace period expired", mon.cmdlineMatchStr)
mon.isDone = true
}
return
}
log.Log.Infof("Found PID for %s: %d", mon.commandPrefix, mon.pid)
log.Log.Infof("Found PID for %s: %d", mon.cmdlineMatchStr, mon.pid)
}
exists, err := pidExists(mon.pid)
@@ -228,7 +228,7 @@ func (mon *monitor) refresh() {
return
}
if exists == false {
log.Log.Infof("Process %s and pid %d is gone!", mon.commandPrefix, mon.pid)
log.Log.Infof("Process %s and pid %d is gone!", mon.cmdlineMatchStr, mon.pid)
mon.pid = 0
mon.isDone = true
return
@@ -322,14 +322,12 @@ func findPid(commandNamePrefix string) (int, error) {
}
for _, entry := range entries {
argv, err := readProcCmdline(entry)
content, err := ioutil.ReadFile(entry)
if err != nil {
return 0, err
}
match, _ := filepath.Match(fmt.Sprintf("%s*", commandNamePrefix), filepath.Base(argv[0]))
// command prefix does not match
if !match {
if !strings.Contains(string(content), commandNamePrefix) {
continue
}
View
@@ -37,6 +37,8 @@ var _ = Describe("VirtLauncher", func() {
var mon *monitor
var cmd *exec.Cmd
uuid := "123-123-123-123"
tmpDir, _ := ioutil.TempDir("", "monitortest")
log.Log.SetIOWriter(GinkgoWriter)
@@ -50,7 +52,7 @@ var _ = Describe("VirtLauncher", func() {
processStarted := false
StartProcess := func() {
cmd = exec.Command(processPath)
cmd = exec.Command(processPath, "--uuid", uuid)
err := cmd.Start()
Expect(err).ToNot(HaveOccurred())
@@ -102,7 +104,7 @@ var _ = Describe("VirtLauncher", func() {
syscall.Kill(pid, syscall.SIGTERM)
}
mon = &monitor{
commandPrefix: "fake-qemu",
cmdlineMatchStr: uuid,
gracePeriod: 30,
gracefulShutdownTriggerFile: triggerFile,
shutdownCallback: shutdownCallback,
View
@@ -38,6 +38,11 @@ import (
"kubevirt.io/kubevirt/pkg/registry-disk"
)
const (
CPUModeHostPassthrough = "host-passthrough"
CPUModeHostModel = "host-model"
)
type ConverterContext struct {
AllowEmulation bool
Secrets map[string]*k8sv1.Secret
@@ -439,17 +444,34 @@ func Convert_v1_VirtualMachine_To_api_Domain(vmi *v1.VirtualMachineInstance, dom
}
if vmi.Spec.Domain.CPU != nil {
domain.Spec.CPU.Topology = &CPUTopology{
Sockets: 1,
Cores: vmi.Spec.Domain.CPU.Cores,
Threads: 1,
// Set VM CPU cores
if vmi.Spec.Domain.CPU.Cores != 0 {
domain.Spec.CPU.Topology = &CPUTopology{
Sockets: 1,
Cores: vmi.Spec.Domain.CPU.Cores,
Threads: 1,
}
domain.Spec.VCPU = &VCPU{
Placement: "static",
CPUs: vmi.Spec.Domain.CPU.Cores,
}
}
domain.Spec.VCPU = &VCPU{
Placement: "static",
CPUs: vmi.Spec.Domain.CPU.Cores,
// Set VM CPU model and vendor
if vmi.Spec.Domain.CPU.Model != "" {
if vmi.Spec.Domain.CPU.Model == CPUModeHostModel || vmi.Spec.Domain.CPU.Model == CPUModeHostPassthrough {
domain.Spec.CPU.Mode = vmi.Spec.Domain.CPU.Model
} else {
domain.Spec.CPU.Mode = "custom"
domain.Spec.CPU.Model = vmi.Spec.Domain.CPU.Model
}
}
}
if vmi.Spec.Domain.CPU == nil || vmi.Spec.Domain.CPU.Model == "" {
domain.Spec.CPU.Mode = CPUModeHostModel
}
// Add mandatory console device
var serialPort uint = 0
var serialType string = "serial"
@@ -476,15 +498,27 @@ func Convert_v1_VirtualMachine_To_api_Domain(vmi *v1.VirtualMachineInstance, dom
},
}
// Add mandatory vnc device
domain.Spec.Devices.Graphics = []Graphics{
{
Listen: &GraphicsListen{
Type: "socket",
Socket: fmt.Sprintf("/var/run/kubevirt-private/%s/%s/virt-vnc", vmi.ObjectMeta.Namespace, vmi.ObjectMeta.Name),
if vmi.Spec.Domain.Devices.AutoattachGraphicsDevice == nil || *vmi.Spec.Domain.Devices.AutoattachGraphicsDevice == true {
var heads uint = 1
var vram uint = 16384
domain.Spec.Devices.Video = []Video{
{
Model: VideoModel{
Type: "vga",
Heads: &heads,
VRam: &vram,
},
},
Type: "vnc",
},
}
domain.Spec.Devices.Graphics = []Graphics{
{
Listen: &GraphicsListen{
Type: "socket",
Socket: fmt.Sprintf("/var/run/kubevirt-private/%s/%s/virt-vnc", vmi.ObjectMeta.Namespace, vmi.ObjectMeta.Name),
},
Type: "vnc",
},
}
}
// Add mandatory interface
View
@@ -25,6 +25,8 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo/extensions/table"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
k8smeta "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -417,7 +419,7 @@ var _ = Describe("Converter", func() {
<vendor_id state="off" value="myvendor"></vendor_id>
</hyperv>
</features>
<cpu></cpu>
<cpu mode="host-model"></cpu>
</domain>`, domainType)
var c *ConverterContext
@@ -446,17 +448,56 @@ var _ = Describe("Converter", func() {
Expect(vmiToDomainXMLToDomainSpec(vmi, c).Type).To(Equal(domainType))
})
It("should convert CPU cores", func() {
It("should convert CPU cores and model", func() {
v1.SetObjectDefaults_VirtualMachineInstance(vmi)
vmi.Spec.Domain.CPU = &v1.CPU{
Cores: 3,
Model: "Conroe",
}
Expect(vmiToDomainXMLToDomainSpec(vmi, c).CPU.Topology.Cores).To(Equal(uint32(3)))
Expect(vmiToDomainXMLToDomainSpec(vmi, c).CPU.Topology.Sockets).To(Equal(uint32(1)))
Expect(vmiToDomainXMLToDomainSpec(vmi, c).CPU.Topology.Threads).To(Equal(uint32(1)))
Expect(vmiToDomainXMLToDomainSpec(vmi, c).VCPU.Placement).To(Equal("static"))
Expect(vmiToDomainXMLToDomainSpec(vmi, c).VCPU.CPUs).To(Equal(uint32(3)))
domainSpec := vmiToDomainXMLToDomainSpec(vmi, c)
Expect(domainSpec.CPU.Topology.Cores).To(Equal(uint32(3)))
Expect(domainSpec.CPU.Topology.Sockets).To(Equal(uint32(1)))
Expect(domainSpec.CPU.Topology.Threads).To(Equal(uint32(1)))
Expect(domainSpec.CPU.Mode).To(Equal("custom"))
Expect(domainSpec.CPU.Model).To(Equal("Conroe"))
Expect(domainSpec.VCPU.Placement).To(Equal("static"))
Expect(domainSpec.VCPU.CPUs).To(Equal(uint32(3)))
})
table.DescribeTable("should convert CPU model", func(model string) {
v1.SetObjectDefaults_VirtualMachineInstance(vmi)
vmi.Spec.Domain.CPU = &v1.CPU{
Cores: 3,
Model: model,
}
domainSpec := vmiToDomainXMLToDomainSpec(vmi, c)
Expect(domainSpec.CPU.Mode).To(Equal(model))
},
table.Entry(CPUModeHostPassthrough, CPUModeHostPassthrough),
table.Entry(CPUModeHostModel, CPUModeHostModel),
)
Context("when CPU spec defined and model not", func() {
It("should set host-model CPU mode", func() {
v1.SetObjectDefaults_VirtualMachineInstance(vmi)
vmi.Spec.Domain.CPU = &v1.CPU{
Cores: 3,
}
domainSpec := vmiToDomainXMLToDomainSpec(vmi, c)
Expect(domainSpec.CPU.Mode).To(Equal("host-model"))
})
})
Context("when CPU spec not defined", func() {
It("should set host-model CPU mode", func() {
v1.SetObjectDefaults_VirtualMachineInstance(vmi)
domainSpec := vmiToDomainXMLToDomainSpec(vmi, c)
Expect(domainSpec.CPU.Mode).To(Equal("host-model"))
})
})
It("should select explicitly chosen network model", func() {
@@ -535,6 +576,42 @@ var _ = Describe("Converter", func() {
Expect(Convert_v1_VirtualMachine_To_api_Domain(vmi, &Domain{}, c)).ToNot(Succeed())
})
})
Context("graphics and video device", func() {
table.DescribeTable("should check autoattachGraphicsDevicse", func(autoAttach *bool, devices int) {
vmi := v1.VirtualMachineInstance{
ObjectMeta: k8smeta.ObjectMeta{
Name: "testvmi",
Namespace: "default",
UID: "1234",
},
Spec: v1.VirtualMachineInstanceSpec{
Domain: v1.DomainSpec{
CPU: &v1.CPU{Cores: 3},
Resources: v1.ResourceRequirements{
Requests: k8sv1.ResourceList{
k8sv1.ResourceCPU: resource.MustParse("1m"),
k8sv1.ResourceMemory: resource.MustParse("64M"),
},
},
},
},
}
vmi.Spec.Domain.Devices = v1.Devices{
AutoattachGraphicsDevice: autoAttach,
}
domain := vmiToDomain(&vmi, &ConverterContext{AllowEmulation: true})
Expect(domain.Spec.Devices.Video).To(HaveLen(devices))
Expect(domain.Spec.Devices.Graphics).To(HaveLen(devices))
},
table.Entry("and add the graphics and video device if it is not set", nil, 1),
table.Entry("and add the graphics and video device if it is set to true", True(), 1),
table.Entry("and not add the graphics and video device if it is set to false", False(), 0),
)
})
})
func diskToDiskXML(disk *v1.Disk) string {
@@ -572,3 +649,13 @@ func xmlToDomainSpec(data string) *DomainSpec {
func vmiToDomainXMLToDomainSpec(vmi *v1.VirtualMachineInstance, c *ConverterContext) *DomainSpec {
return xmlToDomainSpec(vmiToDomainXML(vmi, c))
}
func True() *bool {
b := true
return &b
}
func False() *bool {
b := false
return &b
}
View
@@ -2,23 +2,6 @@ package api
const DefaultBridgeName = "br1"
func SetDefaults_Devices(devices *Devices) {
// Use vga as video device, since it is better than cirrus
// and does not require guest drivers
var heads uint = 1
var vram uint = 16384
devices.Video = []Video{
{
Model: VideoModel{
Type: "vga",
Heads: &heads,
VRam: &vram,
},
},
}
}
func SetDefaults_OSType(ostype *OSType) {
ostype.OS = "hvm"
View
@@ -121,6 +121,7 @@ type VCPU struct {
type CPU struct {
Mode string `xml:"mode,attr,omitempty"`
Model string `xml:"model,omitempty"`
Topology *CPUTopology `xml:"topology"`
}
View
@@ -73,7 +73,8 @@ var exampleXML = `<domain type="kvm" xmlns:qemu="http://libvirt.org/schemas/doma
<features>
<acpi></acpi>
</features>
<cpu>
<cpu mode="custom">
<model>Conroe</model>
<topology sockets="1" cores="2" threads="1"></topology>
</cpu>
<vcpu placement="static">2</vcpu>
@@ -143,6 +144,8 @@ var _ = Describe("Schema", func() {
Placement: "static",
CPUs: 2,
}
exampleDomain.Spec.CPU.Mode = "custom"
exampleDomain.Spec.CPU.Model = "Conroe"
exampleDomain.Spec.Metadata.KubeVirt.UID = "f4686d2c-6e8d-4335-b8fd-81bee22f4814"
exampleDomain.Spec.Metadata.KubeVirt.GracePeriod.DeletionGracePeriodSeconds = 5
View
@@ -39,7 +39,6 @@ func SetObjectDefaults_Domain(in *Domain) {
if in.Spec.SysInfo != nil {
SetDefaults_SysInfo(in.Spec.SysInfo)
}
SetDefaults_Devices(&in.Spec.Devices)
}
func SetObjectDefaults_DomainList(in *DomainList) {
View
@@ -125,7 +125,7 @@ func StartLibvirt(stopChan chan struct{}) {
go func() {
for {
exitChan := make(chan struct{})
cmd := exec.Command("/libvirtd.sh")
cmd := exec.Command("/usr/share/kubevirt/virt-launcher/libvirtd.sh")
err := cmd.Start()
if err != nil {
View
@@ -1363,3 +1363,65 @@ func GetNodeWithHugepages(virtClient kubecli.KubevirtClient, hugepages k8sv1.Res
}
return nil
}
// StartVmOnNode will start a VMI on the specified node
func StartVmOnNode(vmi *v1.VirtualMachineInstance, nodeName string) {
virtClient, err := kubecli.GetKubevirtClient()
PanicOnError(err)
vmi.Spec.Affinity = &v1.Affinity{
NodeAffinity: &k8sv1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &k8sv1.NodeSelector{
NodeSelectorTerms: []k8sv1.NodeSelectorTerm{
{
MatchExpressions: []k8sv1.NodeSelectorRequirement{
{Key: "kubernetes.io/hostname", Operator: k8sv1.NodeSelectorOpIn, Values: []string{nodeName}},
},
},
},
},
},
}
_, err = virtClient.VirtualMachineInstance(NamespaceTestDefault).Create(vmi)
Expect(err).ToNot(HaveOccurred())
WaitForSuccessfulVMIStart(vmi)
}
// RunCommandOnVmiPod will run specified command on the virt-launcher pod
func RunCommandOnVmiPod(vmi *v1.VirtualMachineInstance, command []string) string {
virtClient, err := kubecli.GetKubevirtClient()
PanicOnError(err)
pods, err := virtClient.CoreV1().Pods(NamespaceTestDefault).List(UnfinishedVMIPodSelector(vmi))
Expect(err).ToNot(HaveOccurred())
Expect(pods.Items).NotTo(BeEmpty())
vmiPod := pods.Items[0]
output, err := ExecuteCommandOnPod(
virtClient,
&vmiPod,
"compute",
command,
)
Expect(err).ToNot(HaveOccurred())
return output
}
// GetNodeLibvirtCapabilities returns node libvirt capabilities
func GetNodeLibvirtCapabilities(nodeName string) string {
// Create a virt-launcher pod, that can fetch virsh capabilities
vmi := NewRandomVMIWithEphemeralDiskAndUserdata(RegistryDiskFor(RegistryDiskCirros), "#!/bin/bash\necho 'hello'\n")
StartVmOnNode(vmi, nodeName)
return RunCommandOnVmiPod(vmi, []string{"virsh", "-r", "capabilities"})
}
// GetNodeCPUInfo returns output of lscpu on the pod that runs on the specified node
func GetNodeCPUInfo(nodeName string) string {
vmi := NewRandomVMIWithEphemeralDiskAndUserdata(RegistryDiskFor(RegistryDiskCirros), "#!/bin/bash\necho 'hello'\n")
StartVmOnNode(vmi, nodeName)
return RunCommandOnVmiPod(vmi, []string{"lscpu"})
}
View
@@ -22,6 +22,7 @@ package tests_test
import (
"flag"
"fmt"
"regexp"
"strconv"
"strings"
"time"
@@ -240,6 +241,125 @@ var _ = Describe("Configurations", func() {
})
})
// Verifies that the CPU model configured in the VMI spec is what the
// guest OS actually sees, for an explicit model, host-passthrough, and
// the default (libvirt-reported) model.
Context("with CPU spec", func() {
	// Regexps extracting the host CPU model/vendor from the
	// `virsh capabilities` XML and the model name from `lscpu` output.
	libvirtCPUModelRegexp := regexp.MustCompile(`<model>(\w+)\-*\w*</model>`)
	libvirtCPUVendorRegexp := regexp.MustCompile(`<vendor>(\w+)</vendor>`)
	cpuModelNameRegexp := regexp.MustCompile(`Model name:\s*([\s\w\-@\.\(\)]+)`)

	var libvirtCpuModel string
	var libvirtCpuVendor string
	var cpuModelName string
	var cpuVmi *v1.VirtualMachineInstance

	BeforeEach(func() {
		// Probe the first node for its CPU characteristics, then build a
		// VMI pinned to that same node so guest and host data match.
		nodes, err := virtClient.CoreV1().Nodes().List(metav1.ListOptions{})
		Expect(err).ToNot(HaveOccurred())
		Expect(nodes.Items).NotTo(BeEmpty())

		virshCaps := tests.GetNodeLibvirtCapabilities(nodes.Items[0].Name)

		model := libvirtCPUModelRegexp.FindStringSubmatch(virshCaps)
		Expect(len(model)).To(Equal(2))
		libvirtCpuModel = model[1]

		vendor := libvirtCPUVendorRegexp.FindStringSubmatch(virshCaps)
		Expect(len(vendor)).To(Equal(2))
		libvirtCpuVendor = vendor[1]

		cpuInfo := tests.GetNodeCPUInfo(nodes.Items[0].Name)
		modelName := cpuModelNameRegexp.FindStringSubmatch(cpuInfo)
		Expect(len(modelName)).To(Equal(2))
		cpuModelName = modelName[1]

		cpuVmi = tests.NewRandomVMIWithEphemeralDiskAndUserdata(tests.RegistryDiskFor(tests.RegistryDiskCirros), "#!/bin/bash\necho 'hello'\n")
		cpuVmi.Spec.Affinity = &v1.Affinity{
			NodeAffinity: &kubev1.NodeAffinity{
				RequiredDuringSchedulingIgnoredDuringExecution: &kubev1.NodeSelector{
					NodeSelectorTerms: []kubev1.NodeSelectorTerm{
						{
							MatchExpressions: []kubev1.NodeSelectorRequirement{
								{Key: "kubernetes.io/hostname", Operator: kubev1.NodeSelectorOpIn, Values: []string{nodes.Items[0].Name}},
							},
						},
					},
				},
			},
		}
	})

	Context("when CPU model defined", func() {
		It("should report defined CPU model", func() {
			// Pick a model every hypervisor of the node's vendor supports.
			vmiModel := "Conroe"
			if libvirtCpuVendor == "AMD" {
				vmiModel = "Opteron_G1"
			}
			cpuVmi.Spec.Domain.CPU = &v1.CPU{
				Model: vmiModel,
			}

			By("Starting a VirtualMachineInstance")
			_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(cpuVmi)
			Expect(err).ToNot(HaveOccurred())
			tests.WaitForSuccessfulVMIStart(cpuVmi)

			By("Expecting the VirtualMachineInstance console")
			expecter, err := tests.LoggedInCirrosExpecter(cpuVmi)
			Expect(err).ToNot(HaveOccurred())
			defer expecter.Close()

			By("Checking the CPU model under the guest OS")
			_, err = expecter.ExpectBatch([]expect.Batcher{
				&expect.BSnd{S: fmt.Sprintf("grep %s /proc/cpuinfo\n", vmiModel)},
				&expect.BExp{R: "model name"},
			}, 10*time.Second)
			// Previously the ExpectBatch error was dropped, turning this
			// assertion into a no-op; a failed grep must fail the test.
			Expect(err).ToNot(HaveOccurred())
		})
	})

	Context("when CPU model equals to passthrough", func() {
		It("should report exactly the same model as node CPU", func() {
			cpuVmi.Spec.Domain.CPU = &v1.CPU{
				Model: "host-passthrough",
			}

			By("Starting a VirtualMachineInstance")
			_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(cpuVmi)
			Expect(err).ToNot(HaveOccurred())
			tests.WaitForSuccessfulVMIStart(cpuVmi)

			By("Expecting the VirtualMachineInstance console")
			expecter, err := tests.LoggedInCirrosExpecter(cpuVmi)
			Expect(err).ToNot(HaveOccurred())
			defer expecter.Close()

			By("Checking the CPU model under the guest OS")
			_, err = expecter.ExpectBatch([]expect.Batcher{
				&expect.BSnd{S: fmt.Sprintf("grep %s /proc/cpuinfo\n", cpuModelName)},
				&expect.BExp{R: "model name"},
			}, 10*time.Second)
			// Fail the spec if the host model name is not visible in the guest.
			Expect(err).ToNot(HaveOccurred())
		})
	})

	Context("when CPU model not defined", func() {
		It("should report CPU model from libvirt capabilities", func() {
			By("Starting a VirtualMachineInstance")
			_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(cpuVmi)
			Expect(err).ToNot(HaveOccurred())
			tests.WaitForSuccessfulVMIStart(cpuVmi)

			By("Expecting the VirtualMachineInstance console")
			expecter, err := tests.LoggedInCirrosExpecter(cpuVmi)
			Expect(err).ToNot(HaveOccurred())
			defer expecter.Close()

			By("Checking the CPU model under the guest OS")
			_, err = expecter.ExpectBatch([]expect.Batcher{
				&expect.BSnd{S: fmt.Sprintf("grep %s /proc/cpuinfo\n", libvirtCpuModel)},
				&expect.BExp{R: "model name"},
			}, 10*time.Second)
			// Fail the spec if the default model is not visible in the guest.
			Expect(err).ToNot(HaveOccurred())
		})
	})
})
Context("New VirtualMachineInstance with all supported drives", func() {
var vmi *v1.VirtualMachineInstance
View
@@ -78,7 +78,7 @@ var _ = Describe("VMIlifecycle", func() {
Eventually(logs,
11*time.Second,
500*time.Millisecond).
Should(ContainSubstring("Found PID for qemu"))
Should(ContainSubstring("Found PID for"))
})
It("should reject POST if schema is invalid", func() {