diff --git a/driverloader/driverloader_linux.go b/driverloader/driverloader_linux.go
index 0178e392..9ebc2082 100644
--- a/driverloader/driverloader_linux.go
+++ b/driverloader/driverloader_linux.go
@@ -11,6 +11,7 @@ import (
"github.com/hyperhq/runv/hypervisor/libvirt"
"github.com/hyperhq/runv/hypervisor/qemu"
"github.com/hyperhq/runv/hypervisor/xen"
+ "github.com/hyperhq/runv/hypervisor/xenpv"
"github.com/hyperhq/runv/lib/vsock"
)
@@ -38,6 +39,12 @@ func Probe(driver string) (hd hypervisor.HypervisorDriver, err error) {
glog.V(1).Infof("Driver %q loaded", driver)
return qd, nil
}
+ case "xenpv":
+ xpd := xenpv.InitDriver()
+ if xpd != nil {
+ glog.V(1).Infof("Driver xenpv loaded")
+ return xpd, nil
+ }
case "xen", "":
xd := xen.InitDriver()
if xd != nil {
diff --git a/hyperstart/api/json/constants.go b/hyperstart/api/json/constants.go
index 4577e985..ee41877a 100644
--- a/hyperstart/api/json/constants.go
+++ b/hyperstart/api/json/constants.go
@@ -38,3 +38,4 @@ const HYPER_VSOCK_CTL_PORT = 2718
const HYPER_VSOCK_MSG_PORT = 2719
const HYPER_USE_SERIAL = "hyper_use_serial"
+const HYPER_P9_USE_XEN = "hyper_p9_xen"
diff --git a/hypervisor/context.go b/hypervisor/context.go
index d8887438..e720b12f 100644
--- a/hypervisor/context.go
+++ b/hypervisor/context.go
@@ -58,6 +58,8 @@ type VmContext struct {
cancelWatchHyperstart chan struct{}
+ sockConnected chan bool
+
logPrefix string
lock sync.RWMutex //protect update of context
@@ -115,28 +117,28 @@ func InitContext(id string, hub chan VmEvent, client chan *types.VmResponse, dc
}
ctx = &VmContext{
- Id: id,
- Boot: boot,
- PauseState: PauseStateUnpaused,
- pciAddr: PciAddrFrom,
- scsiId: 0,
- GuestCid: cid,
- Hub: hub,
- client: client,
- DCtx: dc,
- HomeDir: homeDir,
- HyperSockName: hyperSockName,
- TtySockName: ttySockName,
- ConsoleSockName: consoleSockName,
- ShareDir: shareDir,
- timer: nil,
- handler: stateRunning,
- current: StateRunning,
- volumes: make(map[string]*DiskContext),
- containers: make(map[string]*ContainerContext),
- networks: NewNetworkContext(),
- logPrefix: fmt.Sprintf("SB[%s] ", id),
-
+ Id: id,
+ Boot: boot,
+ PauseState: PauseStateUnpaused,
+ pciAddr: PciAddrFrom,
+ scsiId: 0,
+ GuestCid: cid,
+ Hub: hub,
+ client: client,
+ DCtx: dc,
+ HomeDir: homeDir,
+ HyperSockName: hyperSockName,
+ TtySockName: ttySockName,
+ ConsoleSockName: consoleSockName,
+ ShareDir: shareDir,
+ timer: nil,
+ handler: stateRunning,
+ current: StateRunning,
+ volumes: make(map[string]*DiskContext),
+ containers: make(map[string]*ContainerContext),
+ networks: NewNetworkContext(),
+ logPrefix: fmt.Sprintf("SB[%s] ", id),
+ sockConnected: make(chan bool),
cancelWatchHyperstart: make(chan struct{}),
}
ctx.networks.sandbox = ctx
diff --git a/hypervisor/driver.go b/hypervisor/driver.go
index d549b300..65250df8 100644
--- a/hypervisor/driver.go
+++ b/hypervisor/driver.go
@@ -92,6 +92,12 @@ type DriverContext interface {
Close()
}
+type ConsoleDriverContext interface {
+ DriverContext
+
+ ConnectConsole(console chan<- string) error
+}
+
type LazyDriverContext interface {
DriverContext
diff --git a/hypervisor/hypervisor.go b/hypervisor/hypervisor.go
index afc5ec1f..6ea47be4 100644
--- a/hypervisor/hypervisor.go
+++ b/hypervisor/hypervisor.go
@@ -64,6 +64,10 @@ loop:
timeout.Stop()
}
+func (ctx *VmContext) WaitSockConnected() {
+ <-ctx.sockConnected
+}
+
func (ctx *VmContext) Launch() {
var err error
@@ -84,7 +88,7 @@ func (ctx *VmContext) Launch() {
if ctx.LogLevel(DEBUG) {
go watchVmConsole(ctx)
}
-
+ close(ctx.sockConnected)
go ctx.loop()
}
diff --git a/hypervisor/kvmtool/lkvm.go b/hypervisor/kvmtool/lkvm.go
index 2a4fc969..b2c86f93 100644
--- a/hypervisor/kvmtool/lkvm.go
+++ b/hypervisor/kvmtool/lkvm.go
@@ -37,6 +37,7 @@ type KvmtoolDriver struct {
//implement the hypervisor.DriverContext interface
type KvmtoolContext struct {
driver *KvmtoolDriver
+ conPty string
}
func InitDriver() *KvmtoolDriver {
@@ -221,11 +222,6 @@ func (kc *KvmtoolContext) Launch(ctx *hypervisor.VmContext) {
conPty, ctlPty, ttyPty := lookupPtys(output[:len])
ctx.Log(hypervisor.INFO, "find %v %v %v", conPty, ctlPty, ttyPty)
if conPty != "" && ctlPty != "" && ttyPty != "" {
- conSock, err = net.Listen("unix", ctx.ConsoleSockName)
- if err != nil {
- return
- }
-
ctlSock, err = net.Listen("unix", ctx.HyperSockName)
if err != nil {
return
@@ -235,9 +231,9 @@ func (kc *KvmtoolContext) Launch(ctx *hypervisor.VmContext) {
return
}
- go sock2pty(conSock, conPty, false)
- go sock2pty(ctlSock, ctlPty, true)
- go sock2pty(ttySock, ttyPty, true)
+ kc.conPty = conPty
+ go sock2pty(ctlSock, ctlPty)
+ go sock2pty(ttySock, ttyPty)
go func() {
// without this, guest burst output will crash kvmtool, why?
@@ -264,7 +260,7 @@ func (kc *KvmtoolContext) Launch(ctx *hypervisor.VmContext) {
err = fmt.Errorf("cannot find pts devices used by lkvm")
}
-func sock2pty(ls net.Listener, ptypath string, input bool) {
+func sock2pty(ls net.Listener, ptypath string) {
defer ls.Close()
conn, err := ls.Accept()
@@ -299,13 +295,11 @@ func sock2pty(ls net.Listener, ptypath string, input bool) {
var newbuf []byte = nil
nr, er := src.Read(buf)
if nr > 0 {
+ newbuf = buf
if input {
newbuf = bytes.Replace(buf[:nr], []byte{1}, []byte{1, 1}, -1)
nr = len(newbuf)
- } else {
- newbuf = buf
}
-
nw, ew := dst.Write(newbuf[:nr])
if ew != nil {
glog.Infof("write failed: %v", ew)
@@ -334,10 +328,8 @@ func sock2pty(ls net.Listener, ptypath string, input bool) {
wg.Add(1)
go copy(conn, pty, false)
- if input {
- wg.Add(1)
- go copy(pty, conn, true)
- }
+ wg.Add(1)
+ go copy(pty, conn, true)
wg.Wait()
}
@@ -454,3 +446,33 @@ func (kc *KvmtoolContext) AddMem(ctx *hypervisor.VmContext, slot, size int) erro
func (kc *KvmtoolContext) Save(ctx *hypervisor.VmContext, path string) error {
return fmt.Errorf("Save is unsupported on kvmtool driver")
}
+
+func (kc *KvmtoolContext) ConnectConsole(console chan<- string) error {
+ pty, err := os.OpenFile(kc.conPty, os.O_RDWR|syscall.O_NOCTTY, 0600)
+ if err != nil {
+ glog.Errorf("fail to open %v, %v", kc.conPty, err)
+ return err
+ }
+
+ _, err = term.SetRawTerminal(pty.Fd())
+ if err != nil {
+ glog.Errorf("fail to setrowmode for %v: %v", kc.conPty, err)
+ return err
+ }
+
+ go func() {
+ data := make([]byte, 128)
+ for {
+ nr, err := pty.Read(data)
+ if err != nil {
+ glog.Errorf("fail to read console: %v", err)
+ break
+ }
+ console <- string(data[:nr])
+ }
+ pty.Close()
+ }()
+
+ return nil
+
+}
diff --git a/hypervisor/vm_console.go b/hypervisor/vm_console.go
index 1470c903..88fae3fd 100644
--- a/hypervisor/vm_console.go
+++ b/hypervisor/vm_console.go
@@ -59,24 +59,28 @@ func watchVmConsole(ctx *VmContext) {
return
}
- conn, err := utils.UnixSocketConnect(ctx.ConsoleSockName)
- if err != nil {
- ctx.Log(ERROR, "failed to connected to %s: %v", ctx.ConsoleSockName, err)
- return
- }
+ cout := make(chan string, 128)
+ if dctx, ok := ctx.DCtx.(ConsoleDriverContext); ok {
+ dctx.ConnectConsole(cout)
+ } else {
- ctx.Log(TRACE, "connected to %s", ctx.ConsoleSockName)
+ conn, err := utils.UnixSocketConnect(ctx.ConsoleSockName)
+ if err != nil {
+ ctx.Log(ERROR, "failed to connected to %s: %v", ctx.ConsoleSockName, err)
+ return
+ }
- tc, err := telnet.NewConn(conn)
- if err != nil {
- ctx.Log(ERROR, "fail to init telnet connection to %s: %v", ctx.ConsoleSockName, err)
- return
- }
- ctx.Log(TRACE, "connected %s as telnet mode.", ctx.ConsoleSockName)
+ ctx.Log(TRACE, "connected to %s", ctx.ConsoleSockName)
- cout := make(chan string, 128)
- go TtyLiner(tc, cout)
+ tc, err := telnet.NewConn(conn)
+ if err != nil {
+ ctx.Log(ERROR, "fail to init telnet connection to %s: %v", ctx.ConsoleSockName, err)
+ return
+ }
+ ctx.Log(TRACE, "connected %s as telnet mode.", ctx.ConsoleSockName)
+ go TtyLiner(tc, cout)
+ }
const ignoreLines = 128
for consoleLines := 0; consoleLines < ignoreLines; consoleLines++ {
line, ok := <-cout
diff --git a/hypervisor/xenpv/xenpv.go b/hypervisor/xenpv/xenpv.go
new file mode 100644
index 00000000..768fd7e3
--- /dev/null
+++ b/hypervisor/xenpv/xenpv.go
@@ -0,0 +1,304 @@
+// +build linux,with_xen
+
+package xenpv
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "os/exec"
+
+ "github.com/golang/glog"
+ api "github.com/hyperhq/runv/hyperstart/api/json"
+ "github.com/hyperhq/runv/hypervisor"
+ "github.com/hyperhq/runv/hypervisor/types"
+ xl "github.com/hyperhq/runv/lib/runvxenlight"
+ "github.com/hyperhq/runv/lib/utils"
+)
+
+const (
+ XENLIGHT_EXEC = "xl"
+)
+
+//implement the hypervisor.HypervisorDriver interface
+type XenPvDriver struct {
+ executable string
+ ctx *xl.Context
+}
+
+//implement the hypervisor.DriverContext interface
+type XenPvContext struct {
+ driver *XenPvDriver
+ domId xl.Domid
+}
+
+func InitDriver() *XenPvDriver {
+ cmd, err := exec.LookPath(XENLIGHT_EXEC)
+ if err != nil {
+ return nil
+ }
+
+ ctx := &xl.Context{}
+ ctx.Open()
+
+ ctx.SigChildHandle()
+
+ return &XenPvDriver{
+ executable: cmd,
+ ctx: ctx,
+ }
+}
+
+func (xd *XenPvDriver) Name() string {
+ return "xenpv"
+}
+
+func (xd *XenPvDriver) InitContext(homeDir string) hypervisor.DriverContext {
+ return &XenPvContext{
+ driver: xd,
+ }
+}
+
+func (xd *XenPvDriver) LoadContext(persisted map[string]interface{}) (hypervisor.DriverContext, error) {
+ if t, ok := persisted["hypervisor"]; !ok || t != xd.Name() {
+ return nil, fmt.Errorf("wrong driver type %v in persist info, expect %v", t, xd.Name())
+ }
+
+ name, ok := persisted["name"]
+ if !ok {
+ return nil, fmt.Errorf("there is no xenpv domain name")
+ }
+
+ id, err := xd.ctx.DomainQualifierToId(name.(string))
+ if err != nil {
+ return nil, fmt.Errorf("cannot find domain whose name is %v", name)
+ }
+
+ return &XenPvContext{
+ driver: xd,
+ domId: id,
+ }, nil
+}
+
+func (xd *XenPvDriver) SupportLazyMode() bool {
+ return false
+}
+
+func (xd *XenPvDriver) SupportVmSocket() bool {
+ return false
+}
+
+func (xc *XenPvContext) Launch(ctx *hypervisor.VmContext) {
+ if xc.driver.executable == "" {
+ glog.Errorf("can not find xl executable")
+ ctx.Hub <- &hypervisor.VmStartFailEvent{Message: "can not find xl executable"}
+ return
+ }
+ uuid, err := xl.GenerateUuid()
+ if err != nil {
+ glog.Errorf("generate uuid failed: %v\n", err)
+ ctx.Hub <- &hypervisor.VmStartFailEvent{Message: err.Error()}
+ return
+ }
+
+ boot := ctx.Boot
+ config := &xl.DomainConfig{
+ Cinfo: xl.CreateInfo{
+ Type: "pv",
+ Name: ctx.Id,
+ Uuid: uuid,
+ },
+ Binfo: xl.BuildInfo{
+ Kernel: boot.Kernel,
+ Initrd: boot.Initrd,
+ Cmdline: "console=hvc0 " + api.HYPER_P9_USE_XEN,
+ ClaimMode: "True",
+ MaxVcpus: boot.CPU,
+ MaxMemory: uint64(boot.Memory * 1024),
+ TargetMemory: uint64(boot.Memory * 1024),
+ DeviceModelVersion: "qemu_xen",
+ PvInfo: xl.PvInfo{},
+ },
+ Channels: []xl.ChannelInfo{
+ {
+ DevId: 0,
+ Name: "sh.hyper.channel.0",
+ Socket: xl.SocketInfo{Path: ctx.HyperSockName},
+ },
+ {
+ DevId: 1,
+ Name: "sh.hyper.channel.1",
+ Socket: xl.SocketInfo{Path: ctx.TtySockName},
+ },
+ },
+ P9: []xl.P9Info{
+ {
+ ShareTag: hypervisor.ShareDirTag,
+ ShareDir: ctx.ShareDir,
+ SecurityModel: "none",
+ },
+ },
+ }
+
+ b, err := json.Marshal(config)
+ if err != nil {
+ glog.Errorf("fail to marshal xen domain config: %v, %v", b, err)
+ ctx.Hub <- &hypervisor.VmStartFailEvent{Message: err.Error()}
+ return
+ }
+
+ xctx := xc.driver.ctx
+ domid, err := xctx.CreateNewDomainFromJson(string(b))
+ if err != nil {
+ glog.Errorf("fail to create xen pv domain: %v", err)
+ xctx.DestroyDomainByName(ctx.Id)
+ ctx.Hub <- &hypervisor.VmStartFailEvent{Message: err.Error()}
+ return
+ }
+
+ glog.Infof("create success, domid: %v\n", domid)
+
+ xc.domId = xl.Domid(domid)
+
+ go func() {
+ // don't unpause the domain until runv has connected to the hyperstart socket, otherwise the ready message may be lost
+ ctx.WaitSockConnected()
+ xctx.DomainUnpause(xc.domId)
+ }()
+}
+
+func (xc *XenPvContext) Associate(ctx *hypervisor.VmContext) {
+
+}
+
+func (xc *XenPvContext) Dump() (map[string]interface{}, error) {
+ return nil, nil
+}
+
+func (xc *XenPvContext) Shutdown(ctx *hypervisor.VmContext) {
+ go xc.driver.ctx.DestroyDomain(xc.domId)
+}
+
+func (xc *XenPvContext) Kill(ctx *hypervisor.VmContext) {
+ go func() {
+ xc.Shutdown(ctx)
+ ctx.Hub <- &hypervisor.VmKilledEvent{Success: true}
+ }()
+}
+
+func (xc *XenPvContext) Stats(ctx *hypervisor.VmContext) (*types.PodStats, error) {
+ return nil, nil
+}
+
+func (xc *XenPvContext) Close() {}
+
+func (xc *XenPvContext) Pause(ctx *hypervisor.VmContext, pause bool) error {
+ if pause {
+ return xc.driver.ctx.DomainPause(xc.domId)
+ }
+ return xc.driver.ctx.DomainUnpause(xc.domId)
+}
+
+func xvdId2Name(id int) string {
+ return "xvd" + utils.DiskId2Name(id)
+}
+
+func (xc *XenPvContext) AddDisk(ctx *hypervisor.VmContext, sourceType string, blockInfo *hypervisor.DiskDescriptor, result chan<- hypervisor.VmEvent) {
+ if blockInfo.Format == "rbd" {
+ result <- &hypervisor.DeviceFailed{
+ Session: nil,
+ }
+ glog.Infof("xenpv driver doesn't support rbd device")
+ return
+ }
+ devName := xvdId2Name(blockInfo.ScsiId)
+ if err := xc.driver.ctx.DomainAddDisk(xc.domId, blockInfo.Filename, devName, !blockInfo.ReadOnly); err != nil {
+ result <- &hypervisor.DeviceFailed{
+ Session: nil,
+ }
+ return
+ }
+ result <- &hypervisor.BlockdevInsertedEvent{
+ DeviceName: devName,
+ }
+}
+
+func (xc *XenPvContext) RemoveDisk(ctx *hypervisor.VmContext, blockInfo *hypervisor.DiskDescriptor, callback hypervisor.VmEvent, result chan<- hypervisor.VmEvent) {
+ if err := xc.driver.ctx.DomainRemoveDisk(xc.domId, xvdId2Name(blockInfo.ScsiId)); err != nil {
+ result <- &hypervisor.DeviceFailed{
+ Session: callback,
+ }
+ return
+ }
+
+ result <- callback
+}
+
+func (xc *XenPvContext) AddNic(ctx *hypervisor.VmContext, host *hypervisor.HostNicInfo, guest *hypervisor.GuestNicInfo, result chan<- hypervisor.VmEvent) {
+ callback := &hypervisor.NetDevInsertedEvent{
+ Id: host.Id,
+ Index: guest.Index,
+ DeviceName: guest.Device,
+ Address: guest.Busaddr,
+ }
+
+ if err := xc.driver.ctx.DomainAddNic(xc.domId, guest.Index, host.Bridge, host.Mac); err != nil {
+ result <- &hypervisor.DeviceFailed{
+ Session: callback,
+ }
+ return
+ }
+ result <- callback
+}
+
+func (xc *XenPvContext) RemoveNic(ctx *hypervisor.VmContext, n *hypervisor.InterfaceCreated, callback hypervisor.VmEvent, result chan<- hypervisor.VmEvent) {
+ if err := xc.driver.ctx.DomainRemoveNic(xc.domId, n.Index); err != nil {
+ result <- &hypervisor.DeviceFailed{
+ Session: callback,
+ }
+ return
+ }
+
+ result <- callback
+}
+
+func (xc *XenPvContext) SetCpus(ctx *hypervisor.VmContext, cpus int) error {
+ return fmt.Errorf("SetCpus is unsupported on xenpv driver")
+}
+
+func (xc *XenPvContext) AddMem(ctx *hypervisor.VmContext, slot, size int) error {
+ return fmt.Errorf("AddMem is unsupported on xenpv driver")
+}
+
+func (xc *XenPvContext) Save(ctx *hypervisor.VmContext, path string) error {
+ return fmt.Errorf("Save is unsupported on xenpv driver")
+}
+
+func (xc *XenPvContext) ConnectConsole(console chan<- string) error {
+ reader, writer := io.Pipe()
+ args := []string{"console", "-t", "pv", fmt.Sprintf("%d", xc.domId)}
+ cmd := exec.Command(xc.driver.executable, args...)
+ cmd.Stdout = writer
+ cmd.Stderr = writer
+ err := cmd.Start()
+ if err != nil {
+ return fmt.Errorf("fail to connect to console of dom %d: %v\n", xc.domId, err)
+ }
+
+ go func() {
+ data := make([]byte, 128)
+ for {
+ nr, err := reader.Read(data)
+ if err != nil {
+ glog.Errorf("fail to read console: %v", err)
+ break
+ }
+ console <- string(data[:nr])
+ }
+ reader.Close()
+ writer.Close()
+ cmd.Wait()
+ }()
+
+ return nil
+}
diff --git a/hypervisor/xenpv/xenpv_unsupported.go b/hypervisor/xenpv/xenpv_unsupported.go
new file mode 100644
index 00000000..e4fd67a2
--- /dev/null
+++ b/hypervisor/xenpv/xenpv_unsupported.go
@@ -0,0 +1,9 @@
+// +build !with_xen
+
+package xenpv
+
+import "github.com/hyperhq/runv/hypervisor"
+
+func InitDriver() hypervisor.HypervisorDriver {
+ return nil
+}
diff --git a/lib/runvxenlight/xenlight-runv.go b/lib/runvxenlight/xenlight-runv.go
new file mode 100644
index 00000000..4f9e0fa8
--- /dev/null
+++ b/lib/runvxenlight/xenlight-runv.go
@@ -0,0 +1,413 @@
+// +build linux,with_xen
+
+/*
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+package xenlight
+
+/*
+#cgo LDFLAGS: -lxenlight -lyajl -lxentoollog
+#include <stdlib.h>
+#include <libxl.h>
+#include <libxl_utils.h>
+
+typedef struct runvxl_domain_config {
+ int dom_type;
+ int domid;
+ char* name;
+ char* uuid;
+ char* kernel;
+ char* initrd;
+ char* cmdline;
+
+ int max_vcpus;
+ uint64_t max_memkb;
+
+ char *p9_tag;
+ char *p9_path;
+
+ char *hyper_path;
+ char *hyper_name;
+ char *tty_path;
+ char *tty_name;
+
+ void *extra;
+} runvxl_domain_config;
+
+static int runvxl_domain_create_new(libxl_ctx *ctx, runvxl_domain_config *config) {
+ int i;
+ uint32_t domid = 0;
+ libxl_domain_config d_config;
+
+ libxl_domain_config_init(&d_config);
+
+ d_config.num_p9s = 1;
+ d_config.p9 = malloc(sizeof(libxl_device_p9));
+ if (d_config.p9 == NULL) {
+ return -1;
+ }
+ d_config.p9->tag = config->p9_tag;
+ d_config.p9->path = config->p9_path;
+ d_config.p9->security_model = "none";
+
+ d_config.num_channels = 2;
+ d_config.channels = malloc(sizeof(libxl_device_channel) * 2);
+
+ d_config.channels[0].name = config->hyper_name;
+ d_config.channels[0].connection = LIBXL_CHANNEL_CONNECTION_SOCKET;
+ d_config.channels[0].u.socket.path = config->hyper_path;
+
+ d_config.channels[1].name = config->tty_name;
+ d_config.channels[1].connection = LIBXL_CHANNEL_CONNECTION_SOCKET;
+ d_config.channels[1].u.socket.path = config->tty_path;
+
+ d_config.c_info.type = config->dom_type;
+
+ d_config.b_info.type = config->dom_type;
+ d_config.b_info.max_vcpus = config->max_vcpus;
+ d_config.b_info.max_memkb = config->max_memkb;
+ d_config.b_info.kernel = config->kernel;
+ d_config.b_info.ramdisk = config->initrd;
+ d_config.b_info.cmdline = config->cmdline;
+
+ libxl_cpu_bitmap_alloc(ctx, &d_config.b_info.avail_vcpus, config->max_vcpus);
+ libxl_bitmap_set_none(&d_config.b_info.avail_vcpus);
+ for (i = 0; i < config->max_vcpus; i++)
+ libxl_bitmap_set((&d_config.b_info.avail_vcpus), i);
+
+ if (libxl_domain_create_new(ctx, &d_config, &domid, 0, 0))
+ return -1;
+
+ return domid;
+}
+
+static int runvxl_domain_create_new_from_json(libxl_ctx *ctx, char *config) {
+ int ret;
+ uint32_t domid = 0;
+ libxl_domain_config d_config;
+
+ libxl_domain_config_from_json(ctx, &d_config, config);
+
+ if (libxl_domain_create_new(ctx, &d_config, &domid, 0, 0))
+ return -1;
+
+ return domid;
+}
+
+static void runvxl_sigchld_handler(libxl_ctx* ctx) {
+ int status, res;
+ pid_t pid = waitpid(-1, &status, WNOHANG);
+ printf("got child pid: %d\n", pid);
+ if (pid > 0) {
+ res = libxl_childproc_reaped(ctx, pid, status);
+ printf("check whether child proc is created by libxl: %d\n", res);
+ }
+}
+
+static const libxl_childproc_hooks childproc_hooks = {
+ .chldowner = libxl_sigchld_owner_mainloop,
+};
+
+void runvxl_childproc_setmode(libxl_ctx *ctx) {
+ libxl_childproc_setmode(ctx, &childproc_hooks, 0);
+}
+
+int runvxl_add_nic(libxl_ctx *ctx, int32_t domid, int id, char *bridge, char *mac) {
+ libxl_device_nic nic;
+ int ret;
+
+ libxl_device_nic_init(&nic);
+ nic.bridge = strdup(bridge);
+ nic.nictype = LIBXL_NIC_TYPE_VIF;
+ nic.devid = id;
+ // TODO: why does a self-defined mac address keep the nic from coming up?
+ //memcpy(nic.mac, mac, 6);
+
+ ret = libxl_device_nic_add(ctx, domid, &nic, 0);
+ libxl_device_nic_dispose(&nic);
+ return ret;
+}
+
+int runvxl_remove_nic(libxl_ctx *ctx, int32_t domid, int id) {
+ libxl_device_nic nic;
+ int ret;
+
+ libxl_device_nic_init(&nic);
+ ret = libxl_devid_to_device_nic(ctx, domid, id, &nic);
+ if (ret)
+ goto cleanup;
+ ret = libxl_device_nic_remove(ctx, domid, &nic, 0);
+cleanup:
+ libxl_device_nic_dispose(&nic);
+ return ret;
+}
+
+int runvxl_add_disk(libxl_ctx *ctx, int32_t domid, char *filename, char *vdev, bool readwrite) {
+ libxl_device_disk disk;
+ int ret;
+
+ libxl_device_disk_init(&disk);
+ disk.format = LIBXL_DISK_FORMAT_RAW;
+ disk.pdev_path = strdup(filename);
+ disk.vdev = strdup(vdev);
+ disk.readwrite = readwrite;
+
+ ret = libxl_device_disk_add(ctx, domid, &disk, 0);
+ libxl_device_disk_dispose(&disk);
+ return ret;
+}
+
+int runvxl_remove_disk(libxl_ctx *ctx, int32_t domid, char *vdev) {
+ libxl_device_disk disk;
+ int ret;
+
+ libxl_device_disk_init(&disk);
+ ret = libxl_vdev_to_device_disk(ctx, domid, vdev, &disk);
+ if (ret)
+ goto cleanup;
+ ret = libxl_device_disk_remove(ctx, domid, &disk, 0);
+cleanup:
+ libxl_device_disk_dispose(&disk);
+ return ret;
+}
+
+
+int runvxl_domain_destroy_byname(libxl_ctx *ctx, char *name) {
+ int ret;
+ uint32_t domid;
+
+ ret = libxl_domain_qualifier_to_domid(ctx, name, &domid);
+ if (ret){
+ return ret;
+ }
+
+ return libxl_domain_destroy(ctx, domid, 0);
+}
+*/
+import "C"
+
+/*
+ * Other flags that may be needed at some point:
+ * -lnl-route-3 -lnl-3
+ *
+ * To get back to static linking:
+ * #cgo LDFLAGS: -lxenlight -lyajl_s -lxengnttab -lxenstore -lxenguest -lxentoollog -lxenevtchn -lxenctrl -lblktapctl -lxenforeignmemory -lxencall -lz -luuid -lutil
+ */
+
+import (
+ "crypto/rand"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "os/signal"
+ "syscall"
+ "unsafe"
+)
+
+type CreateInfo struct {
+ Type string `json:"type"`
+ Name string `json:"name"`
+ Uuid string `json:"uuid"`
+}
+
+type PvInfo struct {
+ SlackMemory int `json:"slack_memkb,omitempty"`
+ Bootloader string `json:"bootloader,omitempty"`
+ BootloaderArgs string `json:"bootloader_args,omitempty"`
+}
+
+type BuildInfo struct {
+ MaxVcpus int `json:"max_vcpus"`
+ MaxMemory uint64 `json:"max_memkb"`
+ TargetMemory uint64 `json:"target_memkb"`
+ ClaimMode string `json:"claim_mode"`
+ DeviceModeVersion string `json:"device_model_version"`
+ Kernel string `json:"kernel"`
+ Initrd string `json:"ramdisk"`
+ Cmdline string `json:"cmdline"`
+ PvInfo PvInfo `json:"type.pv"`
+}
+
+type P9Info struct {
+ ShareTag string `json:"tag"`
+ ShareDir string `json:"path"`
+ SecurityModel string `json:"security_model"`
+}
+
+type SocketInfo struct {
+ Path string `json:"path"`
+}
+
+type ChannelInfo struct {
+ DevId int `json:"devid"`
+ Name string `json:"name"`
+ Socket SocketInfo `json:"connection.socket"`
+}
+
+type DomainConfig struct {
+ Cinfo CreateInfo `json:"c_info"`
+ Binfo BuildInfo `json:"b_info"`
+ //Disks []DiskInfo `json:"disks"`
+ Channels []ChannelInfo `json:"channels"`
+ P9 []P9Info `json:"p9"`
+
+ domType int
+}
+
+func (dc *DomainConfig) toC() (rdc C.struct_runvxl_domain_config) {
+ rdc.dom_type = C.int(dc.domType)
+
+ cinfo := dc.Cinfo
+ rdc.uuid = C.CString(cinfo.Uuid)
+ rdc.name = C.CString(cinfo.Name)
+
+ binfo := dc.Binfo
+ rdc.kernel = C.CString(binfo.Kernel)
+ rdc.initrd = C.CString(binfo.Initrd)
+ rdc.cmdline = C.CString(binfo.Cmdline)
+
+ rdc.max_memkb = C.uint64_t(binfo.MaxMemory)
+ rdc.max_vcpus = C.int(binfo.MaxVcpus)
+
+ p9 := dc.P9[0]
+ rdc.p9_tag = C.CString(p9.ShareTag)
+ rdc.p9_path = C.CString(p9.ShareDir)
+
+ rdc.hyper_path = C.CString(dc.Channels[0].Socket.Path)
+ rdc.hyper_name = C.CString(dc.Channels[0].Name)
+
+ rdc.tty_path = C.CString(dc.Channels[1].Socket.Path)
+ rdc.tty_name = C.CString(dc.Channels[1].Name)
+
+ return
+}
+
+func (Ctx *Context) CreateNewDomain(config *DomainConfig) (int, error) {
+ if err := Ctx.CheckOpen(); err != nil {
+ return -1, err
+ }
+ rdc := config.toC()
+ ret := C.runvxl_domain_create_new(Ctx.ctx, &rdc)
+ if ret < 0 {
+ return -1, fmt.Errorf("fail to create xen domain: %d", ret)
+ }
+
+ return int(ret), nil
+}
+
+func (Ctx *Context) DestroyDomain(domId Domid) error {
+ ret := C.libxl_domain_destroy(Ctx.ctx, C.uint32_t(domId), (*C.libxl_asyncop_how)(nil))
+ if ret != 0 {
+ return fmt.Errorf("fail to destroy dom %v: %v\n", domId, ret)
+ }
+ return nil
+}
+
+func (Ctx *Context) DestroyDomainByName(name string) error {
+ ret := C.runvxl_domain_destroy_byname(Ctx.ctx, C.CString(name))
+ if ret != 0 {
+ return fmt.Errorf("fail to destroy dom %v: %v\n", name, ret)
+ }
+ return nil
+}
+
+func (Ctx *Context) CreateNewDomainFromJson(config string) (int, error) {
+ if err := Ctx.CheckOpen(); err != nil {
+ return -1, err
+ }
+ ret := C.runvxl_domain_create_new_from_json(Ctx.ctx, C.CString(config))
+ if ret < 0 {
+ return -1, fmt.Errorf("fail to create xen domain: %d", ret)
+ }
+
+ return int(ret), nil
+}
+
+func (Ctx *Context) SigChildHandle() {
+ sigchan := make(chan os.Signal, 1)
+ go func() {
+ for {
+ _, ok := <-sigchan
+ if !ok {
+ break
+ }
+ C.runvxl_sigchld_handler(Ctx.ctx)
+ }
+ }()
+ signal.Notify(sigchan, syscall.SIGCHLD)
+
+ C.runvxl_childproc_setmode(Ctx.ctx)
+}
+
+func (Ctx *Context) DomainAddNic(domId Domid, id int, bridge, mac string) error {
+ if ret := C.runvxl_add_nic(Ctx.ctx, C.int32_t(domId), C.int(id), C.CString(bridge), C.CString(mac)); ret != 0 {
+ return fmt.Errorf("fail to add nic %v for dom %v", id, domId)
+ }
+ return nil
+}
+
+func (Ctx *Context) DomainRemoveNic(domId Domid, id int) error {
+ if ret := C.runvxl_remove_nic(Ctx.ctx, C.int32_t(domId), C.int(id)); ret != 0 {
+ return fmt.Errorf("fail to remove nic %v for dom %v", id, domId)
+ }
+ return nil
+}
+
+func (Ctx *Context) DomainAddDisk(domId Domid, filename, vdev string, readwrite bool) error {
+ if ret := C.runvxl_add_disk(Ctx.ctx, C.int32_t(domId), C.CString(filename), C.CString(vdev), C.bool(readwrite)); ret != 0 {
+ return fmt.Errorf("fail to add vdev %v %v from dom %v\n", vdev, filename, domId)
+ }
+ return nil
+}
+
+func (Ctx *Context) DomainRemoveDisk(domId Domid, vdev string) error {
+ if ret := C.runvxl_remove_disk(Ctx.ctx, C.int32_t(domId), C.CString(vdev)); ret != 0 {
+ return fmt.Errorf("fail to remove vdev %v from dom %v\n", vdev, domId)
+ }
+ return nil
+}
+
+func (Ctx *Context) DomainQualifierToId(name string) (Domid, error) {
+ var id Domid
+ if ret := C.libxl_domain_qualifier_to_domid(Ctx.ctx, C.CString(name), (*C.uint32_t)(unsafe.Pointer(&id))); ret != 0 {
+ return 0, fmt.Errorf("fail to get id for domain %v: v", name, ret)
+ }
+ return id, nil
+}
+
+func GenerateUuid() (string, error) {
+ var buf [16]byte
+ var uuid [36]byte
+
+ _, err := io.ReadFull(rand.Reader, buf[:])
+ if err != nil {
+ return "", err
+ }
+ buf[6] = (buf[6] & 0x0f) | 0x40 // Version 4
+ buf[8] = (buf[8] & 0x3f) | 0x80 // Variant is 10
+
+ hex.Encode(uuid[:], buf[:4])
+ uuid[8] = '-'
+ hex.Encode(uuid[9:13], buf[4:6])
+ uuid[13] = '-'
+ hex.Encode(uuid[14:18], buf[6:8])
+ uuid[18] = '-'
+ hex.Encode(uuid[19:23], buf[8:10])
+ uuid[23] = '-'
+ hex.Encode(uuid[24:], buf[10:])
+
+ return string(uuid[:]), nil
+}
diff --git a/lib/runvxenlight/xenlight.go b/lib/runvxenlight/xenlight.go
new file mode 100644
index 00000000..9c37e59e
--- /dev/null
+++ b/lib/runvxenlight/xenlight.go
@@ -0,0 +1,1197 @@
+/*
+ * Copyright (C) 2016 George W. Dunlap, Citrix Systems UK Ltd
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+package xenlight
+
+/*
+#cgo LDFLAGS: -lxenlight -lyajl -lxentoollog
+#include <stdlib.h>
+#include <libxl.h>
+*/
+import "C"
+
+/*
+ * Other flags that may be needed at some point:
+ * -lnl-route-3 -lnl-3
+ *
+ * To get back to static linking:
+ * #cgo LDFLAGS: -lxenlight -lyajl_s -lxengnttab -lxenstore -lxenguest -lxentoollog -lxenevtchn -lxenctrl -lblktapctl -lxenforeignmemory -lxencall -lz -luuid -lutil
+ */
+
+import (
+ "fmt"
+ "time"
+ "unsafe"
+)
+
+/*
+ * Errors
+ */
+
+type Error int
+
+const (
+ ErrorNonspecific = Error(-C.ERROR_NONSPECIFIC)
+ ErrorVersion = Error(-C.ERROR_VERSION)
+ ErrorFail = Error(-C.ERROR_FAIL)
+ ErrorNi = Error(-C.ERROR_NI)
+ ErrorNomem = Error(-C.ERROR_NOMEM)
+ ErrorInval = Error(-C.ERROR_INVAL)
+ ErrorBadfail = Error(-C.ERROR_BADFAIL)
+ ErrorGuestTimedout = Error(-C.ERROR_GUEST_TIMEDOUT)
+ ErrorTimedout = Error(-C.ERROR_TIMEDOUT)
+ ErrorNoparavirt = Error(-C.ERROR_NOPARAVIRT)
+ ErrorNotReady = Error(-C.ERROR_NOT_READY)
+ ErrorOseventRegFail = Error(-C.ERROR_OSEVENT_REG_FAIL)
+ ErrorBufferfull = Error(-C.ERROR_BUFFERFULL)
+ ErrorUnknownChild = Error(-C.ERROR_UNKNOWN_CHILD)
+ ErrorLockFail = Error(-C.ERROR_LOCK_FAIL)
+ ErrorJsonConfigEmpty = Error(-C.ERROR_JSON_CONFIG_EMPTY)
+ ErrorDeviceExists = Error(-C.ERROR_DEVICE_EXISTS)
+ ErrorCheckpointDevopsDoesNotMatch = Error(-C.ERROR_CHECKPOINT_DEVOPS_DOES_NOT_MATCH)
+ ErrorCheckpointDeviceNotSupported = Error(-C.ERROR_CHECKPOINT_DEVICE_NOT_SUPPORTED)
+ ErrorVnumaConfigInvalid = Error(-C.ERROR_VNUMA_CONFIG_INVALID)
+ ErrorDomainNotfound = Error(-C.ERROR_DOMAIN_NOTFOUND)
+ ErrorAborted = Error(-C.ERROR_ABORTED)
+ ErrorNotfound = Error(-C.ERROR_NOTFOUND)
+ ErrorDomainDestroyed = Error(-C.ERROR_DOMAIN_DESTROYED)
+ ErrorFeatureRemoved = Error(-C.ERROR_FEATURE_REMOVED)
+)
+
+var errors = [...]string{
+ ErrorNonspecific: "Non-specific error",
+ ErrorVersion: "Wrong version",
+ ErrorFail: "Failed",
+ ErrorNi: "Not Implemented",
+ ErrorNomem: "No memory",
+ ErrorInval: "Invalid argument",
+ ErrorBadfail: "Bad Fail",
+ ErrorGuestTimedout: "Guest timed out",
+ ErrorTimedout: "Timed out",
+ ErrorNoparavirt: "No Paravirtualization",
+ ErrorNotReady: "Not ready",
+ ErrorOseventRegFail: "OS event registration failed",
+ ErrorBufferfull: "Buffer full",
+ ErrorUnknownChild: "Unknown child",
+ ErrorLockFail: "Lock failed",
+ ErrorJsonConfigEmpty: "JSON config empty",
+ ErrorDeviceExists: "Device exists",
+ ErrorCheckpointDevopsDoesNotMatch: "Checkpoint devops does not match",
+ ErrorCheckpointDeviceNotSupported: "Checkpoint device not supported",
+ ErrorVnumaConfigInvalid: "VNUMA config invalid",
+ ErrorDomainNotfound: "Domain not found",
+ ErrorAborted: "Aborted",
+ ErrorNotfound: "Not found",
+ ErrorDomainDestroyed: "Domain destroyed",
+ ErrorFeatureRemoved: "Feature removed",
+}
+
+func (e Error) Error() string {
+ if 0 < int(e) && int(e) < len(errors) {
+ s := errors[e]
+ if s != "" {
+ return s
+ }
+ }
+ return fmt.Sprintf("libxl error: %d", -e)
+
+}
+
+/*
+ * Types: Builtins
+ */
+
+type Domid uint32
+
+type MemKB uint64
+
+type Uuid C.libxl_uuid
+
+type Context struct {
+ ctx *C.libxl_ctx
+ logger *C.xentoollog_logger_stdiostream
+}
+
+type Hwcap []C.uint32_t
+
+func (chwcap C.libxl_hwcap) toGo() (ghwcap Hwcap) {
+ // Alloc a Go slice for the bytes
+ size := 8
+ ghwcap = make([]C.uint32_t, size)
+
+ // Make a slice pointing to the C array
+ mapslice := (*[1 << 30]C.uint32_t)(unsafe.Pointer(&chwcap[0]))[:size:size]
+
+ // And copy the C array into the Go array
+ copy(ghwcap, mapslice)
+
+ return
+}
+
+// typedef struct {
+// uint32_t size; /* number of bytes in map */
+// uint8_t *map;
+// } libxl_bitmap;
+
+// Implement the Go bitmap type such that the underlying data can
+// easily be copied in and out. NB that we still have to do copies
+// both directions, because cgo runtime restrictions forbid passing to
+// a C function a pointer to a Go-allocated structure which contains a
+// pointer.
+type Bitmap struct {
+ bitmap []C.uint8_t
+}
+
+/*
+ * Types: IDL
+ *
+ * FIXME: Generate these automatically from the IDL
+ */
+
+type Physinfo struct {
+ ThreadsPerCore uint32
+ CoresPerSocket uint32
+ MaxCpuId uint32
+ NrCpus uint32
+ CpuKhz uint32
+ TotalPages uint64
+ FreePages uint64
+ ScrubPages uint64
+ OutstandingPages uint64
+ SharingFreedPages uint64
+ SharingUsedFrames uint64
+ NrNodes uint32
+ HwCap Hwcap
+ CapHvm bool
+ CapHvmDirectio bool
+}
+
+func (cphys *C.libxl_physinfo) toGo() (physinfo *Physinfo) {
+
+ physinfo = &Physinfo{}
+ physinfo.ThreadsPerCore = uint32(cphys.threads_per_core)
+ physinfo.CoresPerSocket = uint32(cphys.cores_per_socket)
+ physinfo.MaxCpuId = uint32(cphys.max_cpu_id)
+ physinfo.NrCpus = uint32(cphys.nr_cpus)
+ physinfo.CpuKhz = uint32(cphys.cpu_khz)
+ physinfo.TotalPages = uint64(cphys.total_pages)
+ physinfo.FreePages = uint64(cphys.free_pages)
+ physinfo.ScrubPages = uint64(cphys.scrub_pages)
+ physinfo.OutstandingPages = uint64(cphys.outstanding_pages)
+ physinfo.SharingFreedPages = uint64(cphys.sharing_freed_pages)
+ physinfo.SharingUsedFrames = uint64(cphys.sharing_used_frames)
+ physinfo.NrNodes = uint32(cphys.nr_nodes)
+ physinfo.HwCap = cphys.hw_cap.toGo()
+ physinfo.CapHvm = bool(cphys.cap_hvm)
+ physinfo.CapHvmDirectio = bool(cphys.cap_hvm_directio)
+
+ return
+}
+
+type VersionInfo struct {
+ XenVersionMajor int
+ XenVersionMinor int
+ XenVersionExtra string
+ Compiler string
+ CompileBy string
+ CompileDomain string
+ CompileDate string
+ Capabilities string
+ Changeset string
+ VirtStart uint64
+ Pagesize int
+ Commandline string
+ BuildId string
+}
+
+func (cinfo *C.libxl_version_info) toGo() (info *VersionInfo) {
+ info = &VersionInfo{}
+ info.XenVersionMajor = int(cinfo.xen_version_major)
+ info.XenVersionMinor = int(cinfo.xen_version_minor)
+ info.XenVersionExtra = C.GoString(cinfo.xen_version_extra)
+ info.Compiler = C.GoString(cinfo.compiler)
+ info.CompileBy = C.GoString(cinfo.compile_by)
+ info.CompileDomain = C.GoString(cinfo.compile_domain)
+ info.CompileDate = C.GoString(cinfo.compile_date)
+ info.Capabilities = C.GoString(cinfo.capabilities)
+ info.Changeset = C.GoString(cinfo.changeset)
+ info.VirtStart = uint64(cinfo.virt_start)
+ info.Pagesize = int(cinfo.pagesize)
+ info.Commandline = C.GoString(cinfo.commandline)
+ info.BuildId = C.GoString(cinfo.build_id)
+
+ return
+}
+
+type ShutdownReason int32
+
+const (
+ ShutdownReasonUnknown = ShutdownReason(C.LIBXL_SHUTDOWN_REASON_UNKNOWN)
+ ShutdownReasonPoweroff = ShutdownReason(C.LIBXL_SHUTDOWN_REASON_POWEROFF)
+ ShutdownReasonReboot = ShutdownReason(C.LIBXL_SHUTDOWN_REASON_REBOOT)
+ ShutdownReasonSuspend = ShutdownReason(C.LIBXL_SHUTDOWN_REASON_SUSPEND)
+ ShutdownReasonCrash = ShutdownReason(C.LIBXL_SHUTDOWN_REASON_CRASH)
+ ShutdownReasonWatchdog = ShutdownReason(C.LIBXL_SHUTDOWN_REASON_WATCHDOG)
+ ShutdownReasonSoftReset = ShutdownReason(C.LIBXL_SHUTDOWN_REASON_SOFT_RESET)
+)
+
+func (sr ShutdownReason) String() (str string) {
+ cstr := C.libxl_shutdown_reason_to_string(C.libxl_shutdown_reason(sr))
+ str = C.GoString(cstr)
+
+ return
+}
+
+type DomainType int32
+
+const (
+ DomainTypeInvalid = DomainType(C.LIBXL_DOMAIN_TYPE_INVALID)
+ DomainTypeHvm = DomainType(C.LIBXL_DOMAIN_TYPE_HVM)
+ DomainTypePv = DomainType(C.LIBXL_DOMAIN_TYPE_PV)
+)
+
+func (dt DomainType) String() (str string) {
+ cstr := C.libxl_domain_type_to_string(C.libxl_domain_type(dt))
+ str = C.GoString(cstr)
+
+ return
+}
+
+type Dominfo struct {
+ Uuid Uuid
+ Domid Domid
+ Ssidref uint32
+ SsidLabel string
+ Running bool
+ Blocked bool
+ Paused bool
+ Shutdown bool
+ Dying bool
+ NeverStop bool
+
+ ShutdownReason int32
+ OutstandingMemkb MemKB
+ CurrentMemkb MemKB
+ SharedMemkb MemKB
+ PagedMemkb MemKB
+ MaxMemkb MemKB
+ CpuTime time.Duration
+ VcpuMaxId uint32
+ VcpuOnline uint32
+ Cpupool uint32
+ DomainType int32
+}
+
+func (cdi *C.libxl_dominfo) toGo() (di *Dominfo) {
+
+ di = &Dominfo{}
+ di.Uuid = Uuid(cdi.uuid)
+ di.Domid = Domid(cdi.domid)
+ di.Ssidref = uint32(cdi.ssidref)
+ di.SsidLabel = C.GoString(cdi.ssid_label)
+ di.Running = bool(cdi.running)
+ di.Blocked = bool(cdi.blocked)
+ di.Paused = bool(cdi.paused)
+ di.Shutdown = bool(cdi.shutdown)
+ di.Dying = bool(cdi.dying)
+ di.NeverStop = bool(cdi.never_stop)
+ di.ShutdownReason = int32(cdi.shutdown_reason)
+ di.OutstandingMemkb = MemKB(cdi.outstanding_memkb)
+ di.CurrentMemkb = MemKB(cdi.current_memkb)
+ di.SharedMemkb = MemKB(cdi.shared_memkb)
+ di.PagedMemkb = MemKB(cdi.paged_memkb)
+ di.MaxMemkb = MemKB(cdi.max_memkb)
+ di.CpuTime = time.Duration(cdi.cpu_time)
+ di.VcpuMaxId = uint32(cdi.vcpu_max_id)
+ di.VcpuOnline = uint32(cdi.vcpu_online)
+ di.Cpupool = uint32(cdi.cpupool)
+ di.DomainType = int32(cdi.domain_type)
+
+ return
+}
+
+// # Consistent with values defined in domctl.h
+// # Except unknown which we have made up
+// libxl_scheduler = Enumeration("scheduler", [
+// (0, "unknown"),
+// (4, "sedf"),
+// (5, "credit"),
+// (6, "credit2"),
+// (7, "arinc653"),
+// (8, "rtds"),
+// ])
+type Scheduler int
+
+var (
+ SchedulerUnknown Scheduler = C.LIBXL_SCHEDULER_UNKNOWN
+ SchedulerSedf Scheduler = C.LIBXL_SCHEDULER_SEDF
+ SchedulerCredit Scheduler = C.LIBXL_SCHEDULER_CREDIT
+ SchedulerCredit2 Scheduler = C.LIBXL_SCHEDULER_CREDIT2
+ SchedulerArinc653 Scheduler = C.LIBXL_SCHEDULER_ARINC653
+ SchedulerRTDS Scheduler = C.LIBXL_SCHEDULER_RTDS
+)
+
+// const char *libxl_scheduler_to_string(libxl_scheduler p);
+func (s Scheduler) String() string {
+ cs := C.libxl_scheduler_to_string(C.libxl_scheduler(s))
+ // No need to free const return value
+
+ return C.GoString(cs)
+}
+
+// int libxl_scheduler_from_string(const char *s, libxl_scheduler *e);
+func (s *Scheduler) FromString(gstr string) (err error) {
+ *s, err = SchedulerFromString(gstr)
+ return
+}
+
+func SchedulerFromString(name string) (s Scheduler, err error) {
+ cname := C.CString(name)
+ defer C.free(unsafe.Pointer(cname))
+
+ var cs C.libxl_scheduler
+
+ ret := C.libxl_scheduler_from_string(cname, &cs)
+ if ret != 0 {
+ err = Error(-ret)
+ return
+ }
+
+ s = Scheduler(cs)
+
+ return
+}
+
+// libxl_cpupoolinfo = Struct("cpupoolinfo", [
+// ("poolid", uint32),
+// ("pool_name", string),
+// ("sched", libxl_scheduler),
+// ("n_dom", uint32),
+// ("cpumap", libxl_bitmap)
+// ], dir=DIR_OUT)
+
+type CpupoolInfo struct {
+ Poolid uint32
+ PoolName string
+ Scheduler Scheduler
+ DomainCount int
+ Cpumap Bitmap
+}
+
+func (cci C.libxl_cpupoolinfo) toGo() (gci CpupoolInfo) {
+ gci.Poolid = uint32(cci.poolid)
+ gci.PoolName = C.GoString(cci.pool_name)
+ gci.Scheduler = Scheduler(cci.sched)
+ gci.DomainCount = int(cci.n_dom)
+ gci.Cpumap = cci.cpumap.toGo()
+
+ return
+}
+
+// libxl_cpupoolinfo * libxl_list_cpupool(libxl_ctx*, int *nb_pool_out);
+// void libxl_cpupoolinfo_list_free(libxl_cpupoolinfo *list, int nb_pool);
+func (Ctx *Context) ListCpupool() (list []CpupoolInfo) {
+ err := Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ var nbPool C.int
+
+ c_cpupool_list := C.libxl_list_cpupool(Ctx.ctx, &nbPool)
+
+ defer C.libxl_cpupoolinfo_list_free(c_cpupool_list, nbPool)
+
+ if int(nbPool) == 0 {
+ return
+ }
+
+ // Magic
+ cpupoolListSlice := (*[1 << 30]C.libxl_cpupoolinfo)(unsafe.Pointer(c_cpupool_list))[:nbPool:nbPool]
+ for i := range cpupoolListSlice {
+ info := cpupoolListSlice[i].toGo()
+ list = append(list, info)
+ }
+
+ return
+}
+
+// int libxl_cpupool_info(libxl_ctx *ctx, libxl_cpupoolinfo *info, uint32_t poolid);
+func (Ctx *Context) CpupoolInfo(Poolid uint32) (pool CpupoolInfo) {
+ err := Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ var c_cpupool C.libxl_cpupoolinfo
+
+ ret := C.libxl_cpupool_info(Ctx.ctx, &c_cpupool, C.uint32_t(Poolid))
+ if ret != 0 {
+ err = Error(-ret)
+ return
+ }
+ defer C.libxl_cpupoolinfo_dispose(&c_cpupool)
+
+ pool = c_cpupool.toGo()
+
+ return
+}
+
+// int libxl_cpupool_create(libxl_ctx *ctx, const char *name,
+// libxl_scheduler sched,
+// libxl_bitmap cpumap, libxl_uuid *uuid,
+// uint32_t *poolid);
+// FIXME: uuid
+// FIXME: Setting poolid
+func (Ctx *Context) CpupoolCreate(Name string, Scheduler Scheduler, Cpumap Bitmap) (err error, Poolid uint32) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ poolid := C.uint32_t(C.LIBXL_CPUPOOL_POOLID_ANY)
+ name := C.CString(Name)
+ defer C.free(unsafe.Pointer(name))
+
+ // For now, just do what xl does, and make a new uuid every time we create the pool
+ var uuid C.libxl_uuid
+ C.libxl_uuid_generate(&uuid)
+
+ cbm := Cpumap.toC()
+ defer C.libxl_bitmap_dispose(&cbm)
+
+ ret := C.libxl_cpupool_create(Ctx.ctx, name, C.libxl_scheduler(Scheduler),
+ cbm, &uuid, &poolid)
+ if ret != 0 {
+ err = Error(-ret)
+ return
+ }
+
+ Poolid = uint32(poolid)
+
+ return
+}
+
+// int libxl_cpupool_destroy(libxl_ctx *ctx, uint32_t poolid);
+func (Ctx *Context) CpupoolDestroy(Poolid uint32) (err error) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ ret := C.libxl_cpupool_destroy(Ctx.ctx, C.uint32_t(Poolid))
+ if ret != 0 {
+ err = Error(-ret)
+ return
+ }
+
+ return
+}
+
+// int libxl_cpupool_cpuadd(libxl_ctx *ctx, uint32_t poolid, int cpu);
+func (Ctx *Context) CpupoolCpuadd(Poolid uint32, Cpu int) (err error) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ ret := C.libxl_cpupool_cpuadd(Ctx.ctx, C.uint32_t(Poolid), C.int(Cpu))
+ if ret != 0 {
+ err = Error(-ret)
+ return
+ }
+
+ return
+}
+
+// int libxl_cpupool_cpuadd_cpumap(libxl_ctx *ctx, uint32_t poolid,
+// const libxl_bitmap *cpumap);
+func (Ctx *Context) CpupoolCpuaddCpumap(Poolid uint32, Cpumap Bitmap) (err error) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ cbm := Cpumap.toC()
+ defer C.libxl_bitmap_dispose(&cbm)
+
+ ret := C.libxl_cpupool_cpuadd_cpumap(Ctx.ctx, C.uint32_t(Poolid), &cbm)
+ if ret != 0 {
+ err = Error(-ret)
+ return
+ }
+
+ return
+}
+
+// int libxl_cpupool_cpuremove(libxl_ctx *ctx, uint32_t poolid, int cpu);
+func (Ctx *Context) CpupoolCpuremove(Poolid uint32, Cpu int) (err error) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ ret := C.libxl_cpupool_cpuremove(Ctx.ctx, C.uint32_t(Poolid), C.int(Cpu))
+ if ret != 0 {
+ err = Error(-ret)
+ return
+ }
+
+ return
+}
+
+// int libxl_cpupool_cpuremove_cpumap(libxl_ctx *ctx, uint32_t poolid,
+// const libxl_bitmap *cpumap);
+func (Ctx *Context) CpupoolCpuremoveCpumap(Poolid uint32, Cpumap Bitmap) (err error) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ cbm := Cpumap.toC()
+ defer C.libxl_bitmap_dispose(&cbm)
+
+ ret := C.libxl_cpupool_cpuremove_cpumap(Ctx.ctx, C.uint32_t(Poolid), &cbm)
+ if ret != 0 {
+ err = Error(-ret)
+ return
+ }
+
+ return
+}
+
+// int libxl_cpupool_rename(libxl_ctx *ctx, const char *name, uint32_t poolid);
+func (Ctx *Context) CpupoolRename(Name string, Poolid uint32) (err error) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ name := C.CString(Name)
+ defer C.free(unsafe.Pointer(name))
+
+ ret := C.libxl_cpupool_rename(Ctx.ctx, name, C.uint32_t(Poolid))
+ if ret != 0 {
+ err = Error(-ret)
+ return
+ }
+
+ return
+}
+
+// int libxl_cpupool_cpuadd_node(libxl_ctx *ctx, uint32_t poolid, int node, int *cpus);
+func (Ctx *Context) CpupoolCpuaddNode(Poolid uint32, Node int) (Cpus int, err error) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ ccpus := C.int(0)
+
+ ret := C.libxl_cpupool_cpuadd_node(Ctx.ctx, C.uint32_t(Poolid), C.int(Node), &ccpus)
+ if ret != 0 {
+ err = Error(-ret)
+ return
+ }
+
+ Cpus = int(ccpus)
+
+ return
+}
+
+// int libxl_cpupool_cpuremove_node(libxl_ctx *ctx, uint32_t poolid, int node, int *cpus);
+func (Ctx *Context) CpupoolCpuremoveNode(Poolid uint32, Node int) (Cpus int, err error) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ ccpus := C.int(0)
+
+ ret := C.libxl_cpupool_cpuremove_node(Ctx.ctx, C.uint32_t(Poolid), C.int(Node), &ccpus)
+ if ret != 0 {
+ err = Error(-ret)
+ return
+ }
+
+ Cpus = int(ccpus)
+
+ return
+}
+
+// int libxl_cpupool_movedomain(libxl_ctx *ctx, uint32_t poolid, uint32_t domid);
+func (Ctx *Context) CpupoolMovedomain(Poolid uint32, Id Domid) (err error) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ ret := C.libxl_cpupool_movedomain(Ctx.ctx, C.uint32_t(Poolid), C.uint32_t(Id))
+ if ret != 0 {
+ err = Error(-ret)
+ return
+ }
+
+ return
+}
+
+//
+// Utility functions
+//
+func (Ctx *Context) CpupoolFindByName(name string) (info CpupoolInfo, found bool) {
+ plist := Ctx.ListCpupool()
+
+ for i := range plist {
+ if plist[i].PoolName == name {
+ found = true
+ info = plist[i]
+ return
+ }
+ }
+ return
+}
+
+func (Ctx *Context) CpupoolMakeFree(Cpumap Bitmap) (err error) {
+ plist := Ctx.ListCpupool()
+
+ for i := range plist {
+ var Intersection Bitmap
+ Intersection = Cpumap.And(plist[i].Cpumap)
+ if !Intersection.IsEmpty() {
+ err = Ctx.CpupoolCpuremoveCpumap(plist[i].Poolid, Intersection)
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+/*
+ * Bitmap operations
+ */
+
+// Return a Go bitmap which is a copy of the referred C bitmap.
+func (cbm C.libxl_bitmap) toGo() (gbm Bitmap) {
+ // Alloc a Go slice for the bytes
+ size := int(cbm.size)
+ gbm.bitmap = make([]C.uint8_t, size)
+
+ // Make a slice pointing to the C array
+ mapslice := (*[1 << 30]C.uint8_t)(unsafe.Pointer(cbm._map))[:size:size]
+
+ // And copy the C array into the Go array
+ copy(gbm.bitmap, mapslice)
+
+ return
+}
+
+// Must be C.libxl_bitmap_dispose'd of afterwards
+func (gbm Bitmap) toC() (cbm C.libxl_bitmap) {
+ C.libxl_bitmap_init(&cbm)
+
+ size := len(gbm.bitmap)
+ cbm._map = (*C.uint8_t)(C.malloc(C.size_t(size)))
+ cbm.size = C.uint32_t(size)
+ if cbm._map == nil {
+ panic("C.calloc failed!")
+ }
+
+ // Make a slice pointing to the C array
+ mapslice := (*[1 << 30]C.uint8_t)(unsafe.Pointer(cbm._map))[:size:size]
+
+ // And copy the Go array into the C array
+ copy(mapslice, gbm.bitmap)
+
+ return
+}
+
+func (bm *Bitmap) Test(bit int) bool {
+ ubit := uint(bit)
+ if bit > bm.Max() || bm.bitmap == nil {
+ return false
+ }
+
+ return (bm.bitmap[bit/8] & (1 << (ubit & 7))) != 0
+}
+
+func (bm *Bitmap) Set(bit int) {
+ ibit := bit / 8
+ if ibit+1 > len(bm.bitmap) {
+ bm.bitmap = append(bm.bitmap, make([]C.uint8_t, ibit+1-len(bm.bitmap))...)
+ }
+
+ bm.bitmap[ibit] |= 1 << (uint(bit) & 7)
+}
+
+func (bm *Bitmap) SetRange(start int, end int) {
+ for i := start; i <= end; i++ {
+ bm.Set(i)
+ }
+}
+
+func (bm *Bitmap) Clear(bit int) {
+ ubit := uint(bit)
+ if bit > bm.Max() || bm.bitmap == nil {
+ return
+ }
+
+ bm.bitmap[bit/8] &= ^(1 << (ubit & 7))
+}
+
+func (bm *Bitmap) ClearRange(start int, end int) {
+ for i := start; i <= end; i++ {
+ bm.Clear(i)
+ }
+}
+
+func (bm *Bitmap) Max() int {
+ return len(bm.bitmap)*8 - 1
+}
+
+func (bm *Bitmap) IsEmpty() bool {
+ for i := 0; i < len(bm.bitmap); i++ {
+ if bm.bitmap[i] != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func (a Bitmap) And(b Bitmap) (c Bitmap) {
+ var max, min int
+ if len(a.bitmap) > len(b.bitmap) {
+ max = len(a.bitmap)
+ min = len(b.bitmap)
+ } else {
+ max = len(b.bitmap)
+ min = len(a.bitmap)
+ }
+ c.bitmap = make([]C.uint8_t, max)
+
+ for i := 0; i < min; i++ {
+ c.bitmap[i] = a.bitmap[i] & b.bitmap[i]
+ }
+ return
+}
+
+func (bm Bitmap) String() (s string) {
+ lastOnline := false
+ crange := false
+ printed := false
+ var i int
+ /// --x-xxxxx-x -> 2,4-8,10
+ /// --x-xxxxxxx -> 2,4-10
+ for i = 0; i <= bm.Max(); i++ {
+ if bm.Test(i) {
+ if !lastOnline {
+ // Switching offline -> online, print this cpu
+ if printed {
+ s += ","
+ }
+ s += fmt.Sprintf("%d", i)
+ printed = true
+ } else if !crange {
+ // last was online, but we're not in a range; print -
+ crange = true
+ s += "-"
+ } else {
+ // last was online, we're in a range, nothing else to do
+ }
+ lastOnline = true
+ } else {
+ if lastOnline {
+ // Switching online->offline; do we need to end a range?
+ if crange {
+ s += fmt.Sprintf("%d", i-1)
+ }
+ }
+ lastOnline = false
+ crange = false
+ }
+ }
+ if lastOnline {
+ // Switching online->offline; do we need to end a range?
+ if crange {
+ s += fmt.Sprintf("%d", i-1)
+ }
+ }
+
+ return
+}
+
+/*
+ * Context
+ */
+var Ctx Context
+
+func (Ctx *Context) IsOpen() bool {
+ return Ctx.ctx != nil
+}
+
+func (Ctx *Context) Open() (err error) {
+ if Ctx.ctx != nil {
+ return
+ }
+
+ Ctx.logger = C.xtl_createlogger_stdiostream(C.stderr, C.XTL_ERROR, 0)
+ if Ctx.logger == nil {
+ err = fmt.Errorf("Cannot open stdiostream")
+ return
+ }
+
+ ret := C.libxl_ctx_alloc(&Ctx.ctx, C.LIBXL_VERSION,
+ 0, (*C.xentoollog_logger)(unsafe.Pointer(Ctx.logger)))
+
+ if ret != 0 {
+ err = Error(-ret)
+ }
+ return
+}
+
+func (Ctx *Context) Close() (err error) {
+ ret := C.libxl_ctx_free(Ctx.ctx)
+ Ctx.ctx = nil
+
+ if ret != 0 {
+ err = Error(-ret)
+ }
+ C.xtl_logger_destroy((*C.xentoollog_logger)(unsafe.Pointer(Ctx.logger)))
+ return
+}
+
+func (Ctx *Context) CheckOpen() (err error) {
+ if Ctx.ctx == nil {
+ err = fmt.Errorf("Context not opened")
+ }
+ return
+}
+
+//int libxl_get_max_cpus(libxl_ctx *ctx);
+func (Ctx *Context) GetMaxCpus() (maxCpus int, err error) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ ret := C.libxl_get_max_cpus(Ctx.ctx)
+ if ret < 0 {
+ err = Error(-ret)
+ return
+ }
+ maxCpus = int(ret)
+ return
+}
+
+//int libxl_get_online_cpus(libxl_ctx *ctx);
+func (Ctx *Context) GetOnlineCpus() (onCpus int, err error) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ ret := C.libxl_get_online_cpus(Ctx.ctx)
+ if ret < 0 {
+ err = Error(-ret)
+ return
+ }
+ onCpus = int(ret)
+ return
+}
+
+//int libxl_get_max_nodes(libxl_ctx *ctx);
+func (Ctx *Context) GetMaxNodes() (maxNodes int, err error) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+ ret := C.libxl_get_max_nodes(Ctx.ctx)
+ if ret < 0 {
+ err = Error(-ret)
+ return
+ }
+ maxNodes = int(ret)
+ return
+}
+
+//int libxl_get_free_memory(libxl_ctx *ctx, uint64_t *memkb);
+func (Ctx *Context) GetFreeMemory() (memkb uint64, err error) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+ var cmem C.uint64_t
+ ret := C.libxl_get_free_memory(Ctx.ctx, &cmem)
+
+ if ret < 0 {
+ err = Error(-ret)
+ return
+ }
+
+ memkb = uint64(cmem)
+ return
+
+}
+
+//int libxl_get_physinfo(libxl_ctx *ctx, libxl_physinfo *physinfo)
+func (Ctx *Context) GetPhysinfo() (physinfo *Physinfo, err error) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+ var cphys C.libxl_physinfo
+ C.libxl_physinfo_init(&cphys)
+ defer C.libxl_physinfo_dispose(&cphys)
+
+ ret := C.libxl_get_physinfo(Ctx.ctx, &cphys)
+
+ if ret < 0 {
+ err = Error(-ret)
+ return
+ }
+ physinfo = cphys.toGo()
+
+ return
+}
+
+//const libxl_version_info* libxl_get_version_info(libxl_ctx *ctx);
+func (Ctx *Context) GetVersionInfo() (info *VersionInfo, err error) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ var cinfo *C.libxl_version_info
+
+ cinfo = C.libxl_get_version_info(Ctx.ctx)
+
+ info = cinfo.toGo()
+
+ return
+}
+
+func (Ctx *Context) DomainInfo(Id Domid) (di *Dominfo, err error) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ var cdi C.libxl_dominfo
+ C.libxl_dominfo_init(&cdi)
+ defer C.libxl_dominfo_dispose(&cdi)
+
+ ret := C.libxl_domain_info(Ctx.ctx, &cdi, C.uint32_t(Id))
+
+ if ret != 0 {
+ err = Error(-ret)
+ return
+ }
+
+ di = cdi.toGo()
+
+ return
+}
+
+func (Ctx *Context) DomainUnpause(Id Domid) (err error) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ ret := C.libxl_domain_unpause(Ctx.ctx, C.uint32_t(Id))
+
+ if ret != 0 {
+ err = Error(-ret)
+ }
+ return
+}
+
+//int libxl_domain_pause(libxl_ctx *ctx, uint32_t domain);
+func (Ctx *Context) DomainPause(id Domid) (err error) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ ret := C.libxl_domain_pause(Ctx.ctx, C.uint32_t(id))
+
+ if ret != 0 {
+ err = Error(-ret)
+ }
+ return
+}
+
+//int libxl_domain_shutdown(libxl_ctx *ctx, uint32_t domid);
+func (Ctx *Context) DomainShutdown(id Domid) (err error) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ ret := C.libxl_domain_shutdown(Ctx.ctx, C.uint32_t(id))
+
+ if ret != 0 {
+ err = Error(-ret)
+ }
+ return
+}
+
+//int libxl_domain_reboot(libxl_ctx *ctx, uint32_t domid);
+func (Ctx *Context) DomainReboot(id Domid) (err error) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ ret := C.libxl_domain_reboot(Ctx.ctx, C.uint32_t(id))
+
+ if ret != 0 {
+ err = Error(-ret)
+ }
+ return
+}
+
+//libxl_dominfo * libxl_list_domain(libxl_ctx*, int *nb_domain_out);
+//void libxl_dominfo_list_free(libxl_dominfo *list, int nb_domain);
+func (Ctx *Context) ListDomain() (glist []Dominfo) {
+ err := Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ var nbDomain C.int
+ clist := C.libxl_list_domain(Ctx.ctx, &nbDomain)
+ defer C.libxl_dominfo_list_free(clist, nbDomain)
+
+ if int(nbDomain) == 0 {
+ return
+ }
+
+ gslice := (*[1 << 30]C.libxl_dominfo)(unsafe.Pointer(clist))[:nbDomain:nbDomain]
+ for i := range gslice {
+ info := gslice[i].toGo()
+ glist = append(glist, *info)
+ }
+
+ return
+}
+
+type Vcpuinfo struct {
+ Vcpuid uint32
+ Cpu uint32
+ Online bool
+ Blocked bool
+ Running bool
+ VCpuTime time.Duration
+ Cpumap Bitmap
+ CpumapSoft Bitmap
+}
+
+func (cvci C.libxl_vcpuinfo) toGo() (gvci Vcpuinfo) {
+ gvci.Vcpuid = uint32(cvci.vcpuid)
+ gvci.Cpu = uint32(cvci.cpu)
+ gvci.Online = bool(cvci.online)
+ gvci.Blocked = bool(cvci.blocked)
+ gvci.Running = bool(cvci.running)
+ gvci.VCpuTime = time.Duration(cvci.vcpu_time)
+ gvci.Cpumap = cvci.cpumap.toGo()
+ gvci.CpumapSoft = cvci.cpumap_soft.toGo()
+
+ return
+}
+
+//libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid,
+// int *nb_vcpu, int *nr_cpus_out);
+//void libxl_vcpuinfo_list_free(libxl_vcpuinfo *, int nr_vcpus);
+func (Ctx *Context) ListVcpu(id Domid) (glist []Vcpuinfo) {
+ err := Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ var nbVcpu C.int
+ var nrCpu C.int
+
+ clist := C.libxl_list_vcpu(Ctx.ctx, C.uint32_t(id), &nbVcpu, &nrCpu)
+ defer C.libxl_vcpuinfo_list_free(clist, nbVcpu)
+
+ if int(nbVcpu) == 0 {
+ return
+ }
+
+ gslice := (*[1 << 30]C.libxl_vcpuinfo)(unsafe.Pointer(clist))[:nbVcpu:nbVcpu]
+ for i := range gslice {
+ info := gslice[i].toGo()
+ glist = append(glist, info)
+ }
+
+ return
+}
+
+type ConsoleType int
+
+const (
+ ConsoleTypeUnknown = ConsoleType(C.LIBXL_CONSOLE_TYPE_UNKNOWN)
+ ConsoleTypeSerial = ConsoleType(C.LIBXL_CONSOLE_TYPE_SERIAL)
+ ConsoleTypePV = ConsoleType(C.LIBXL_CONSOLE_TYPE_PV)
+)
+
+func (ct ConsoleType) String() (str string) {
+ cstr := C.libxl_console_type_to_string(C.libxl_console_type(ct))
+ str = C.GoString(cstr)
+
+ return
+}
+
+//int libxl_console_get_tty(libxl_ctx *ctx, uint32_t domid, int cons_num,
+//libxl_console_type type, char **path);
+func (Ctx *Context) ConsoleGetTty(id Domid, consNum int, conType ConsoleType) (path string, err error) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ var cpath *C.char
+ ret := C.libxl_console_get_tty(Ctx.ctx, C.uint32_t(id), C.int(consNum), C.libxl_console_type(conType), &cpath)
+ if ret != 0 {
+ err = Error(-ret)
+ return
+ }
+ defer C.free(unsafe.Pointer(cpath))
+
+ path = C.GoString(cpath)
+ return
+}
+
+//int libxl_primary_console_get_tty(libxl_ctx *ctx, uint32_t domid_vm,
+// char **path);
+func (Ctx *Context) PrimaryConsoleGetTty(domid uint32) (path string, err error) {
+ err = Ctx.CheckOpen()
+ if err != nil {
+ return
+ }
+
+ var cpath *C.char
+ ret := C.libxl_primary_console_get_tty(Ctx.ctx, C.uint32_t(domid), &cpath)
+ if ret != 0 {
+ err = Error(-ret)
+ return
+ }
+ defer C.free(unsafe.Pointer(cpath))
+
+ path = C.GoString(cpath)
+ return
+}