merged conflicts

commit 880e5aeaae182e0898d727d7906723a6679310c7
2 parents: e3e24cc + f5105ef
@floren authored
Showing with 932 additions and 538 deletions.
  1. +10 −10 tools/kvm/8250-serial.c
  2. +3 −0  tools/kvm/Documentation/kvm-run.txt
  3. +2 −0  tools/kvm/Makefile
  4. +20 −7 tools/kvm/bios.c
  5. +3 −3 tools/kvm/cpuid.c
  6. +39 −39 tools/kvm/disk-image.c
  7. +22 −18 tools/kvm/include/kvm/disk-image.h
  8. +1 −1  tools/kvm/include/kvm/e820.h
  9. +3 −3 tools/kvm/include/kvm/interrupt.h
  10. +2 −2 tools/kvm/include/kvm/ioport.h
  11. +8 −8 tools/kvm/include/kvm/kvm-cpu.h
  12. +20 −16 tools/kvm/include/kvm/kvm.h
  13. +2 −1  tools/kvm/include/kvm/qcow.h
  14. +12 −0 tools/kvm/include/kvm/symbol.h
  15. +13 −0 tools/kvm/include/kvm/util.h
  16. +1 −1  tools/kvm/include/kvm/virtio-blk.h
  17. +2 −2 tools/kvm/include/kvm/virtio-console.h
  18. +1 −1  tools/kvm/include/kvm/virtio-net.h
  19. +3 −0  tools/kvm/include/kvm/virtio-pci-dev.h
  20. +0 −74 tools/kvm/include/kvm/virtio-pci.h
  21. +11 −0 tools/kvm/include/kvm/virtio.h
  22. +7 −7 tools/kvm/interrupt.c
  23. +6 −6 tools/kvm/ioport.c
  24. +107 −98 tools/kvm/kvm-cpu.c
  25. +47 −9 tools/kvm/kvm-run.c
  26. +118 −66 tools/kvm/kvm.c
  27. +1 −1  tools/kvm/mmio.c
  28. +1 −1  tools/kvm/mptable.c
  29. +4 −4 tools/kvm/pci.c
  30. +230 −25 tools/kvm/qcow.c
  31. +4 −4 tools/kvm/rtc.c
  32. +98 −0 tools/kvm/symbol.c
  33. +10 −11 tools/kvm/virtio/blk.c
  34. +17 −18 tools/kvm/virtio/console.c
  35. +102 −99 tools/kvm/virtio/net.c
  36. +2 −3 tools/kvm/virtio/rng.c
20 tools/kvm/8250-serial.c
@@ -70,7 +70,7 @@ static struct serial8250_device devices[] = {
static int sysrq_pending;
-static void serial8250__sysrq(struct kvm *self, struct serial8250_device *dev)
+static void serial8250__sysrq(struct kvm *kvm, struct serial8250_device *dev)
{
switch (sysrq_pending) {
case SYSRQ_PENDING_BREAK:
@@ -87,7 +87,7 @@ static void serial8250__sysrq(struct kvm *self, struct serial8250_device *dev)
}
}
-static void serial8250__receive(struct kvm *self, struct serial8250_device *dev)
+static void serial8250__receive(struct kvm *kvm, struct serial8250_device *dev)
{
int c;
@@ -95,7 +95,7 @@ static void serial8250__receive(struct kvm *self, struct serial8250_device *dev)
return;
if (sysrq_pending) {
- serial8250__sysrq(self, dev);
+ serial8250__sysrq(kvm, dev);
return;
}
@@ -114,13 +114,13 @@ static void serial8250__receive(struct kvm *self, struct serial8250_device *dev)
/*
* Interrupts are injected for ttyS0 only.
*/
-void serial8250__inject_interrupt(struct kvm *self)
+void serial8250__inject_interrupt(struct kvm *kvm)
{
struct serial8250_device *dev = &devices[0];
mutex_lock(&dev->mutex);
- serial8250__receive(self, dev);
+ serial8250__receive(kvm, dev);
if (dev->ier & UART_IER_RDI && dev->lsr & UART_LSR_DR)
dev->iir = UART_IIR_RDI;
@@ -130,14 +130,14 @@ void serial8250__inject_interrupt(struct kvm *self)
dev->iir = UART_IIR_NO_INT;
if (dev->iir != UART_IIR_NO_INT) {
- kvm__irq_line(self, dev->irq, 0);
- kvm__irq_line(self, dev->irq, 1);
+ kvm__irq_line(kvm, dev->irq, 0);
+ kvm__irq_line(kvm, dev->irq, 1);
}
mutex_unlock(&dev->mutex);
}
-void serial8250__inject_sysrq(struct kvm *self)
+void serial8250__inject_sysrq(struct kvm *kvm)
{
sysrq_pending = SYSRQ_PENDING_BREAK;
}
@@ -155,7 +155,7 @@ static struct serial8250_device *find_device(u16 port)
return NULL;
}
-static bool serial8250_out(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool serial8250_out(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
struct serial8250_device *dev;
u16 offset;
@@ -243,7 +243,7 @@ static bool serial8250_out(struct kvm *self, u16 port, void *data, int size, u32
return ret;
}
-static bool serial8250_in(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool serial8250_in(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
struct serial8250_device *dev;
u16 offset;
3  tools/kvm/Documentation/kvm-run.txt
@@ -55,6 +55,9 @@ OPTIONS
--cpus::
The number of virtual CPUs to run.
+--debug::
+ Enable debug messages.
+
SEE ALSO
--------
linkkvm:
2  tools/kvm/Makefile
@@ -26,6 +26,7 @@ OBJS += main.o
OBJS += mmio.o
OBJS += pci.o
OBJS += rtc.o
+OBJS += symbol.o
OBJS += term.o
OBJS += util.o
OBJS += virtio/blk.o
@@ -53,6 +54,7 @@ OBJS += bios/bios.o
LIBS += -lrt
LIBS += -lpthread
LIBS += -lvncserver
+LIBS += -lbfd
# Additional ARCH settings for x86
ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
27 tools/kvm/bios.c
@@ -61,8 +61,6 @@ static void e820_setup(struct kvm *kvm)
size = guest_flat_to_host(kvm, E820_MAP_SIZE);
mem_map = guest_flat_to_host(kvm, E820_MAP_START);
- *size = E820_MEM_AREAS;
-
mem_map[i++] = (struct e820_entry) {
.addr = REAL_MODE_IVT_BEGIN,
.size = EBDA_START - REAL_MODE_IVT_BEGIN,
@@ -78,13 +76,28 @@ static void e820_setup(struct kvm *kvm)
.size = MB_BIOS_END - MB_BIOS_BEGIN,
.type = E820_MEM_RESERVED,
};
- mem_map[i++] = (struct e820_entry) {
- .addr = BZ_KERNEL_START,
- .size = kvm->ram_size - BZ_KERNEL_START,
- .type = E820_MEM_USABLE,
- };
+ if (kvm->ram_size < KVM_32BIT_GAP_START) {
+ mem_map[i++] = (struct e820_entry) {
+ .addr = BZ_KERNEL_START,
+ .size = kvm->ram_size - BZ_KERNEL_START,
+ .type = E820_MEM_USABLE,
+ };
+ } else {
+ mem_map[i++] = (struct e820_entry) {
+ .addr = BZ_KERNEL_START,
+ .size = KVM_32BIT_GAP_START - BZ_KERNEL_START,
+ .type = E820_MEM_USABLE,
+ };
+ mem_map[i++] = (struct e820_entry) {
+ .addr = 0x100000000ULL,
+ .size = kvm->ram_size - KVM_32BIT_GAP_START,
+ .type = E820_MEM_USABLE,
+ };
+ }
BUILD_BUG_ON(i > E820_MEM_AREAS);
+
+ *size = i;
}
/**
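
The new e820 logic above splits usable RAM around the 32-bit PCI gap once guest memory reaches KVM_32BIT_GAP_START. A rough standalone sketch of the resulting usable ranges (the KVM_32BIT_GAP_* values mirror the kvm.h hunk later in this commit; BZ_KERNEL_START = 1 MB is an assumption, it is not shown in this diff):

#include <stdio.h>
#include <stdint.h>

/* Mirrors the kvm.h hunk in this commit. */
#define KVM_32BIT_GAP_SIZE  (512ULL << 20)
#define KVM_32BIT_GAP_START ((1ULL << 32) - KVM_32BIT_GAP_SIZE)
/* Assumed value, not part of this diff. */
#define BZ_KERNEL_START     0x100000ULL

/* Print the usable-RAM e820 ranges produced for a given guest RAM size. */
static void print_usable_ranges(uint64_t ram_size)
{
	if (ram_size < KVM_32BIT_GAP_START) {
		printf("usable: %#llx - %#llx\n",
		       (unsigned long long)BZ_KERNEL_START,
		       (unsigned long long)ram_size);
	} else {
		printf("usable: %#llx - %#llx\n",
		       (unsigned long long)BZ_KERNEL_START,
		       (unsigned long long)KVM_32BIT_GAP_START);
		printf("usable: %#llx - %#llx\n",
		       (unsigned long long)(1ULL << 32),
		       (unsigned long long)((1ULL << 32) + ram_size - KVM_32BIT_GAP_START));
	}
}

int main(void)
{
	print_usable_ranges(2ULL << 30);	/* 2 GB guest: one usable range  */
	print_usable_ranges(6ULL << 30);	/* 6 GB guest: split around 4 GB */
	return 0;
}
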
6 tools/kvm/cpuid.c
@@ -32,19 +32,19 @@ static void filter_cpuid(struct kvm_cpuid2 *kvm_cpuid)
}
}
-void kvm_cpu__setup_cpuid(struct kvm_cpu *self)
+void kvm_cpu__setup_cpuid(struct kvm_cpu *vcpu)
{
struct kvm_cpuid2 *kvm_cpuid;
kvm_cpuid = calloc(1, sizeof(*kvm_cpuid) + MAX_KVM_CPUID_ENTRIES * sizeof(*kvm_cpuid->entries));
kvm_cpuid->nent = MAX_KVM_CPUID_ENTRIES;
- if (ioctl(self->kvm->sys_fd, KVM_GET_SUPPORTED_CPUID, kvm_cpuid) < 0)
+ if (ioctl(vcpu->kvm->sys_fd, KVM_GET_SUPPORTED_CPUID, kvm_cpuid) < 0)
die_perror("KVM_GET_SUPPORTED_CPUID failed");
filter_cpuid(kvm_cpuid);
- if (ioctl(self->vcpu_fd, KVM_SET_CPUID2, kvm_cpuid) < 0)
+ if (ioctl(vcpu->vcpu_fd, KVM_SET_CPUID2, kvm_cpuid) < 0)
die_perror("KVM_SET_CPUID2 failed");
free(kvm_cpuid);
78 tools/kvm/disk-image.c
@@ -19,74 +19,74 @@
struct disk_image *disk_image__new(int fd, u64 size, struct disk_image_operations *ops)
{
- struct disk_image *self;
+ struct disk_image *disk;
- self = malloc(sizeof *self);
- if (!self)
+ disk = malloc(sizeof *disk);
+ if (!disk)
return NULL;
- self->fd = fd;
- self->size = size;
- self->ops = ops;
- return self;
+ disk->fd = fd;
+ disk->size = size;
+ disk->ops = ops;
+ return disk;
}
struct disk_image *disk_image__new_readonly(int fd, u64 size, struct disk_image_operations *ops)
{
- struct disk_image *self;
+ struct disk_image *disk;
- self = disk_image__new(fd, size, ops);
- if (!self)
+ disk = disk_image__new(fd, size, ops);
+ if (!disk)
return NULL;
- self->priv = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_NORESERVE, fd, 0);
- if (self->priv == MAP_FAILED)
+ disk->priv = mmap(NULL, size, PROT_RW, MAP_PRIVATE | MAP_NORESERVE, fd, 0);
+ if (disk->priv == MAP_FAILED)
die("mmap() failed");
- return self;
+ return disk;
}
-static ssize_t raw_image__read_sector_iov(struct disk_image *self, u64 sector, const struct iovec *iov, int iovcount)
+static ssize_t raw_image__read_sector_iov(struct disk_image *disk, u64 sector, const struct iovec *iov, int iovcount)
{
u64 offset = sector << SECTOR_SHIFT;
- return preadv_in_full(self->fd, iov, iovcount, offset);
+ return preadv_in_full(disk->fd, iov, iovcount, offset);
}
-static ssize_t raw_image__write_sector_iov(struct disk_image *self, u64 sector, const struct iovec *iov, int iovcount)
+static ssize_t raw_image__write_sector_iov(struct disk_image *disk, u64 sector, const struct iovec *iov, int iovcount)
{
u64 offset = sector << SECTOR_SHIFT;
- return pwritev_in_full(self->fd, iov, iovcount, offset);
+ return pwritev_in_full(disk->fd, iov, iovcount, offset);
}
-static int raw_image__read_sector_ro_mmap(struct disk_image *self, u64 sector, void *dst, u32 dst_len)
+static int raw_image__read_sector_ro_mmap(struct disk_image *disk, u64 sector, void *dst, u32 dst_len)
{
u64 offset = sector << SECTOR_SHIFT;
- if (offset + dst_len > self->size)
+ if (offset + dst_len > disk->size)
return -1;
- memcpy(dst, self->priv + offset, dst_len);
+ memcpy(dst, disk->priv + offset, dst_len);
return 0;
}
-static int raw_image__write_sector_ro_mmap(struct disk_image *self, u64 sector, void *src, u32 src_len)
+static int raw_image__write_sector_ro_mmap(struct disk_image *disk, u64 sector, void *src, u32 src_len)
{
u64 offset = sector << SECTOR_SHIFT;
- if (offset + src_len > self->size)
+ if (offset + src_len > disk->size)
return -1;
- memcpy(self->priv + offset, src, src_len);
+ memcpy(disk->priv + offset, src, src_len);
return 0;
}
-static void raw_image__close_ro_mmap(struct disk_image *self)
+static void raw_image__close_ro_mmap(struct disk_image *disk)
{
- if (self->priv != MAP_FAILED)
- munmap(self->priv, self->size);
+ if (disk->priv != MAP_FAILED)
+ munmap(disk->priv, disk->size);
}
static struct disk_image_operations raw_image_ops = {
@@ -130,7 +130,7 @@ static struct disk_image *blkdev__probe(const char *filename, struct stat *st)
struct disk_image *disk_image__open(const char *filename, bool readonly)
{
- struct disk_image *self;
+ struct disk_image *disk;
struct stat st;
int fd;
@@ -144,13 +144,13 @@ struct disk_image *disk_image__open(const char *filename, bool readonly)
if (fd < 0)
return NULL;
- self = qcow_probe(fd);
- if (self)
- return self;
+ disk = qcow_probe(fd, readonly);
+ if (disk)
+ return disk;
- self = raw_image__probe(fd, &st, readonly);
- if (self)
- return self;
+ disk = raw_image__probe(fd, &st, readonly);
+ if (disk)
+ return disk;
if (close(fd) < 0)
warning("close() failed");
@@ -158,17 +158,17 @@ struct disk_image *disk_image__open(const char *filename, bool readonly)
return NULL;
}
-void disk_image__close(struct disk_image *self)
+void disk_image__close(struct disk_image *disk)
{
/* If there was no disk image then there's nothing to do: */
- if (!self)
+ if (!disk)
return;
- if (self->ops->close)
- self->ops->close(self);
+ if (disk->ops->close)
+ disk->ops->close(disk);
- if (close(self->fd) < 0)
+ if (close(disk->fd) < 0)
warning("close() failed");
- free(self);
+ free(disk);
}
40 tools/kvm/include/kvm/disk-image.h
@@ -11,11 +11,11 @@
struct disk_image;
struct disk_image_operations {
- int (*read_sector)(struct disk_image *self, u64 sector, void *dst, u32 dst_len);
- int (*write_sector)(struct disk_image *self, u64 sector, void *src, u32 src_len);
- ssize_t (*read_sector_iov)(struct disk_image *self, u64 sector, const struct iovec *iov, int iovcount);
- ssize_t (*write_sector_iov)(struct disk_image *self, u64 sector, const struct iovec *iov, int iovcount);
- void (*close)(struct disk_image *self);
+ int (*read_sector)(struct disk_image *disk, u64 sector, void *dst, u32 dst_len);
+ int (*write_sector)(struct disk_image *disk, u64 sector, void *src, u32 src_len);
+ ssize_t (*read_sector_iov)(struct disk_image *disk, u64 sector, const struct iovec *iov, int iovcount);
+ ssize_t (*write_sector_iov)(struct disk_image *disk, u64 sector, const struct iovec *iov, int iovcount);
+ void (*close)(struct disk_image *disk);
};
struct disk_image {
@@ -28,25 +28,27 @@ struct disk_image {
struct disk_image *disk_image__open(const char *filename, bool readonly);
struct disk_image *disk_image__new(int fd, u64 size, struct disk_image_operations *ops);
struct disk_image *disk_image__new_readonly(int fd, u64 size, struct disk_image_operations *ops);
-void disk_image__close(struct disk_image *self);
+void disk_image__close(struct disk_image *disk);
-static inline int disk_image__read_sector(struct disk_image *self, u64 sector, void *dst, u32 dst_len)
+static inline int disk_image__read_sector(struct disk_image *disk, u64 sector, void *dst, u32 dst_len)
{
- return self->ops->read_sector(self, sector, dst, dst_len);
+ return disk->ops->read_sector(disk, sector, dst, dst_len);
}
-static inline int disk_image__write_sector(struct disk_image *self, u64 sector, void *src, u32 src_len)
+static inline int disk_image__write_sector(struct disk_image *disk, u64 sector, void *src, u32 src_len)
{
- return self->ops->write_sector(self, sector, src, src_len);
+ return disk->ops->write_sector(disk, sector, src, src_len);
}
-static inline ssize_t disk_image__read_sector_iov(struct disk_image *self, u64 sector, const struct iovec *iov, int iovcount)
+static inline ssize_t disk_image__read_sector_iov(struct disk_image *disk, u64 sector, const struct iovec *iov, int iovcount)
{
- if (self->ops->read_sector_iov)
- return self->ops->read_sector_iov(self, sector, iov, iovcount);
+ if (disk->ops->read_sector_iov)
+ return disk->ops->read_sector_iov(disk, sector, iov, iovcount);
while (iovcount--) {
- self->ops->read_sector(self, sector, iov->iov_base, iov->iov_len);
+ if (disk->ops->read_sector(disk, sector, iov->iov_base, iov->iov_len) < 0)
+ return -1;
+
sector += iov->iov_len >> SECTOR_SHIFT;
iov++;
}
@@ -54,13 +56,15 @@ static inline ssize_t disk_image__read_sector_iov(struct disk_image *self, u64 s
return sector << SECTOR_SHIFT;
}
-static inline ssize_t disk_image__write_sector_iov(struct disk_image *self, u64 sector, const struct iovec *iov, int iovcount)
+static inline ssize_t disk_image__write_sector_iov(struct disk_image *disk, u64 sector, const struct iovec *iov, int iovcount)
{
- if (self->ops->write_sector_iov)
- return self->ops->write_sector_iov(self, sector, iov, iovcount);
+ if (disk->ops->write_sector_iov)
+ return disk->ops->write_sector_iov(disk, sector, iov, iovcount);
while (iovcount--) {
- self->ops->write_sector(self, sector, iov->iov_base, iov->iov_len);
+ if (disk->ops->write_sector(disk, sector, iov->iov_base, iov->iov_len) < 0)
+ return -1;
+
sector += iov->iov_len >> SECTOR_SHIFT;
iov++;
}
2  tools/kvm/include/kvm/e820.h
@@ -8,7 +8,7 @@
#define E820_MEM_USABLE 1
#define E820_MEM_RESERVED 2
-#define E820_MEM_AREAS 4
+#define E820_MEM_AREAS 5
struct e820_entry {
u64 addr; /* start of memory segment */
6 tools/kvm/include/kvm/interrupt.h
@@ -19,8 +19,8 @@ struct interrupt_table {
struct real_intr_desc entries[REAL_INTR_VECTORS];
};
-void interrupt_table__copy(struct interrupt_table *self, void *dst, unsigned int size);
-void interrupt_table__setup(struct interrupt_table *self, struct real_intr_desc *entry);
-void interrupt_table__set(struct interrupt_table *self, struct real_intr_desc *entry, unsigned int num);
+void interrupt_table__copy(struct interrupt_table *itable, void *dst, unsigned int size);
+void interrupt_table__setup(struct interrupt_table *itable, struct real_intr_desc *entry);
+void interrupt_table__set(struct interrupt_table *itable, struct real_intr_desc *entry, unsigned int num);
#endif /* KVM__INTERRUPT_H */
4 tools/kvm/include/kvm/ioport.h
@@ -21,8 +21,8 @@
struct kvm;
struct ioport_operations {
- bool (*io_in)(struct kvm *self, u16 port, void *data, int size, u32 count);
- bool (*io_out)(struct kvm *self, u16 port, void *data, int size, u32 count);
+ bool (*io_in)(struct kvm *kvm, u16 port, void *data, int size, u32 count);
+ bool (*io_out)(struct kvm *kvm, u16 port, void *data, int size, u32 count);
};
void ioport__setup_legacy(void);
16 tools/kvm/include/kvm/kvm-cpu.h
@@ -24,15 +24,15 @@ struct kvm_cpu {
};
struct kvm_cpu *kvm_cpu__init(struct kvm *kvm, unsigned long cpu_id);
-void kvm_cpu__delete(struct kvm_cpu *self);
-void kvm_cpu__reset_vcpu(struct kvm_cpu *self);
-void kvm_cpu__setup_cpuid(struct kvm_cpu *self);
-void kvm_cpu__enable_singlestep(struct kvm_cpu *self);
-void kvm_cpu__run(struct kvm_cpu *self);
+void kvm_cpu__delete(struct kvm_cpu *vcpu);
+void kvm_cpu__reset_vcpu(struct kvm_cpu *vcpu);
+void kvm_cpu__setup_cpuid(struct kvm_cpu *vcpu);
+void kvm_cpu__enable_singlestep(struct kvm_cpu *vcpu);
+void kvm_cpu__run(struct kvm_cpu *vcpu);
int kvm_cpu__start(struct kvm_cpu *cpu);
-void kvm_cpu__show_code(struct kvm_cpu *self);
-void kvm_cpu__show_registers(struct kvm_cpu *self);
-void kvm_cpu__show_page_tables(struct kvm_cpu *self);
+void kvm_cpu__show_code(struct kvm_cpu *vcpu);
+void kvm_cpu__show_registers(struct kvm_cpu *vcpu);
+void kvm_cpu__show_page_tables(struct kvm_cpu *vcpu);
#endif /* KVM__KVM_CPU_H */
36 tools/kvm/include/kvm/kvm.h
@@ -8,6 +8,8 @@
#include <time.h>
#define KVM_NR_CPUS (255)
+#define KVM_32BIT_GAP_SIZE (512 << 20)
+#define KVM_32BIT_GAP_START ((1ULL << 32) - KVM_32BIT_GAP_SIZE)
struct kvm {
int sys_fd; /* For system ioctls(), i.e. /dev/kvm */
@@ -26,31 +28,33 @@ struct kvm {
u16 boot_sp;
struct interrupt_table interrupt_table;
+
+ const char *vmlinux;
};
struct kvm *kvm__init(const char *kvm_dev, unsigned long ram_size);
-int kvm__max_cpus(struct kvm *self);
-void kvm__init_ram(struct kvm *self);
-void kvm__delete(struct kvm *self);
+int kvm__max_cpus(struct kvm *kvm);
+void kvm__init_ram(struct kvm *kvm);
+void kvm__delete(struct kvm *kvm);
bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
const char *initrd_filename, const char *kernel_cmdline);
-void kvm__setup_bios(struct kvm *self);
-void kvm__start_timer(struct kvm *self);
-void kvm__stop_timer(struct kvm *self);
-void kvm__irq_line(struct kvm *self, int irq, int level);
-bool kvm__emulate_io(struct kvm *self, u16 port, void *data, int direction, int size, u32 count);
-bool kvm__emulate_mmio(struct kvm *self, u64 phys_addr, u8 *data, u32 len, u8 is_write);
+void kvm__setup_bios(struct kvm *kvm);
+void kvm__start_timer(struct kvm *kvm);
+void kvm__stop_timer(struct kvm *kvm);
+void kvm__irq_line(struct kvm *kvm, int irq, int level);
+bool kvm__emulate_io(struct kvm *kvm, u16 port, void *data, int direction, int size, u32 count);
+bool kvm__emulate_mmio(struct kvm *kvm, u64 phys_addr, u8 *data, u32 len, u8 is_write);
/*
* Debugging
*/
-void kvm__dump_mem(struct kvm *self, unsigned long addr, unsigned long size);
+void kvm__dump_mem(struct kvm *kvm, unsigned long addr, unsigned long size);
extern const char *kvm_exit_reasons[];
-static inline bool host_ptr_in_ram(struct kvm *self, void *p)
+static inline bool host_ptr_in_ram(struct kvm *kvm, void *p)
{
- return self->ram_start <= p && p < (self->ram_start + self->ram_size);
+ return kvm->ram_start <= p && p < (kvm->ram_start + kvm->ram_size);
}
static inline u32 segment_to_flat(u16 selector, u16 offset)
@@ -58,16 +62,16 @@ static inline u32 segment_to_flat(u16 selector, u16 offset)
return ((u32)selector << 4) + (u32) offset;
}
-static inline void *guest_flat_to_host(struct kvm *self, unsigned long offset)
+static inline void *guest_flat_to_host(struct kvm *kvm, unsigned long offset)
{
- return self->ram_start + offset;
+ return kvm->ram_start + offset;
}
-static inline void *guest_real_to_host(struct kvm *self, u16 selector, u16 offset)
+static inline void *guest_real_to_host(struct kvm *kvm, u16 selector, u16 offset)
{
unsigned long flat = segment_to_flat(selector, offset);
- return guest_flat_to_host(self, flat);
+ return guest_flat_to_host(kvm, flat);
}
#endif /* KVM__KVM_H */
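
For reference, segment_to_flat() and guest_real_to_host() above implement the usual real-mode address arithmetic (selector * 16 + offset) on top of the flat guest RAM mapping. A quick self-contained check of that math:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as segment_to_flat() in kvm.h. */
static uint32_t segment_to_flat(uint16_t selector, uint16_t offset)
{
	return ((uint32_t)selector << 4) + (uint32_t)offset;
}

int main(void)
{
	/* 0x1000:0x0000 is the boot loader selector used later in this commit. */
	printf("0x1000:0x0000 -> %#x\n", segment_to_flat(0x1000, 0x0000)); /* 0x10000  */
	printf("0xffff:0x0010 -> %#x\n", segment_to_flat(0xffff, 0x0010)); /* 0x100000 */
	return 0;
}
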
3  tools/kvm/include/kvm/qcow.h
@@ -2,6 +2,7 @@
#define KVM__QCOW_H
#include <linux/types.h>
+#include <stdbool.h>
#define QCOW_MAGIC (('Q' << 24) | ('F' << 16) | ('I' << 8) | 0xfb)
@@ -74,6 +75,6 @@ struct qcow2_header_disk {
u64 snapshots_offset;
};
-struct disk_image *qcow_probe(int fd);
+struct disk_image *qcow_probe(int fd, bool readonly);
#endif /* KVM__QCOW_H */
12 tools/kvm/include/kvm/symbol.h
@@ -0,0 +1,12 @@
+#ifndef KVM__SYMBOL_H
+#define KVM__SYMBOL_H
+
+#include <stddef.h>
+
+struct kvm;
+
+void symbol__init(const char *vmlinux);
+
+char *symbol__lookup(struct kvm *kvm, unsigned long addr, char *sym, size_t size);
+
+#endif /* KVM__SYMBOL_H */
13 tools/kvm/include/kvm/util.h
@@ -15,6 +15,7 @@
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
+#include <stdbool.h>
#include <errno.h>
#include <limits.h>
#include <sys/param.h>
@@ -29,6 +30,11 @@
#endif
#endif
+extern bool do_debug_print;
+
+#define PROT_RW (PROT_READ|PROT_WRITE)
+#define MAP_ANON_NORESERVE (MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE)
+
extern void die(const char *err, ...) NORETURN __attribute__((format (printf, 1, 2)));
extern void die_perror(const char *s) NORETURN;
extern int error(const char *err, ...) __attribute__((format (printf, 1, 2)));
@@ -36,6 +42,13 @@ extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2)))
extern void info(const char *err, ...) __attribute__((format (printf, 1, 2)));
extern void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN);
+#define debug(fmt, ...) \
+ do { \
+ if (do_debug_print) \
+ info("(%s) %s:%d: " fmt, __FILE__, \
+ __func__, __LINE__, ##__VA_ARGS__); \
+ } while (0)
+
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
#define DIE_IF(cnd) \
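
The debug() macro added above is a runtime-gated variadic macro. A minimal standalone sketch of the same pattern, with printf standing in for the tree's info() helper:

#include <stdbool.h>
#include <stdio.h>

static bool do_debug_print = true;	/* normally set by the --debug option */

#define debug(fmt, ...)						\
	do {							\
		if (do_debug_print)				\
			printf("(%s) %s:%d: " fmt "\n",		\
			       __FILE__, __func__, __LINE__,	\
			       ##__VA_ARGS__);			\
	} while (0)

int main(void)
{
	debug("starting %d vcpus", 4);	/* printed while enabled */
	do_debug_print = false;
	debug("now suppressed");	/* no output */
	return 0;
}
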
2  tools/kvm/include/kvm/virtio-blk.h
@@ -5,6 +5,6 @@
struct kvm;
-void virtio_blk__init(struct kvm *self, struct disk_image *disk);
+void virtio_blk__init(struct kvm *kvm, struct disk_image *disk);
#endif /* KVM__BLK_VIRTIO_H */
4 tools/kvm/include/kvm/virtio-console.h
@@ -3,7 +3,7 @@
struct kvm;
-void virtio_console__init(struct kvm *self);
-void virtio_console__inject_interrupt(struct kvm *self);
+void virtio_console__init(struct kvm *kvm);
+void virtio_console__inject_interrupt(struct kvm *kvm);
#endif /* KVM__CONSOLE_VIRTIO_H */
2  tools/kvm/include/kvm/virtio-net.h
@@ -4,7 +4,7 @@
struct kvm;
struct virtio_net_parameters {
- struct kvm *self;
+ struct kvm *kvm;
const char *host_ip;
char guest_mac[6];
const char *script;
3  tools/kvm/include/kvm/virtio-pci-dev.h
@@ -16,4 +16,7 @@
#define PCI_SUBSYSTEM_ID_VIRTIO_CONSOLE 0x0003
#define PCI_SUBSYSTEM_ID_VIRTIO_RNG 0x0004
+#define PCI_VENDOR_ID_REDHAT_QUMRANET 0x1af4
+#define PCI_SUBSYSTEM_VENDOR_ID_REDHAT_QUMRANET 0x1af4
+
#endif /* VIRTIO_PCI_DEV_H_ */
74 tools/kvm/include/kvm/virtio-pci.h
@@ -1,74 +0,0 @@
-/*
- * Virtio PCI driver
- *
- * This module allows virtio devices to be used over a virtual PCI device.
- * This can be used with QEMU based VMMs like KVM or Xen.
- *
- * Copyright IBM Corp. 2007
- *
- * Authors:
- * Anthony Liguori <aliguori@us.ibm.com>
- *
- * This header is BSD licensed so anyone can use the definitions to implement
- * compatible drivers/servers.
- */
-
-#ifndef _LINUX_VIRTIO_PCI_H
-#define _LINUX_VIRTIO_PCI_H
-
-/* A 32-bit r/o bitmask of the features supported by the host */
-#define VIRTIO_PCI_HOST_FEATURES 0
-
-/* A 32-bit r/w bitmask of features activated by the guest */
-#define VIRTIO_PCI_GUEST_FEATURES 4
-
-/* A 32-bit r/w PFN for the currently selected queue */
-#define VIRTIO_PCI_QUEUE_PFN 8
-
-/* A 16-bit r/o queue size for the currently selected queue */
-#define VIRTIO_PCI_QUEUE_NUM 12
-
-/* A 16-bit r/w queue selector */
-#define VIRTIO_PCI_QUEUE_SEL 14
-
-/* A 16-bit r/w queue notifier */
-#define VIRTIO_PCI_QUEUE_NOTIFY 16
-
-/* An 8-bit device status register */
-#define VIRTIO_PCI_STATUS 18
-
-/*
- * An 8-bit r/o interrupt status register.
- *
- * Reading the value will return the current contents of
- * the ISR and will also clear it. This is effectively
- * a read-and-acknowledge.
- */
-#define VIRTIO_PCI_ISR 19
-
-/*
- * MSI-X registers: only enabled if MSI-X is enabled.
- */
-
-/* A 16-bit vector for configuration changes */
-#define VIRTIO_MSI_CONFIG_VECTOR 20
-
-/* A 16-bit vector for selected queue notifications */
-#define VIRTIO_MSI_QUEUE_VECTOR 22
-
-/* Vector value used to disable MSI for queue */
-#define VIRTIO_MSI_NO_VECTOR 0xffff
-
-/*
- * Config space size.
- */
-#define VIRTIO_PCI_CONFIG_NOMSI 20
-#define VIRTIO_PCI_CONFIG_MSI 24
-
-/*
- * Virtio config space constants.
- */
-#define PCI_VENDOR_ID_REDHAT_QUMRANET 0x1af4
-#define PCI_SUBSYSTEM_VENDOR_ID_REDHAT_QUMRANET 0x1af4
-
-#endif /* _LINUX_VIRTIO_PCI_H */
11 tools/kvm/include/kvm/virtio.h
@@ -2,6 +2,7 @@
#define KVM__VIRTIO_H
#include <linux/virtio_ring.h>
+#include <linux/virtio_pci.h>
#include <linux/types.h>
#include <sys/uio.h>
@@ -36,6 +37,16 @@ static inline bool virt_queue__available(struct virt_queue *vq)
return vq->vring.avail->idx != vq->last_avail_idx;
}
+/*
+ * Warning: on 32-bit hosts, shifting pfn left may cause a truncation of pfn values
+ * higher than 4GB - thus, pointing to the wrong area in guest virtual memory space
+ * and breaking the virt queue which owns this pfn.
+ */
+static inline void *guest_pfn_to_host(struct kvm *kvm, u32 pfn)
+{
+ return guest_flat_to_host(kvm, (unsigned long)pfn << VIRTIO_PCI_QUEUE_ADDR_SHIFT);
+}
+
struct vring_used_elem *virt_queue__set_used_elem(struct virt_queue *queue, u32 head, u32 len);
u16 virt_queue__get_iov(struct virt_queue *queue, struct iovec iov[], u16 *out, u16 *in, struct kvm *kvm);
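
The cast in guest_pfn_to_host() is what the warning above is about: a queue PFN is a 32-bit page number, and shifting it left by VIRTIO_PCI_QUEUE_ADDR_SHIFT (12, from linux/virtio_pci.h) in 32-bit arithmetic silently drops the high bits for guest addresses at or above 4 GB. A small standalone illustration:

#include <stdint.h>
#include <stdio.h>

#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12	/* queue PFNs count 4 KB pages */

int main(void)
{
	uint32_t pfn = 0x140000;	/* a page at the 5 GB mark */

	/* 32-bit shift: wraps, the high bits of the address are lost. */
	uint32_t truncated = pfn << VIRTIO_PCI_QUEUE_ADDR_SHIFT;

	/* Widen first, as the cast in guest_pfn_to_host() does; still unsafe
	 * where 'unsigned long' is only 32 bits, which is the warning above. */
	uint64_t widened = (uint64_t)pfn << VIRTIO_PCI_QUEUE_ADDR_SHIFT;

	printf("truncated: %#x\n", truncated);				/* 0x40000000  */
	printf("widened:   %#llx\n", (unsigned long long)widened);	/* 0x140000000 */
	return 0;
}
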
14 tools/kvm/interrupt.c
@@ -4,24 +4,24 @@
#include <string.h>
-void interrupt_table__copy(struct interrupt_table *self, void *dst, unsigned int size)
+void interrupt_table__copy(struct interrupt_table *itable, void *dst, unsigned int size)
{
- if (size < sizeof(self->entries))
+ if (size < sizeof(itable->entries))
die("An attempt to overwrite host memory");
- memcpy(dst, self->entries, sizeof(self->entries));
+ memcpy(dst, itable->entries, sizeof(itable->entries));
}
-void interrupt_table__setup(struct interrupt_table *self, struct real_intr_desc *entry)
+void interrupt_table__setup(struct interrupt_table *itable, struct real_intr_desc *entry)
{
unsigned int i;
for (i = 0; i < REAL_INTR_VECTORS; i++)
- self->entries[i] = *entry;
+ itable->entries[i] = *entry;
}
-void interrupt_table__set(struct interrupt_table *self, struct real_intr_desc *entry, unsigned int num)
+void interrupt_table__set(struct interrupt_table *itable, struct real_intr_desc *entry, unsigned int num)
{
if (num < REAL_INTR_VECTORS)
- self->entries[num] = *entry;
+ itable->entries[num] = *entry;
}
12 tools/kvm/ioport.c
@@ -13,7 +13,7 @@
bool ioport_debug;
-static bool debug_io_out(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool debug_io_out(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
exit(EXIT_SUCCESS);
}
@@ -22,12 +22,12 @@ static struct ioport_operations debug_ops = {
.io_out = debug_io_out,
};
-static bool dummy_io_in(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool dummy_io_in(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
return true;
}
-static bool dummy_io_out(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool dummy_io_out(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
return true;
}
@@ -64,7 +64,7 @@ static void ioport_error(u16 port, void *data, int direction, int size, u32 coun
fprintf(stderr, "IO error: %s port=%x, size=%d, count=%u\n", to_direction(direction), port, size, count);
}
-bool kvm__emulate_io(struct kvm *self, u16 port, void *data, int direction, int size, u32 count)
+bool kvm__emulate_io(struct kvm *kvm, u16 port, void *data, int direction, int size, u32 count)
{
struct ioport_operations *ops = ioport_ops[port];
bool ret;
@@ -76,14 +76,14 @@ bool kvm__emulate_io(struct kvm *self, u16 port, void *data, int direction, int
if (!ops->io_in)
goto error;
- ret = ops->io_in(self, port, data, size, count);
+ ret = ops->io_in(kvm, port, data, size, count);
if (!ret)
goto error;
} else {
if (!ops->io_out)
goto error;
- ret = ops->io_out(self, port, data, size, count);
+ ret = ops->io_out(kvm, port, data, size, count);
if (!ret)
goto error;
}
205 tools/kvm/kvm-cpu.c
@@ -1,5 +1,6 @@
#include "kvm/kvm-cpu.h"
+#include "kvm/symbol.h"
#include "kvm/util.h"
#include "kvm/kvm.h"
@@ -9,15 +10,16 @@
#include <sys/mman.h>
#include <signal.h>
#include <stdlib.h>
+#include <string.h>
#include <errno.h>
#include <stdio.h>
-static inline bool is_in_protected_mode(struct kvm_cpu *self)
+static inline bool is_in_protected_mode(struct kvm_cpu *vcpu)
{
- return self->sregs.cr0 & 0x01;
+ return vcpu->sregs.cr0 & 0x01;
}
-static inline u64 ip_to_flat(struct kvm_cpu *self, u64 ip)
+static inline u64 ip_to_flat(struct kvm_cpu *vcpu, u64 ip)
{
u64 cs;
@@ -25,10 +27,10 @@ static inline u64 ip_to_flat(struct kvm_cpu *self, u64 ip)
* NOTE! We should take code segment base address into account here.
* Luckily it's usually zero because Linux uses flat memory model.
*/
- if (is_in_protected_mode(self))
+ if (is_in_protected_mode(vcpu))
return ip;
- cs = self->sregs.cs.selector;
+ cs = vcpu->sregs.cs.selector;
return ip + (cs << 4);
}
@@ -43,159 +45,159 @@ static inline u32 selector_to_base(u16 selector)
static struct kvm_cpu *kvm_cpu__new(struct kvm *kvm)
{
- struct kvm_cpu *self;
+ struct kvm_cpu *vcpu;
- self = calloc(1, sizeof *self);
- if (!self)
+ vcpu = calloc(1, sizeof *vcpu);
+ if (!vcpu)
return NULL;
- self->kvm = kvm;
+ vcpu->kvm = kvm;
- return self;
+ return vcpu;
}
-void kvm_cpu__delete(struct kvm_cpu *self)
+void kvm_cpu__delete(struct kvm_cpu *vcpu)
{
- if (self->msrs)
- free(self->msrs);
+ if (vcpu->msrs)
+ free(vcpu->msrs);
- free(self);
+ free(vcpu);
}
struct kvm_cpu *kvm_cpu__init(struct kvm *kvm, unsigned long cpu_id)
{
- struct kvm_cpu *self;
+ struct kvm_cpu *vcpu;
int mmap_size;
- self = kvm_cpu__new(kvm);
- if (!self)
+ vcpu = kvm_cpu__new(kvm);
+ if (!vcpu)
return NULL;
- self->cpu_id = cpu_id;
+ vcpu->cpu_id = cpu_id;
- self->vcpu_fd = ioctl(self->kvm->vm_fd, KVM_CREATE_VCPU, cpu_id);
- if (self->vcpu_fd < 0)
+ vcpu->vcpu_fd = ioctl(vcpu->kvm->vm_fd, KVM_CREATE_VCPU, cpu_id);
+ if (vcpu->vcpu_fd < 0)
die_perror("KVM_CREATE_VCPU ioctl");
- mmap_size = ioctl(self->kvm->sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
+ mmap_size = ioctl(vcpu->kvm->sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
if (mmap_size < 0)
die_perror("KVM_GET_VCPU_MMAP_SIZE ioctl");
- self->kvm_run = mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED, self->vcpu_fd, 0);
- if (self->kvm_run == MAP_FAILED)
+ vcpu->kvm_run = mmap(NULL, mmap_size, PROT_RW, MAP_SHARED, vcpu->vcpu_fd, 0);
+ if (vcpu->kvm_run == MAP_FAILED)
die("unable to mmap vcpu fd");
- return self;
+ return vcpu;
}
-void kvm_cpu__enable_singlestep(struct kvm_cpu *self)
+void kvm_cpu__enable_singlestep(struct kvm_cpu *vcpu)
{
struct kvm_guest_debug debug = {
.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
};
- if (ioctl(self->vcpu_fd, KVM_SET_GUEST_DEBUG, &debug) < 0)
+ if (ioctl(vcpu->vcpu_fd, KVM_SET_GUEST_DEBUG, &debug) < 0)
warning("KVM_SET_GUEST_DEBUG failed");
}
static struct kvm_msrs *kvm_msrs__new(size_t nmsrs)
{
- struct kvm_msrs *self = calloc(1, sizeof(*self) + (sizeof(struct kvm_msr_entry) * nmsrs));
+ struct kvm_msrs *vcpu = calloc(1, sizeof(*vcpu) + (sizeof(struct kvm_msr_entry) * nmsrs));
- if (!self)
+ if (!vcpu)
die("out of memory");
- return self;
+ return vcpu;
}
#define KVM_MSR_ENTRY(_index, _data) \
(struct kvm_msr_entry) { .index = _index, .data = _data }
-static void kvm_cpu__setup_msrs(struct kvm_cpu *self)
+static void kvm_cpu__setup_msrs(struct kvm_cpu *vcpu)
{
unsigned long ndx = 0;
- self->msrs = kvm_msrs__new(100);
+ vcpu->msrs = kvm_msrs__new(100);
- self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_CS, 0x0);
- self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_ESP, 0x0);
- self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_EIP, 0x0);
+ vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_CS, 0x0);
+ vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_ESP, 0x0);
+ vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_EIP, 0x0);
#ifdef CONFIG_X86_64
- self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_STAR, 0x0);
- self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_CSTAR, 0x0);
- self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_KERNEL_GS_BASE, 0x0);
- self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_SYSCALL_MASK, 0x0);
- self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_LSTAR, 0x0);
+ vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_STAR, 0x0);
+ vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_CSTAR, 0x0);
+ vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_KERNEL_GS_BASE, 0x0);
+ vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_SYSCALL_MASK, 0x0);
+ vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_LSTAR, 0x0);
#endif
- self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_TSC, 0x0);
+ vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_TSC, 0x0);
- self->msrs->nmsrs = ndx;
+ vcpu->msrs->nmsrs = ndx;
- if (ioctl(self->vcpu_fd, KVM_SET_MSRS, self->msrs) < 0)
+ if (ioctl(vcpu->vcpu_fd, KVM_SET_MSRS, vcpu->msrs) < 0)
die_perror("KVM_SET_MSRS failed");
}
-static void kvm_cpu__setup_fpu(struct kvm_cpu *self)
+static void kvm_cpu__setup_fpu(struct kvm_cpu *vcpu)
{
- self->fpu = (struct kvm_fpu) {
+ vcpu->fpu = (struct kvm_fpu) {
.fcw = 0x37f,
.mxcsr = 0x1f80,
};
- if (ioctl(self->vcpu_fd, KVM_SET_FPU, &self->fpu) < 0)
+ if (ioctl(vcpu->vcpu_fd, KVM_SET_FPU, &vcpu->fpu) < 0)
die_perror("KVM_SET_FPU failed");
}
-static void kvm_cpu__setup_regs(struct kvm_cpu *self)
+static void kvm_cpu__setup_regs(struct kvm_cpu *vcpu)
{
- self->regs = (struct kvm_regs) {
+ vcpu->regs = (struct kvm_regs) {
/* We start the guest in 16-bit real mode */
.rflags = 0x0000000000000002ULL,
- .rip = self->kvm->boot_ip,
- .rsp = self->kvm->boot_sp,
- .rbp = self->kvm->boot_sp,
+ .rip = vcpu->kvm->boot_ip,
+ .rsp = vcpu->kvm->boot_sp,
+ .rbp = vcpu->kvm->boot_sp,
};
- if (self->regs.rip > USHRT_MAX)
- die("ip 0x%llx is too high for real mode", (u64) self->regs.rip);
+ if (vcpu->regs.rip > USHRT_MAX)
+ die("ip 0x%llx is too high for real mode", (u64) vcpu->regs.rip);
- if (ioctl(self->vcpu_fd, KVM_SET_REGS, &self->regs) < 0)
+ if (ioctl(vcpu->vcpu_fd, KVM_SET_REGS, &vcpu->regs) < 0)
die_perror("KVM_SET_REGS failed");
}
-static void kvm_cpu__setup_sregs(struct kvm_cpu *self)
+static void kvm_cpu__setup_sregs(struct kvm_cpu *vcpu)
{
- if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
+ if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &vcpu->sregs) < 0)
die_perror("KVM_GET_SREGS failed");
- self->sregs.cs.selector = self->kvm->boot_selector;
- self->sregs.cs.base = selector_to_base(self->kvm->boot_selector);
- self->sregs.ss.selector = self->kvm->boot_selector;
- self->sregs.ss.base = selector_to_base(self->kvm->boot_selector);
- self->sregs.ds.selector = self->kvm->boot_selector;
- self->sregs.ds.base = selector_to_base(self->kvm->boot_selector);
- self->sregs.es.selector = self->kvm->boot_selector;
- self->sregs.es.base = selector_to_base(self->kvm->boot_selector);
- self->sregs.fs.selector = self->kvm->boot_selector;
- self->sregs.fs.base = selector_to_base(self->kvm->boot_selector);
- self->sregs.gs.selector = self->kvm->boot_selector;
- self->sregs.gs.base = selector_to_base(self->kvm->boot_selector);
-
- if (ioctl(self->vcpu_fd, KVM_SET_SREGS, &self->sregs) < 0)
+ vcpu->sregs.cs.selector = vcpu->kvm->boot_selector;
+ vcpu->sregs.cs.base = selector_to_base(vcpu->kvm->boot_selector);
+ vcpu->sregs.ss.selector = vcpu->kvm->boot_selector;
+ vcpu->sregs.ss.base = selector_to_base(vcpu->kvm->boot_selector);
+ vcpu->sregs.ds.selector = vcpu->kvm->boot_selector;
+ vcpu->sregs.ds.base = selector_to_base(vcpu->kvm->boot_selector);
+ vcpu->sregs.es.selector = vcpu->kvm->boot_selector;
+ vcpu->sregs.es.base = selector_to_base(vcpu->kvm->boot_selector);
+ vcpu->sregs.fs.selector = vcpu->kvm->boot_selector;
+ vcpu->sregs.fs.base = selector_to_base(vcpu->kvm->boot_selector);
+ vcpu->sregs.gs.selector = vcpu->kvm->boot_selector;
+ vcpu->sregs.gs.base = selector_to_base(vcpu->kvm->boot_selector);
+
+ if (ioctl(vcpu->vcpu_fd, KVM_SET_SREGS, &vcpu->sregs) < 0)
die_perror("KVM_SET_SREGS failed");
}
/**
* kvm_cpu__reset_vcpu - reset virtual CPU to a known state
*/
-void kvm_cpu__reset_vcpu(struct kvm_cpu *self)
+void kvm_cpu__reset_vcpu(struct kvm_cpu *vcpu)
{
- kvm_cpu__setup_sregs(self);
- kvm_cpu__setup_regs(self);
- kvm_cpu__setup_fpu(self);
- kvm_cpu__setup_msrs(self);
+ kvm_cpu__setup_sregs(vcpu);
+ kvm_cpu__setup_regs(vcpu);
+ kvm_cpu__setup_fpu(vcpu);
+ kvm_cpu__setup_msrs(vcpu);
}
static void print_dtable(const char *name, struct kvm_dtable *dtable)
@@ -211,7 +213,7 @@ static void print_segment(const char *name, struct kvm_segment *seg)
(u8) seg->type, seg->present, seg->dpl, seg->db, seg->s, seg->l, seg->g, seg->avl);
}
-void kvm_cpu__show_registers(struct kvm_cpu *self)
+void kvm_cpu__show_registers(struct kvm_cpu *vcpu)
{
unsigned long cr0, cr2, cr3;
unsigned long cr4, cr8;
@@ -226,7 +228,7 @@ void kvm_cpu__show_registers(struct kvm_cpu *self)
struct kvm_regs regs;
int i;
- if (ioctl(self->vcpu_fd, KVM_GET_REGS, &regs) < 0)
+ if (ioctl(vcpu->vcpu_fd, KVM_GET_REGS, &regs) < 0)
die("KVM_GET_REGS failed");
rflags = regs.rflags;
@@ -247,7 +249,7 @@ void kvm_cpu__show_registers(struct kvm_cpu *self)
printf(" r10: %016lx r11: %016lx r12: %016lx\n", r10, r11, r12);
printf(" r13: %016lx r14: %016lx r15: %016lx\n", r13, r14, r15);
- if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
+ if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
die("KVM_GET_REGS failed");
cr0 = sregs.cr0; cr2 = sregs.cr2; cr3 = sregs.cr3;
@@ -273,7 +275,7 @@ void kvm_cpu__show_registers(struct kvm_cpu *self)
printf( " -----\n");
printf(" efer: %016llx apic base: %016llx nmi: %s\n",
(u64) sregs.efer, (u64) sregs.apic_base,
- (self->kvm->nmi_disabled ? "disabled" : "enabled"));
+ (vcpu->kvm->nmi_disabled ? "disabled" : "enabled"));
printf("\n Interrupt bitmap:\n");
printf( " -----------------\n");
@@ -282,33 +284,40 @@ void kvm_cpu__show_registers(struct kvm_cpu *self)
printf("\n");
}
-void kvm_cpu__show_code(struct kvm_cpu *self)
+#define MAX_SYM_LEN 128
+
+void kvm_cpu__show_code(struct kvm_cpu *vcpu)
{
unsigned int code_bytes = 64;
unsigned int code_prologue = code_bytes * 43 / 64;
unsigned int code_len = code_bytes;
+ char sym[MAX_SYM_LEN];
unsigned char c;
unsigned int i;
u8 *ip;
- if (ioctl(self->vcpu_fd, KVM_GET_REGS, &self->regs) < 0)
+ if (ioctl(vcpu->vcpu_fd, KVM_GET_REGS, &vcpu->regs) < 0)
die("KVM_GET_REGS failed");
- if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
+ if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &vcpu->sregs) < 0)
die("KVM_GET_SREGS failed");
- ip = guest_flat_to_host(self->kvm, ip_to_flat(self, self->regs.rip) - code_prologue);
+ ip = guest_flat_to_host(vcpu->kvm, ip_to_flat(vcpu, vcpu->regs.rip) - code_prologue);
printf("\n Code:\n");
printf( " -----\n");
+ symbol__lookup(vcpu->kvm, vcpu->regs.rip, sym, MAX_SYM_LEN);
+
+ printf(" rip: [<%016lx>] %s\n\n", (unsigned long) vcpu->regs.rip, sym);
+
for (i = 0; i < code_len; i++, ip++) {
- if (!host_ptr_in_ram(self->kvm, ip))
+ if (!host_ptr_in_ram(vcpu->kvm, ip))
break;
c = *ip;
- if (ip == guest_flat_to_host(self->kvm, ip_to_flat(self, self->regs.rip)))
+ if (ip == guest_flat_to_host(vcpu->kvm, ip_to_flat(vcpu, vcpu->regs.rip)))
printf(" <%02x>", c);
else
printf(" %02x", c);
@@ -318,36 +327,36 @@ void kvm_cpu__show_code(struct kvm_cpu *self)
printf("\n Stack:\n");
printf( " ------\n");
- kvm__dump_mem(self->kvm, self->regs.rsp, 32);
+ kvm__dump_mem(vcpu->kvm, vcpu->regs.rsp, 32);
}
-void kvm_cpu__show_page_tables(struct kvm_cpu *self)
+void kvm_cpu__show_page_tables(struct kvm_cpu *vcpu)
{
u64 *pte1;
u64 *pte2;
u64 *pte3;
u64 *pte4;
- if (!is_in_protected_mode(self))
+ if (!is_in_protected_mode(vcpu))
return;
- if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
+ if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &vcpu->sregs) < 0)
die("KVM_GET_SREGS failed");
- pte4 = guest_flat_to_host(self->kvm, self->sregs.cr3);
- if (!host_ptr_in_ram(self->kvm, pte4))
+ pte4 = guest_flat_to_host(vcpu->kvm, vcpu->sregs.cr3);
+ if (!host_ptr_in_ram(vcpu->kvm, pte4))
return;
- pte3 = guest_flat_to_host(self->kvm, (*pte4 & ~0xfff));
- if (!host_ptr_in_ram(self->kvm, pte3))
+ pte3 = guest_flat_to_host(vcpu->kvm, (*pte4 & ~0xfff));
+ if (!host_ptr_in_ram(vcpu->kvm, pte3))
return;
- pte2 = guest_flat_to_host(self->kvm, (*pte3 & ~0xfff));
- if (!host_ptr_in_ram(self->kvm, pte2))
+ pte2 = guest_flat_to_host(vcpu->kvm, (*pte3 & ~0xfff));
+ if (!host_ptr_in_ram(vcpu->kvm, pte2))
return;
- pte1 = guest_flat_to_host(self->kvm, (*pte2 & ~0xfff));
- if (!host_ptr_in_ram(self->kvm, pte1))
+ pte1 = guest_flat_to_host(vcpu->kvm, (*pte2 & ~0xfff));
+ if (!host_ptr_in_ram(vcpu->kvm, pte1))
return;
printf("Page Tables:\n");
@@ -361,11 +370,11 @@ void kvm_cpu__show_page_tables(struct kvm_cpu *self)
*pte4, *pte3, *pte2, *pte1);
}
-void kvm_cpu__run(struct kvm_cpu *self)
+void kvm_cpu__run(struct kvm_cpu *vcpu)
{
int err;
- err = ioctl(self->vcpu_fd, KVM_RUN, 0);
+ err = ioctl(vcpu->vcpu_fd, KVM_RUN, 0);
if (err && (errno != EINTR && errno != EAGAIN))
die_perror("KVM_RUN failed");
}
56 tools/kvm/kvm-run.c
@@ -29,6 +29,7 @@
#include <kvm/threadpool.h>
#include <kvm/barrier.h>
#include <kvm/pckbd.h>
+#include <kvm/symbol.h>
/* header files for gitish interface */
#include <kvm/kvm-run.h>
@@ -55,6 +56,7 @@ static u64 ram_size;
static u8 image_count;
static const char *kernel_cmdline;
static const char *kernel_filename;
+static const char *vmlinux_filename;
static const char *initrd_filename;
static const char *image_filename[MAX_DISK_IMAGES];
static const char *console;
@@ -70,6 +72,8 @@ static bool vnc;
extern bool ioport_debug;
extern int active_console;
+bool do_debug_print = false;
+
static int nrcpus = 1;
static const char * const run_usage[] = {
@@ -99,10 +103,10 @@ static int img_name_parser(const struct option *opt, const char *arg, int unset)
static const struct option options[] = {
OPT_GROUP("Basic options:"),
- OPT_INTEGER('\0', "cpus", &nrcpus, "Number of CPUs"),
+ OPT_INTEGER('c', "cpus", &nrcpus, "Number of CPUs"),
OPT_U64('m', "mem", &ram_size, "Virtual machine memory size in MiB."),
OPT_CALLBACK('i', "image", NULL, "image", "Disk image", img_name_parser),
- OPT_STRING('c', "console", &console, "serial or virtio",
+ OPT_STRING('\0', "console", &console, "serial or virtio",
"Console to use"),
OPT_BOOLEAN('\0', "virtio-rng", &virtio_rng,
"Enable virtio Random Number Generator"),
@@ -130,8 +134,9 @@ static const struct option options[] = {
"Enable single stepping"),
OPT_BOOLEAN('g', "ioport-debug", &ioport_debug,
"Enable ioport debugging"),
-
OPT_BOOLEAN('\0', "vnc", &vnc, "Enable VNC framebuffer"),
+ OPT_BOOLEAN('\0', "debug", &do_debug_print,
+ "Enable debug messages"),
OPT_END()
};
@@ -216,17 +221,25 @@ static void *kvm_cpu_thread(void *arg)
}
static char kernel[PATH_MAX];
-const char *host_kernels[] = {
+
+static const char *host_kernels[] = {
"/boot/vmlinuz",
"/boot/bzImage",
NULL
};
-const char *default_kernels[] = {
+
+static const char *default_kernels[] = {
"./bzImage",
"../../arch/x86/boot/bzImage",
NULL
};
+static const char *default_vmlinux[] = {
+ "../../../vmlinux",
+ "../../vmlinux",
+ NULL
+};
+
static void kernel_usage_with_options(void)
{
const char **k;
@@ -319,6 +332,23 @@ static const char *find_kernel(void)
return NULL;
}
+static const char *find_vmlinux(void)
+{
+ const char **vmlinux;
+
+ vmlinux = &default_vmlinux[0];
+ while (*vmlinux) {
+ struct stat st;
+
+ if (stat(*vmlinux, &st) < 0 || !S_ISREG(st.st_mode)) {
+ vmlinux++;
+ continue;
+ }
+ return *vmlinux;
+ }
+ return NULL;
+}
+
static int root_device(char *dev, long *part)
{
struct stat st;
@@ -361,13 +391,13 @@ static char *host_image(char *cmd_line, size_t size)
int kvm_cmd_run(int argc, const char **argv, const char *prefix)
{
+ struct virtio_net_parameters net_params;
static char real_cmdline[2048];
unsigned int nr_online_cpus;
- int max_cpus;
int exit_code = 0;
- int i;
- struct virtio_net_parameters net_params;
+ int max_cpus;
char *hi;
+ int i;
signal(SIGALRM, handle_sigalrm);
signal(SIGQUIT, handle_sigquit);
@@ -401,6 +431,8 @@ int kvm_cmd_run(int argc, const char **argv, const char *prefix)
return EINVAL;
}
+ vmlinux_filename = find_vmlinux();
+
if (nrcpus < 1 || nrcpus > KVM_NR_CPUS)
die("Number of CPUs %d is out of [1;%d] range", nrcpus, KVM_NR_CPUS);
@@ -435,6 +467,8 @@ int kvm_cmd_run(int argc, const char **argv, const char *prefix)
if (!script)
script = DEFAULT_SCRIPT;
+ symbol__init(vmlinux_filename);
+
term_init();
kvm = kvm__init(kvm_dev, ram_size);
@@ -481,10 +515,14 @@ int kvm_cmd_run(int argc, const char **argv, const char *prefix)
}
free(hi);
+ printf(" # kvm run -k %s -m %Lu -c %d\n", kernel_filename, ram_size / 1024 / 1024, nrcpus);
+
if (!kvm__load_kernel(kvm, kernel_filename, initrd_filename,
real_cmdline))
die("unable to load kernel %s", kernel_filename);
+ kvm->vmlinux = vmlinux_filename;
+
ioport__setup_legacy();
rtc__init();
@@ -507,7 +545,7 @@ int kvm_cmd_run(int argc, const char **argv, const char *prefix)
if (!strncmp(network, "virtio", 6)) {
net_params = (struct virtio_net_parameters) {
.host_ip = host_ip_addr,
- .self = kvm,
+ .kvm = kvm,
.script = script
};
sscanf(guest_mac, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
184 tools/kvm/kvm.c
@@ -67,23 +67,23 @@ struct {
{ DEFINE_KVM_EXT(KVM_CAP_EXT_CPUID) },
};
-static bool kvm__supports_extension(struct kvm *self, unsigned int extension)
+static bool kvm__supports_extension(struct kvm *kvm, unsigned int extension)
{
int ret;
- ret = ioctl(self->sys_fd, KVM_CHECK_EXTENSION, extension);
+ ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, extension);
if (ret < 0)
return false;
return ret;
}
-static int kvm__check_extensions(struct kvm *self)
+static int kvm__check_extensions(struct kvm *kvm)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(kvm_req_ext); i++) {
- if (!kvm__supports_extension(self, kvm_req_ext[i].code)) {
+ if (!kvm__supports_extension(kvm, kvm_req_ext[i].code)) {
error("Unsuppored KVM extension detected: %s",
kvm_req_ext[i].name);
return (int)-i;
@@ -95,20 +95,20 @@ static int kvm__check_extensions(struct kvm *self)
static struct kvm *kvm__new(void)
{
- struct kvm *self = calloc(1, sizeof *self);
+ struct kvm *kvm = calloc(1, sizeof *kvm);
- if (!self)
+ if (!kvm)
die("out of memory");
- return self;
+ return kvm;
}
-void kvm__delete(struct kvm *self)
+void kvm__delete(struct kvm *kvm)
{
- kvm__stop_timer(self);
+ kvm__stop_timer(kvm);
- munmap(self->ram_start, self->ram_size);
- free(self);
+ munmap(kvm->ram_start, kvm->ram_size);
+ free(kvm);
}
static bool kvm__cpu_supports_vm(void)
@@ -153,28 +153,69 @@ static bool kvm__cpu_supports_vm(void)
return regs.ecx & (1 << feature);
}
-void kvm__init_ram(struct kvm *self)
+static void kvm_register_mem_slot(struct kvm *kvm, u32 slot, u64 guest_phys, u64 size, void *userspace_addr)
{
struct kvm_userspace_memory_region mem;
int ret;
mem = (struct kvm_userspace_memory_region) {
- .slot = 0,
- .guest_phys_addr = 0x0UL,
- .memory_size = self->ram_size,
- .userspace_addr = (unsigned long) self->ram_start,
+ .slot = slot,
+ .guest_phys_addr = guest_phys,
+ .memory_size = size,
+ .userspace_addr = (u64)userspace_addr,
};
- ret = ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
+ ret = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
if (ret < 0)
die_perror("KVM_SET_USER_MEMORY_REGION ioctl");
}
-int kvm__max_cpus(struct kvm *self)
+/*
+ * Allocating RAM size bigger than 4GB requires us to leave a gap
+ * in the RAM which is used for PCI MMIO, hotplug, and unconfigured
+ * devices (see documentation of e820_setup_gap() for details).
+ *
+ * If we're required to initialize RAM bigger than 4GB, we will create
+ * a gap between 0xe0000000 and 0x100000000 in the guest virtual mem space.
+ */
+
+void kvm__init_ram(struct kvm *kvm)
+{
+ u64 phys_start, phys_size;
+ void *host_mem;
+
+ if (kvm->ram_size < KVM_32BIT_GAP_START) {
+ /* Use a single block of RAM for 32bit RAM */
+
+ phys_start = 0;
+ phys_size = kvm->ram_size;
+ host_mem = kvm->ram_start;
+
+ kvm_register_mem_slot(kvm, 0, 0, kvm->ram_size, kvm->ram_start);
+ } else {
+ /* First RAM range from zero to the PCI gap: */
+
+ phys_start = 0;
+ phys_size = KVM_32BIT_GAP_START;
+ host_mem = kvm->ram_start;
+
+ kvm_register_mem_slot(kvm, 0, phys_start, phys_size, host_mem);
+
+ /* Second RAM range from 4GB to the end of RAM: */
+
+ phys_start = 0x100000000ULL;
+ phys_size = kvm->ram_size - phys_size;
+ host_mem = kvm->ram_start + phys_start;
+
+ kvm_register_mem_slot(kvm, 1, phys_start, phys_size, host_mem);
+ }
+}
+
+int kvm__max_cpus(struct kvm *kvm)
{
int ret;
- ret = ioctl(self->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
+ ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
if (ret < 0)
die_perror("KVM_CAP_NR_VCPUS");
@@ -184,16 +225,16 @@ int kvm__max_cpus(struct kvm *self)
struct kvm *kvm__init(const char *kvm_dev, unsigned long ram_size)
{
struct kvm_pit_config pit_config = { .flags = 0, };
- struct kvm *self;
+ struct kvm *kvm;
int ret;
if (!kvm__cpu_supports_vm())
die("Your CPU does not support hardware virtualization");
- self = kvm__new();
+ kvm = kvm__new();
- self->sys_fd = open(kvm_dev, O_RDWR);
- if (self->sys_fd < 0) {
+ kvm->sys_fd = open(kvm_dev, O_RDWR);
+ if (kvm->sys_fd < 0) {
if (errno == ENOENT)
die("'%s' not found. Please make sure your kernel has CONFIG_KVM enabled and that the KVM modules are loaded.", kvm_dev);
if (errno == ENODEV)
@@ -204,36 +245,47 @@ struct kvm *kvm__init(const char *kvm_dev, unsigned long ram_size)
exit(1);
}
- ret = ioctl(self->sys_fd, KVM_GET_API_VERSION, 0);
+ ret = ioctl(kvm->sys_fd, KVM_GET_API_VERSION, 0);
if (ret != KVM_API_VERSION)
die_perror("KVM_API_VERSION ioctl");
- self->vm_fd = ioctl(self->sys_fd, KVM_CREATE_VM, 0);
- if (self->vm_fd < 0)
+ kvm->vm_fd = ioctl(kvm->sys_fd, KVM_CREATE_VM, 0);
+ if (kvm->vm_fd < 0)
die_perror("KVM_CREATE_VM ioctl");
- if (kvm__check_extensions(self))
+ if (kvm__check_extensions(kvm))
die("A required KVM extention is not supported by OS");
- ret = ioctl(self->vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000);
+ ret = ioctl(kvm->vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000);
if (ret < 0)
die_perror("KVM_SET_TSS_ADDR ioctl");
- ret = ioctl(self->vm_fd, KVM_CREATE_PIT2, &pit_config);
+ ret = ioctl(kvm->vm_fd, KVM_CREATE_PIT2, &pit_config);
if (ret < 0)
die_perror("KVM_CREATE_PIT2 ioctl");
- self->ram_size = ram_size;
-
- self->ram_start = mmap(NULL, ram_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
- if (self->ram_start == MAP_FAILED)
+ kvm->ram_size = ram_size;
+
+ if (kvm->ram_size < KVM_32BIT_GAP_START) {
+ kvm->ram_start = mmap(NULL, ram_size, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
+ } else {
+ kvm->ram_start = mmap(NULL, ram_size + KVM_32BIT_GAP_SIZE, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
+ if (kvm->ram_start != MAP_FAILED) {
+ /*
+ * We mprotect the gap (see kvm__init_ram() for details) PROT_NONE so that
+ * if we accidently write to it, we will know.
+ */
+ mprotect(kvm->ram_start + KVM_32BIT_GAP_START, KVM_32BIT_GAP_SIZE, PROT_NONE);
+ }
+ }
+ if (kvm->ram_start == MAP_FAILED)
die("out of memory");
- ret = ioctl(self->vm_fd, KVM_CREATE_IRQCHIP);
+ ret = ioctl(kvm->vm_fd, KVM_CREATE_IRQCHIP);
if (ret < 0)
die_perror("KVM_CREATE_IRQCHIP ioctl");
- return self;
+ return kvm;
}
#define BOOT_LOADER_SELECTOR 0x1000
@@ -244,7 +296,7 @@ struct kvm *kvm__init(const char *kvm_dev, unsigned long ram_size)
#define BOOT_PROTOCOL_REQUIRED 0x206
#define LOAD_HIGH 0x01
-static int load_flat_binary(struct kvm *self, int fd)
+static int load_flat_binary(struct kvm *kvm, int fd)
{
void *p;
int nr;
@@ -252,21 +304,21 @@ static int load_flat_binary(struct kvm *self, int fd)
if (lseek(fd, 0, SEEK_SET) < 0)
die_perror("lseek");
- p = guest_real_to_host(self, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
+ p = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
while ((nr = read(fd, p, 65536)) > 0)
p += nr;
- self->boot_selector = BOOT_LOADER_SELECTOR;
- self->boot_ip = BOOT_LOADER_IP;
- self->boot_sp = BOOT_LOADER_SP;
+ kvm->boot_selector = BOOT_LOADER_SELECTOR;
+ kvm->boot_ip = BOOT_LOADER_IP;
+ kvm->boot_sp = BOOT_LOADER_SP;
return true;
}
static const char *BZIMAGE_MAGIC = "HdrS";
-static bool load_bzimage(struct kvm *self, int fd_kernel,
+static bool load_bzimage(struct kvm *kvm, int fd_kernel,
int fd_initrd, const char *kernel_cmdline)
{
struct boot_params *kern_boot;
@@ -302,19 +354,19 @@ static bool load_bzimage(struct kvm *self, int fd_kernel,
setup_sects = boot.hdr.setup_sects + 1;
setup_size = setup_sects << 9;
- p = guest_real_to_host(self, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
+ p = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
/* copy setup.bin to mem*/
if (read(fd_kernel, p, setup_size) != setup_size)
die_perror("read");
/* copy vmlinux.bin to BZ_KERNEL_START*/
- p = guest_flat_to_host(self, BZ_KERNEL_START);
+ p = guest_flat_to_host(kvm, BZ_KERNEL_START);
while ((nr = read(fd_kernel, p, 65536)) > 0)
p += nr;
- p = guest_flat_to_host(self, BOOT_CMDLINE_OFFSET);
+ p = guest_flat_to_host(kvm, BOOT_CMDLINE_OFFSET);
if (kernel_cmdline) {
cmdline_size = strlen(kernel_cmdline) + 1;
if (cmdline_size > boot.hdr.cmdline_size)
@@ -324,7 +376,7 @@ static bool load_bzimage(struct kvm *self, int fd_kernel,
memcpy(p, kernel_cmdline, cmdline_size - 1);
}
- kern_boot = guest_real_to_host(self, BOOT_LOADER_SELECTOR, 0x00);
+ kern_boot = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, 0x00);
kern_boot->hdr.cmd_line_ptr = BOOT_CMDLINE_OFFSET;
kern_boot->hdr.type_of_loader = 0xff;
@@ -345,12 +397,12 @@ static bool load_bzimage(struct kvm *self, int fd_kernel,
for (;;) {
if (addr < BZ_KERNEL_START)
die("Not enough memory for initrd");
- else if (addr < (self->ram_size - initrd_stat.st_size))
+ else if (addr < (kvm->ram_size - initrd_stat.st_size))
break;
addr -= 0x100000;
}
- p = guest_flat_to_host(self, addr);
+ p = guest_flat_to_host(kvm, addr);
nr = read(fd_initrd, p, initrd_stat.st_size);
if (nr != initrd_stat.st_size)
die("Failed to read initrd");
@@ -359,13 +411,13 @@ static bool load_bzimage(struct kvm *self, int fd_kernel,
kern_boot->hdr.ramdisk_size = initrd_stat.st_size;
}
- self->boot_selector = BOOT_LOADER_SELECTOR;
+ kvm->boot_selector = BOOT_LOADER_SELECTOR;
/*
* The real-mode setup code starts at offset 0x200 of a bzImage. See
* Documentation/x86/boot.txt for details.
*/
- self->boot_ip = BOOT_LOADER_IP + 0x200;
- self->boot_sp = BOOT_LOADER_SP;
+ kvm->boot_ip = BOOT_LOADER_IP + 0x200;
+ kvm->boot_sp = BOOT_LOADER_SP;
return true;
}
@@ -412,20 +464,20 @@ bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
/**
* kvm__setup_bios - inject BIOS into guest system memory
- * @self - guest system descriptor
+ * @kvm - guest system descriptor
*
* This function is a main routine where we poke guest memory
* and install BIOS there.
*/
-void kvm__setup_bios(struct kvm *self)
+void kvm__setup_bios(struct kvm *kvm)
{
/* standart minimal configuration */
- setup_bios(self);
+ setup_bios(kvm);
/* FIXME: SMP, ACPI and friends here */
/* MP table */
- mptable_setup(self, self->nrcpus);
+ mptable_setup(kvm, kvm->nrcpus);
}
#define TIMER_INTERVAL_NS 1000000 /* 1 msec */
@@ -435,7 +487,7 @@ void kvm__setup_bios(struct kvm *self)
* userspace hypervisor into the guest at periodical intervals. Please note
* that clock interrupt, for example, is not handled here.
*/
-void kvm__start_timer(struct kvm *self)
+void kvm__start_timer(struct kvm *kvm)
{
struct itimerspec its;
struct sigevent sev;
@@ -445,7 +497,7 @@ void kvm__start_timer(struct kvm *self)
sev.sigev_notify = SIGEV_SIGNAL;
sev.sigev_signo = SIGALRM;
- if (timer_create(CLOCK_REALTIME, &sev, &self->timerid) < 0)
+ if (timer_create(CLOCK_REALTIME, &sev, &kvm->timerid) < 0)
die("timer_create()");
its.it_value.tv_sec = TIMER_INTERVAL_NS / 1000000000;
@@ -453,20 +505,20 @@ void kvm__start_timer(struct kvm *self)
its.it_interval.tv_sec = its.it_value.tv_sec;
its.it_interval.tv_nsec = its.it_value.tv_nsec;
- if (timer_settime(self->timerid, 0, &its, NULL) < 0)
+ if (timer_settime(kvm->timerid, 0, &its, NULL) < 0)
die("timer_settime()");
}
-void kvm__stop_timer(struct kvm *self)
+void kvm__stop_timer(struct kvm *kvm)
{
- if (self->timerid)
- if (timer_delete(self->timerid) < 0)
+ if (kvm->timerid)
+ if (timer_delete(kvm->timerid) < 0)
die("timer_delete()");
- self->timerid = 0;
+ kvm->timerid = 0;
}
-void kvm__irq_line(struct kvm *self, int irq, int level)
+void kvm__irq_line(struct kvm *kvm, int irq, int level)
{
struct kvm_irq_level irq_level;
@@ -477,11 +529,11 @@ void kvm__irq_line(struct kvm *self, int irq, int level)
.level = level,
};
- if (ioctl(self->vm_fd, KVM_IRQ_LINE, &irq_level) < 0)
+ if (ioctl(kvm->vm_fd, KVM_IRQ_LINE, &irq_level) < 0)
die_perror("KVM_IRQ_LINE failed");
}
-void kvm__dump_mem(struct kvm *self, unsigned long addr, unsigned long size)
+void kvm__dump_mem(struct kvm *kvm, unsigned long addr, unsigned long size)
{
unsigned char *p;
unsigned long n;
@@ -490,10 +542,10 @@ void kvm__dump_mem(struct kvm *self, unsigned long addr, unsigned long size)
if (!size)
return;
- p = guest_flat_to_host(self, addr);
+ p = guest_flat_to_host(kvm, addr);
for (n = 0; n < size; n += 8) {
- if (!host_ptr_in_ram(self, p + n))
+ if (!host_ptr_in_ram(kvm, p + n))
break;
printf(" 0x%08lx: %02x %02x %02x %02x %02x %02x %02x %02x\n",
2  tools/kvm/mmio.c
@@ -16,7 +16,7 @@ static const char *to_direction(u8 is_write)
u8 videomem[2000000];
-bool kvm__emulate_mmio(struct kvm *self, u64 phys_addr, u8 *data, u32 len, u8 is_write)
+bool kvm__emulate_mmio(struct kvm *kvm, u64 phys_addr, u8 *data, u32 len, u8 is_write)
{
// u32 ptr;
if (is_write) {
2  tools/kvm/mptable.c
@@ -260,7 +260,7 @@ void mptable_setup(struct kvm *kvm, unsigned int ncpus)
/*
* We will copy the whole table, no need to separate
- * floating structure and table itself.
+ * floating structure and table itkvm.
*/
size = (unsigned long)mpf_intel + sizeof(*mpf_intel) - (unsigned long)mpc_table;
8 tools/kvm/pci.c
@@ -21,7 +21,7 @@ static void *pci_config_address_ptr(u16 port)
return base + offset;
}
-static bool pci_config_address_out(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool pci_config_address_out(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
void *p = pci_config_address_ptr(port);
@@ -32,7 +32,7 @@ static bool pci_config_address_out(struct kvm *self, u16 port, void *data, int s
return true;
}
-static bool pci_config_address_in(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool pci_config_address_in(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
void *p = pci_config_address_ptr(port);
@@ -48,7 +48,7 @@ static struct ioport_operations pci_config_address_ops = {
.io_out = pci_config_address_out,
};
-static bool pci_config_data_out(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool pci_config_data_out(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
// printf("in pci_config_data_out, port = %d, size = %d, count = %d\n", port, size, count);
return true;
@@ -72,7 +72,7 @@ static bool pci_device_exists(u8 bus_number, u8 device_number, u8 function_numbe
return dev != NULL;
}
-static bool pci_config_data_in(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool pci_config_data_in(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
unsigned long start;
u8 dev_num;
255 tools/kvm/qcow.c
@@ -13,6 +13,7 @@
#include <fcntl.h>
#include <linux/byteorder.h>
+#include <linux/kernel.h>
#include <linux/types.h>
static inline u64 get_l1_index(struct qcow *q, u64 offset)
@@ -40,16 +41,18 @@ static ssize_t qcow1_read_cluster(struct qcow *q, u64 offset, void *dst, u32 dst
{
struct qcow_header *header = q->header;
struct qcow_table *table = &q->table;
- u64 *l2_table = NULL;
u64 l2_table_offset;
u64 l2_table_size;
u64 cluster_size;
u64 clust_offset;
u64 clust_start;
size_t length;
+ u64 *l2_table;
u64 l1_idx;
u64 l2_idx;
+ l2_table = NULL;
+
cluster_size = 1 << header->cluster_bits;
l1_idx = get_l1_index(q, offset);
@@ -73,8 +76,7 @@ static ssize_t qcow1_read_cluster(struct qcow *q, u64 offset, void *dst, u32 dst
if (!l2_table)
goto out_error;
- if (pread_in_full(q->fd, l2_table, l2_table_size * sizeof(u64),
- l2_table_offset) < 0)
+ if (pread_in_full(q->fd, l2_table, l2_table_size * sizeof(u64), l2_table_offset) < 0)
goto out_error;
l2_idx = get_l2_index(q, offset);
@@ -101,55 +103,250 @@ static ssize_t qcow1_read_cluster(struct qcow *q, u64 offset, void *dst, u32 dst
goto out;
}
-static int qcow1_read_sector(struct disk_image *self, u64 sector,
- void *dst, u32 dst_len)
+static int qcow1_read_sector(struct disk_image *disk, u64 sector, void *dst, u32 dst_len)
{
- struct qcow *q = self->priv;
+ struct qcow *q = disk->priv;
struct qcow_header *header = q->header;
- char *buf = dst;
- u64 offset;
u32 nr_read;
+ u64 offset;
+ char *buf;
u32 nr;
- nr_read = 0;
+ buf = dst;
+ nr_read = 0;
+
while (nr_read < dst_len) {
- offset = sector << SECTOR_SHIFT;
+ offset = sector << SECTOR_SHIFT;
if (offset >= header->size)
- goto out_error;
+ return -1;
nr = qcow1_read_cluster(q, offset, buf, dst_len - nr_read);
if (nr <= 0)
- goto out_error;
+ return -1;
nr_read += nr;
buf += nr;
sector += (nr >> SECTOR_SHIFT);
}
+
return 0;
-out_error:
+}
+
+static inline u64 file_size(int fd)
+{
+ struct stat st;
+
+ if (fstat(fd, &st) < 0)
+ return 0;
+
+ return st.st_size;
+}
+
+#define SYNC_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE)
+
+static inline int qcow_pwrite_sync(int fd, void *buf, size_t count, off_t offset)
+{
+ if (pwrite_in_full(fd, buf, count, offset) < 0)
+ return -1;
+
+ return sync_file_range(fd, offset, count, SYNC_FLAGS);
+}
+
+/* Writes a level 2 table at the end of the file. */
+static u64 qcow1_write_l2_table(struct qcow *q, u64 *table)
+{
+ struct qcow_header *header = q->header;
+ u64 clust_sz;
+ u64 f_sz;
+ u64 off;
+ u64 sz;
+
+ f_sz = file_size(q->fd);
+ if (!f_sz)
+ return 0;
+
+ sz = 1 << header->l2_bits;
+ clust_sz = 1 << header->cluster_bits;
+ off = ALIGN(f_sz, clust_sz);
+
+ if (qcow_pwrite_sync(q->fd, table, sz * sizeof(u64), off) < 0)
+ return 0;
+
+ return off;
+}
+
+/*
+ * QCOW file might grow during a write operation. Not only data but metadata is
+ * also written at the end of the file. Therefore it is necessary to ensure
+ * every write is committed to disk. Hence we use uses qcow_pwrite_sync() to
+ * synchronize the in-core state of QCOW image to disk.
+ *
+ * We also try to restore the image to a consistent state if the metdata