|
11 | 11 | #include <sys/stat.h> |
12 | 12 | #include <sys/types.h> |
13 | 13 |
|
14 | | -int kvm, vmfd; |
15 | | -struct kvm_run *run; |
16 | | -struct kvm_regs regs; |
17 | | -struct kvm_sregs sregs; |
18 | | -int vcpufd; |
| 14 | +static int kvm, vmfd; |
| 15 | +static struct kvm_run *run; |
| 16 | +static struct kvm_regs regs; |
| 17 | +static struct kvm_sregs sregs; |
| 18 | +static int vcpufd; |
19 | 19 |
|
20 | 20 | int |
21 | 21 | vmm_create(void) |
22 | 22 | { |
23 | 23 | int ret; |
24 | 24 |
|
25 | 25 | if ((kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC)) < 0) |
26 | | - return VMM_ERROR; |
| 26 | + return VMM_ENOTSUP; |
27 | 27 |
|
28 | 28 | /* check API version */ |
29 | 29 | if ((ret = ioctl(kvm, KVM_GET_API_VERSION, NULL)) < 0) |
@@ -65,7 +65,6 @@ vmm_cpu_create() |
65 | 65 | /* Map the shared kvm_run structure and following data. */ |
66 | 66 | if ((mmap_size = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, NULL)) < 0) |
67 | 67 | return VMM_ERROR; |
68 | | - assert(mmap_size < sizeof(*run)) |
69 | 68 | if ((run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpufd, 0)) == 0) |
70 | 69 | return VMM_ERROR; |
71 | 70 |
|
@@ -191,6 +190,72 @@ vmm_cpu_write_register(vmm_x64_reg_t reg, uint64_t value) |
191 | 190 | return 0; |
192 | 191 | } |
193 | 192 |
|
| 193 | +int |
| 194 | +vmm_cpu_read_register(vmm_x64_reg_t reg, uint64_t *value) |
| 195 | +{ |
| 196 | + if (ioctl(vcpufd, KVM_GET_REGS, &regs) < 0)
| 197 | + return VMM_ERROR; |
| 198 | + if (ioctl(vcpufd, KVM_GET_SREGS, &sregs) < 0) |
| 199 | + return VMM_ERROR; |
| 200 | + |
| 201 | + switch (reg) { |
| 202 | + case VMM_X64_RIP: *value = regs.rip; break; |
| 203 | + case VMM_X64_RFLAGS: *value = regs.rflags; break; |
| 204 | + case VMM_X64_RAX: *value = regs.rax; break; |
| 205 | + case VMM_X64_RBX: *value = regs.rbx; break; |
| 206 | + case VMM_X64_RCX: *value = regs.rcx; break; |
| 207 | + case VMM_X64_RDX: *value = regs.rdx; break; |
| 208 | + case VMM_X64_RSI: *value = regs.rsi; break; |
| 209 | + case VMM_X64_RDI: *value = regs.rdi; break; |
| 210 | + case VMM_X64_RSP: *value = regs.rsp; break; |
| 211 | + case VMM_X64_RBP: *value = regs.rbp; break; |
| 212 | + case VMM_X64_R8: |
| 213 | + case VMM_X64_R9: |
| 214 | + case VMM_X64_R10: |
| 215 | + case VMM_X64_R11: |
| 216 | + case VMM_X64_R12: |
| 217 | + case VMM_X64_R13: |
| 218 | + case VMM_X64_R14: |
| 219 | + case VMM_X64_R15: |
| 220 | + case VMM_X64_CS: |
| 221 | + case VMM_X64_SS: |
| 222 | + case VMM_X64_DS: |
| 223 | + case VMM_X64_ES: |
| 224 | + case VMM_X64_FS: |
| 225 | + case VMM_X64_GS: |
| 226 | + case VMM_X64_IDT_BASE: |
| 227 | + case VMM_X64_IDT_LIMIT: |
| 228 | + case VMM_X64_GDT_BASE: |
| 229 | + case VMM_X64_GDT_LIMIT: |
| 230 | + case VMM_X64_LDTR: |
| 231 | + case VMM_X64_LDT_BASE: |
| 232 | + case VMM_X64_LDT_LIMIT: |
| 233 | + case VMM_X64_LDT_AR: |
| 234 | + case VMM_X64_TR: |
| 235 | + case VMM_X64_TSS_BASE: |
| 236 | + case VMM_X64_TSS_LIMIT: |
| 237 | + case VMM_X64_TSS_AR: |
| 238 | + case VMM_X64_CR0: |
| 239 | + case VMM_X64_CR1: |
| 240 | + case VMM_X64_CR2: |
| 241 | + case VMM_X64_CR3: |
| 242 | + case VMM_X64_CR4: |
| 243 | + case VMM_X64_DR0: |
| 244 | + case VMM_X64_DR1: |
| 245 | + case VMM_X64_DR2: |
| 246 | + case VMM_X64_DR3: |
| 247 | + case VMM_X64_DR4: |
| 248 | + case VMM_X64_DR5: |
| 249 | + case VMM_X64_DR6: |
| 250 | + case VMM_X64_DR7: |
| 251 | + case VMM_X64_TPR: |
| 252 | + case VMM_X64_XCR0: |
| 253 | + default: |
| 254 | + assert(false); |
| 255 | + } |
| 256 | + return 0; |
| 257 | +} |
| 258 | + |
194 | 259 | int |
195 | 260 | vmm_get(int id, uint64_t *value) |
196 | 261 | { |
|
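For orientation, a minimal caller-side sketch of the accessor this commit adds (not part of the diff itself): it assumes a hypothetical vmm.h header that declares vmm_cpu_read_register(), the VMM_X64_* register constants, and the VMM_ERROR return code seen above.

#include <inttypes.h>
#include <stdio.h>

#include "vmm.h"	/* hypothetical header exposing the vmm_* API above */

static int
dump_ip_and_rax(void)
{
	uint64_t rip, rax;

	/* Only the general-purpose registers handled in the switch above are
	 * readable for now; any other register currently trips assert(false). */
	if (vmm_cpu_read_register(VMM_X64_RIP, &rip) != 0)
		return VMM_ERROR;
	if (vmm_cpu_read_register(VMM_X64_RAX, &rax) != 0)
		return VMM_ERROR;

	printf("rip=0x%016" PRIx64 " rax=0x%016" PRIx64 "\n", rip, rax);
	return 0;
}

Note that each call to vmm_cpu_read_register() issues a fresh KVM_GET_REGS/KVM_GET_SREGS ioctl pair into the file-scope regs/sregs structures, so reading several registers in a row repeats those ioctls.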