Permalink
Browse files

HVM-335 kvm_subr.c has reached the end of the line

  • Loading branch information...
rmustacc committed Jun 8, 2011
1 parent fc81a70 commit 27b148dd0083b94a2c6a46dfd46c02eb557cc9e3
Showing with 465 additions and 517 deletions.
  1. +4 −6 Makefile
  2. +90 −1 kvm.c
  3. +46 −0 kvm_mmu.c
  4. +0 −496 kvm_subr.c
  5. +325 −5 kvm_x86.c
  6. +0 −9 msr.h
View
@@ -20,11 +20,10 @@ HEADERS= \
kvm.h \
kvm_bitops.h
-kvm: kvm.c kvm_x86.c kvm_emulate.c kvm.h kvm_x86host.h msr.h kvm_bitops.h kvm_subr.c kvm_irq.c kvm_i8254.c kvm_lapic.c kvm_mmu.c kvm_iodev.c kvm_ioapic.c kvm_vmx.c kvm_i8259.c kvm_coalesced_mmio.c kvm_irq_comm.c kvm_cache_regs.c kvm_bitops.c
+kvm: kvm.c kvm_x86.c kvm_emulate.c kvm.h kvm_x86host.h msr.h kvm_bitops.h kvm_irq.c kvm_i8254.c kvm_lapic.c kvm_mmu.c kvm_iodev.c kvm_ioapic.c kvm_vmx.c kvm_i8259.c kvm_coalesced_mmio.c kvm_irq_comm.c kvm_cache_regs.c kvm_bitops.c
$(CC) $(CFLAGS) $(INCLUDEDIR) kvm.c
$(CC) $(CFLAGS) $(INCLUDEDIR) kvm_x86.c
$(CC) $(CFLAGS) $(INCLUDEDIR) kvm_emulate.c
- $(CC) $(CFLAGS) $(INCLUDEDIR) kvm_subr.c
$(CC) $(CFLAGS) $(INCLUDEDIR) kvm_irq.c
$(CC) $(CFLAGS) $(INCLUDEDIR) kvm_i8254.c
$(CC) $(CFLAGS) $(INCLUDEDIR) kvm_lapic.c
@@ -40,7 +39,6 @@ kvm: kvm.c kvm_x86.c kvm_emulate.c kvm.h kvm_x86host.h msr.h kvm_bitops.h kvm_su
$(CTFCONVERT) -i -L VERSION kvm.o
$(CTFCONVERT) -i -L VERSION kvm_x86.o
$(CTFCONVERT) -i -L VERSION kvm_emulate.o
- $(CTFCONVERT) -i -L VERSION kvm_subr.o
$(CTFCONVERT) -i -L VERSION kvm_irq.o
$(CTFCONVERT) -i -L VERSION kvm_i8254.o
$(CTFCONVERT) -i -L VERSION kvm_lapic.o
@@ -53,8 +51,8 @@ kvm: kvm.c kvm_x86.c kvm_emulate.c kvm.h kvm_x86host.h msr.h kvm_bitops.h kvm_su
$(CTFCONVERT) -i -L VERSION kvm_irq_comm.o
$(CTFCONVERT) -i -L VERSION kvm_cache_regs.o
$(CTFCONVERT) -i -L VERSION kvm_bitops.o
- $(LD) -r -o kvm kvm.o kvm_x86.o kvm_emulate.o kvm_subr.o kvm_irq.o kvm_i8254.o kvm_lapic.o kvm_mmu.o kvm_iodev.o kvm_ioapic.o kvm_vmx.o kvm_i8259.o kvm_coalesced_mmio.o kvm_irq_comm.o kvm_cache_regs.o kvm_bitops.o
- $(CTFMERGE) -L VERSION -o kvm kvm.o kvm_x86.o kvm_emulate.o kvm_subr.o kvm_irq.o kvm_i8254.o kvm_lapic.o kvm_mmu.o kvm_iodev.o kvm_ioapic.o kvm_vmx.o kvm_i8259.o kvm_coalesced_mmio.o kvm_irq_comm.o kvm_cache_regs.o kvm_bitops.o
+ $(LD) -r -o kvm kvm.o kvm_x86.o kvm_emulate.o kvm_irq.o kvm_i8254.o kvm_lapic.o kvm_mmu.o kvm_iodev.o kvm_ioapic.o kvm_vmx.o kvm_i8259.o kvm_coalesced_mmio.o kvm_irq_comm.o kvm_cache_regs.o kvm_bitops.o
+ $(CTFMERGE) -L VERSION -o kvm kvm.o kvm_x86.o kvm_emulate.o kvm_irq.o kvm_i8254.o kvm_lapic.o kvm_mmu.o kvm_iodev.o kvm_ioapic.o kvm_vmx.o kvm_i8259.o kvm_coalesced_mmio.o kvm_irq_comm.o kvm_cache_regs.o kvm_bitops.o
kvm.so: kvm_mdb.c
gcc -m64 -shared \
@@ -67,7 +65,7 @@ install: kvm
@pfexec cp kvm.conf /usr/kernel/drv
check:
- @$(CSTYLE) kvm.c kvm_mdb.c kvm_emulate.c kvm_x86.c kvm_irq.c kvm_lapic.c kvm_i8254.c kvm_mmu.c kvm_iodev.c kvm_subr.c kvm_ioapic.c kvm_vmx.c kvm_i8259.c kvm_coalesced_mmio.c kvm_irq_comm.c kvm_cache_regs.c kvm_bitops.c $(HEADERS)
+ @$(CSTYLE) kvm.c kvm_mdb.c kvm_emulate.c kvm_x86.c kvm_irq.c kvm_lapic.c kvm_i8254.c kvm_mmu.c kvm_iodev.c kvm_ioapic.c kvm_vmx.c kvm_i8259.c kvm_coalesced_mmio.c kvm_irq_comm.c kvm_cache_regs.c kvm_bitops.c $(HEADERS)
@./tools/xxxcheck kvm_x86.c kvm.c kvm_irq.c kvm_lapic.c kvm_i8254.c kvm_mmu.c kvm_iodev.c kvm_ioapic.c kvm_vmx.c kvm_i8259.c kvm_coalesced_mmio.c kvm_irq_comm.c kvm_cache_regs.c kvm_bitops.c
load: install
View
91 kvm.c
@@ -96,6 +96,34 @@ static kmutex_t kvm_lock;
static int ignore_msrs = 0;
static unsigned long empty_zero_page[PAGESIZE / sizeof (unsigned long)];
+/*
+ * Cross-call trampoline: invoke the supplied handler, if any, with its
+ * argument.  Always returns 0.
+ */
+int
+kvm_xcall_func(kvm_xcall_t func, void *arg)
+{
+	if (func == NULL)
+		return (0);
+
+	func(arg);
+	return (0);
+}
+
+/*
+ * Execute func(arg) via a synchronous cross call on the CPU identified
+ * by 'cpu', or on every CPU when cpu == KVM_CPUALL.  Preemption is
+ * disabled around xc_sync() so the calling thread cannot migrate while
+ * the cross call is outstanding.
+ */
+void
+kvm_xcall(processorid_t cpu, kvm_xcall_t func, void *arg)
+{
+ cpuset_t set;
+
+ CPUSET_ZERO(set);
+
+ if (cpu == KVM_CPUALL) {
+ CPUSET_ALL(set);
+ } else {
+ CPUSET_ADD(set, cpu);
+ }
+
+ /* kvm_xcall_func is the trampoline run on each target CPU. */
+ kpreempt_disable();
+ xc_sync((xc_arg_t)func, (xc_arg_t)arg, 0, CPUSET2BV(set),
+ (xc_func_t) kvm_xcall_func);
+ kpreempt_enable();
+}
+
void
kvm_user_return_notifier_register(struct kvm_vcpu *vcpu,
struct kvm_user_return_notifier *urn)
@@ -143,6 +171,12 @@ kvm_ctx_restore(void *arg)
kvm_arch_vcpu_load(vcpu, cpu);
}
+/*
+ * Request timer migration for the vcpu by setting the
+ * KVM_REQ_MIGRATE_TIMER bit in vcpu->requests.  Presumably the request
+ * is serviced on the vcpu's next run-loop pass — confirm against the
+ * request-processing code.
+ */
+void
+kvm_migrate_timers(struct kvm_vcpu *vcpu)
+{
+ set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
+}
+
#ifdef XXX_KVM_DECLARATION
#define pfn_valid(pfn) ((pfn < physmax) && (pfn != PFN_INVALID))
#else
@@ -181,6 +215,17 @@ vcpu_load(struct kvm_vcpu *vcpu)
kpreempt_enable();
}
+/*
+ * Return the i'th vcpu of the given VM.  Linux issues an smp_rmb()
+ * before the read; that barrier is compiled out here (#ifdef XXX) and
+ * the omission is flagged with XXX_KVM_PROBE — presumably acceptable
+ * given x86 load ordering; confirm against the vcpu publish path.
+ */
+struct kvm_vcpu *
+kvm_get_vcpu(struct kvm *kvm, int i)
+{
+#ifdef XXX
+ smp_rmb();
+#else
+ XXX_KVM_PROBE;
+#endif
+ return (kvm->vcpus[i]);
+}
+
void
vcpu_put(struct kvm_vcpu *vcpu)
{
@@ -480,13 +525,18 @@ kvm_free_physmem(struct kvm *kvm)
kmem_free(kvm->memslots, sizeof (struct kvm_memslots));
}
-
void
kvm_get_kvm(struct kvm *kvm)
{
atomic_inc_32(&kvm->users_count);
}
+/*
+ * Size, in bytes, of the dirty-page bitmap needed to cover every page
+ * in the given memory slot (one bit per page).
+ */
+unsigned long
+kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
+{
+ return (BT_SIZEOFMAP(memslot->npages));
+}
+
/*
* Allocate some memory and give it an address in the guest physical address
* space.
@@ -1149,6 +1199,12 @@ mark_page_dirty(struct kvm *kvm, gfn_t gfn)
}
}
+/*
+ * Return nonzero iff this vcpu is the VM's bootstrap processor, i.e.
+ * its vcpu_id matches the id recorded in kvm->bsp_vcpu_id.
+ */
+int
+kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
+{
+ return (vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id);
+}
+
/*
* The vCPU has executed a HLT instruction with in-kernel mode enabled.
*/
@@ -1659,6 +1715,39 @@ kvm_guest_enter(void)
#endif
}
+/*
+ * Find the first cleared bit in a memory region.
+ *
+ * 'addr' points to an array of words holding 'size' bits; the bit index
+ * of the first 0 bit is returned, or a value >= size when every bit in
+ * range is set.  NOTE(review): the hard-coded 64 assumes 64-bit
+ * unsigned longs — true on amd64, not portable to ILP32.
+ */
+unsigned long
+find_first_zero_bit(const unsigned long *addr, unsigned long size)
+{
+ const unsigned long *p = addr;
+ unsigned long result = 0;
+ unsigned long tmp;
+
+ /* Whole words: any word that is not all ones contains a 0 bit. */
+ while (size & ~(64-1)) {
+ if (~(tmp = *(p++)))
+ goto found;
+ result += 64;
+ size -= 64;
+ }
+ if (!size)
+ return (result);
+
+ /* Partial trailing word: force the out-of-range bits to 1. */
+ tmp = (*p) | (~0UL << size);
+ if (tmp == ~0UL) /* Are any bits zero? */
+ return (result + size); /* Nope. */
+found:
+ return (result + ffz(tmp));
+}
+
+/*
+ * Zero-fill constructor: bzero 'buf' using the size smuggled through
+ * the void * 'arg'; 'tags' is unused.  Always succeeds.  Presumably
+ * installed as a kmem_cache constructor — confirm at the cache
+ * creation sites.
+ */
+int
+zero_constructor(void *buf, void *arg, int tags)
+{
+ bzero(buf, (size_t)arg);
+ return (0);
+}
+
static int
kvm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
View
@@ -3028,3 +3028,49 @@ kvm_avlmmucmp(const void *arg1, const void *arg2)
ASSERT(mp1->kmp_avlspt == mp2->kmp_avlspt);
return (0);
}
+
+/*
+ * Linux compatibility shim: Linux maps a tail page of a compound page
+ * to its head page here; illumos page_t's carry no such linkage, so
+ * the page is returned unchanged.
+ */
+inline page_t *
+compound_head(page_t *page)
+{
+ /* XXX - linux links page_t together. */
+ return (page);
+}
+
+/*
+ * Linux compatibility stub.  Linux's get_page() takes a reference on
+ * the page; here the assignment only rewrites the local parameter, so
+ * the call has no effect.  NOTE(review): callers expecting reference
+ * counting get none — confirm this is intentional.
+ */
+inline void
+get_page(page_t *page)
+{
+ page = compound_head(page);
+}
+
+/*
+ * Map a page frame number to its page_t via page_numtopp_nolock().
+ * Presumably returns NULL when no page structure exists for the pfn —
+ * confirm callers handle that.
+ */
+page_t *
+pfn_to_page(pfn_t pfn)
+{
+ return (page_numtopp_nolock(pfn));
+}
+
+/*
+ * Allocate 'size' bytes of zeroed kernel memory ('flag' is the KM_*
+ * sleep flag passed to kmem_zalloc()) and return the page_t backing
+ * it, or NULL when the allocation fails.
+ */
+page_t *
+alloc_page(size_t size, int flag)
+{
+ caddr_t page_addr;
+ page_t *pp;
+
+ if ((page_addr = kmem_zalloc(size, flag)) == NULL)
+ return ((page_t *)NULL);
+
+ /*
+ * Translate the kernel virtual address to its pfn and look up the
+ * backing page_t.  NOTE(review): if the lookup returned NULL the
+ * kmem allocation would be leaked — presumably it cannot fail for
+ * kmem-backed memory; confirm.
+ */
+ pp = page_numtopp_nolock(hat_getpfnum(kas.a_hat, page_addr));
+ return (pp);
+}
+
+/*
+ * Often times we have pages that correspond to addresses that are in a users
+ * virtual address space. Rather than trying to constantly map them in and out
+ * of our address space we instead go through and use the kpm segment to
+ * facilitate this for us. This always returns an address that is always in the
+ * kernel's virtual address space.
+ */
+caddr_t
+page_address(page_t *page)
+{
+ /* kpm provides a persistent kernel mapping for the page's pfn. */
+ return (hat_kpm_mapin_pfn(page->p_pagenum));
+}
Oops, something went wrong.

0 comments on commit 27b148d

Please sign in to comment.