# This module is largely lifted from Mini-OS. - ACW
#define __ASSEMBLY__ 1
#include <arch.h>
#include <xen/arch-x86_64.h>
#include <xen/features.h>
#if __XEN_LATEST_INTERFACE_VERSION__ >= 0x00030205
#include <xen/elfnote.h>
#include <elfnote.h>
#endif
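/*
 * Legacy __xen_guest section: older Xen loaders read guest metadata
 * from this string; newer interfaces use the ELF notes at the end of
 * this file.
 */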
.section __xen_guest
        .ascii  "GUEST_OS=HaLVM"
        .ascii  ",XEN_VER=xen-3.0"
        .ascii  ",VIRT_BASE=0x00400000"         /* &_text from kernel-x86_64.lds */
        .ascii  ",ELF_PADDR_OFFSET=0x00400000"
        .ascii  ",HYPERCALL_PAGE=0x1"
        .ascii  ",LOADER=generic"
        .byte   0
.text
#define ENTRY(X) .globl X ; X :
.globl _start, shared_info, hypercall_page, xen_features
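/*
 * Xen enters a 64-bit PV guest here with a pointer to the start_info
 * page in %rsi. Initialise the FPU control word, switch to our own
 * stack, and pass start_info to the C entry point as its first
 * argument.
 */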
_start:
        cld
        fldcw   newfpucw(%rip)
        movq    stack_start(%rip),%rsp
        movq    %rsi,%rdi
        call    c_start
.data
stack_start:
        .quad   initial_tos
newfpucw:
        .word   0x037f
xen_features:
        .fill   (XENFEAT_NR_SUBMAPS * 32)
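        /* Reserve the page at offset 0x1000 for the hypercall page;
         * Xen fills it with hypercall stubs at boot (HYPERCALL_PAGE=0x1
         * above, XEN_ELFNOTE_HYPERCALL_PAGE below). */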
        .org    0x1000
hypercall_page:
        .org    0x2000
.text
/*
 * Offsets into vcpu_info_t; VCPU 0's vcpu_info sits at the very start
 * of shared_info_t. evtchn_upcall_pending is deliberately defined to
 * nothing, so that evtchn_upcall_pending(reg) expands to (reg), i.e.
 * offset 0.
 */
#define evtchn_upcall_pending /* 0 */
#define evtchn_upcall_mask 1
NMI_MASK = 0x80000000
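/* Byte offsets into the register save frame built by error_entry below. */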
#define RDI 112
#define ORIG_RAX 120 /* + error_code */
#define EFLAGS 144
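/*
 * Pop the nine caller-saved registers stored by error_entry and skip
 * the orig_rax/error-code slot, leaving the hardware iret frame on
 * top of the stack.
 */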
.macro RESTORE_ALL
        movq    (%rsp),%r11
        movq    1*8(%rsp),%r10
        movq    2*8(%rsp),%r9
        movq    3*8(%rsp),%r8
        movq    4*8(%rsp),%rax
        movq    5*8(%rsp),%rcx
        movq    6*8(%rsp),%rdx
        movq    7*8(%rsp),%rsi
        movq    8*8(%rsp),%rdi
        addq    $9*8+8,%rsp
.endm
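/*
 * Return to the interrupted context. If the software NMI_MASK bit is
 * set in the saved EFLAGS we must return via the hypervisor's iret
 * hypercall. Otherwise we iretq directly, first forcing RPL 3 on the
 * saved CS and SS unless the guest has the supervisor_mode_kernel
 * feature.
 */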
.macro HYPERVISOR_IRET flag
        testl   $NMI_MASK,2*8(%rsp)
        jnz     2f
        testb   $1,(xen_features+XENFEAT_supervisor_mode_kernel)
        jnz     1f
        /* Direct iret to kernel space. Correct CS and SS. */
        orb     $3,1*8(%rsp)
        orb     $3,4*8(%rsp)
1:      iretq
2:      /* Slow iret via hypervisor. */
        andl    $~NMI_MASK, 16(%rsp)    /* clear the soft NMI mask in saved EFLAGS */
        pushq   $\flag
        jmp     hypercall_page + (__HYPERVISOR_iret * 32)
.endm
/*
* Exception entry point. This expects an error code/orig_rax on the stack
* and the exception handler in %rax.
*/
ENTRY(error_entry)
        /* rdi slot contains rax, oldrax contains error code */
        cld
        subq    $14*8,%rsp
        movq    %rsi,13*8(%rsp)
        movq    14*8(%rsp),%rsi         /* load rax from rdi slot */
        movq    %rdx,12*8(%rsp)
        movq    %rcx,11*8(%rsp)
        movq    %rsi,10*8(%rsp)         /* store rax */
        movq    %r8, 9*8(%rsp)
        movq    %r9, 8*8(%rsp)
        movq    %r10,7*8(%rsp)
        movq    %r11,6*8(%rsp)
        movq    %rbx,5*8(%rsp)
        movq    %rbp,4*8(%rsp)
        movq    %r12,3*8(%rsp)
        movq    %r13,2*8(%rsp)
        movq    %r14,1*8(%rsp)
        movq    %r15,(%rsp)
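/*
 * Frame complete: call the handler (in %rax) with a pointer to the
 * frame in %rdi and the error code in %rsi, then unwind via
 * error_exit.
 */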
error_call_handler:
        movq    %rdi, RDI(%rsp)
        movq    %rsp,%rdi
        movq    ORIG_RAX(%rsp),%rsi     # get error code
        movq    $-1,ORIG_RAX(%rsp)
        call    *%rax
        jmp     error_exit
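/*
 * On 64-bit PV Xen, exceptions arrive with %rcx and %r11 saved on the
 * guest stack (they are clobbered by the syscall-style delivery), so
 * both macros reload them before building the frame. zeroentry handles
 * exceptions that push no hardware error code by pushing a dummy 0;
 * errorentry relies on the code the CPU pushed.
 */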
.macro zeroentry sym
        movq    (%rsp),%rcx
        movq    8(%rsp),%r11
        addq    $0x10,%rsp              /* skip rcx and r11 */
        pushq   $0                      /* push error code/oldrax */
        pushq   %rax                    /* push real oldrax to the rdi slot */
        leaq    \sym(%rip),%rax
        jmp     error_entry
.endm
.macro errorentry sym
        movq    (%rsp),%rcx
        movq    8(%rsp),%r11
        addq    $0x10,%rsp              /* rsp points to the error code */
        pushq   %rax
        leaq    \sym(%rip),%rax
        jmp     error_entry
.endm
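/*
 * Event-channel mask helpers, operating on VCPU 0's vcpu_info at the
 * start of the shared info page. XEN_PUT_VCPU_INFO and its fixup are
 * defined empty here; they exist so the call sites match other ports
 * of this code.
 */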
#define XEN_GET_VCPU_INFO(reg)          movq HYPERVISOR_shared_info,reg
#define XEN_PUT_VCPU_INFO(reg)
#define XEN_PUT_VCPU_INFO_fixup
#define XEN_LOCKED_BLOCK_EVENTS(reg)    movb $1,evtchn_upcall_mask(reg)
#define XEN_LOCKED_UNBLOCK_EVENTS(reg)  movb $0,evtchn_upcall_mask(reg)
#define XEN_TEST_PENDING(reg)           testb $0xFF,evtchn_upcall_pending(reg)
#define XEN_BLOCK_EVENTS(reg)           XEN_GET_VCPU_INFO(reg) ; \
                                        XEN_LOCKED_BLOCK_EVENTS(reg) ; \
                                        XEN_PUT_VCPU_INFO(reg)
#define XEN_UNBLOCK_EVENTS(reg)         XEN_GET_VCPU_INFO(reg) ; \
                                        XEN_LOCKED_UNBLOCK_EVENTS(reg) ; \
                                        XEN_PUT_VCPU_INFO(reg)
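/*
 * Event (upcall) entry point. zeroentry builds the same frame as an
 * exception; hypervisor_callback2 then switches to the interrupt stack
 * unless we are already on it: %gs:0 holds a nesting count kept at -1
 * when idle and %gs:8 the interrupt stack pointer, so the incl sets ZF
 * exactly on the first, non-nested entry and cmovzq performs the
 * switch.
 */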
ENTRY(hypervisor_callback)
        zeroentry hypervisor_callback2
ENTRY(hypervisor_callback2)
        movq    %rdi, %rsp
11:     movq    %gs:8,%rax
        incl    %gs:0
        cmovzq  %rax,%rsp
        pushq   %rdi
        call    do_hypervisor_callback
        popq    %rsp
        decl    %gs:0
        jmp     error_exit
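/*
 * Re-enable event delivery, then return. A new event may fire anywhere
 * between the unmask and the final iret (the scrit/ecrit markers bound
 * this critical region); if one is already pending we re-mask at 14:,
 * rebuild the callee-saved half of the frame, and loop back to the
 * dispatcher at 11: above.
 */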
restore_all_enable_events:
        XEN_UNBLOCK_EVENTS(%rsi)        # %rsi is already set up...
scrit:  /**** START OF CRITICAL REGION ****/
        XEN_TEST_PENDING(%rsi)
        jnz     14f                     # process more events if necessary...
        XEN_PUT_VCPU_INFO(%rsi)
        RESTORE_ALL
        HYPERVISOR_IRET 0
14:     XEN_LOCKED_BLOCK_EVENTS(%rsi)
        XEN_PUT_VCPU_INFO(%rsi)
        subq    $6*8,%rsp
        movq    %rbx,5*8(%rsp)
        movq    %rbp,4*8(%rsp)
        movq    %r12,3*8(%rsp)
        movq    %r13,2*8(%rsp)
        movq    %r14,1*8(%rsp)
        movq    %r15,(%rsp)
        movq    %rsp,%rdi               # set the argument again
        jmp     11b
ecrit:  /**** END OF CRITICAL REGION ****/
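/*
 * Common return path: re-enable event delivery on the way out only if
 * the frame we are returning to had interrupts enabled (saved
 * EFLAGS.IF) and upcalls are currently masked.
 */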
retint_kernel:
retint_restore_args:
        movl    EFLAGS-6*8(%rsp), %eax
        shr     $9, %eax                # EAX[0] == IRET_EFLAGS.IF
        XEN_GET_VCPU_INFO(%rsi)
        andb    evtchn_upcall_mask(%rsi),%al
        andb    $1,%al                  # EAX[0] == IRET_EFLAGS.IF & event_mask
        jnz     restore_all_enable_events # != 0 => enable event delivery
        XEN_PUT_VCPU_INFO(%rsi)
        RESTORE_ALL
        HYPERVISOR_IRET 0
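/*
 * Exception handlers return here: restore the callee-saved registers,
 * then take the common return path with events blocked so the test in
 * retint_kernel is race-free.
 */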
error_exit:
        movq    (%rsp),%r15
        movq    1*8(%rsp),%r14
        movq    2*8(%rsp),%r13
        movq    3*8(%rsp),%r12
        movq    4*8(%rsp),%rbp
        movq    5*8(%rsp),%rbx
        addq    $6*8,%rsp
        XEN_BLOCK_EVENTS(%rsi)
        jmp     retint_kernel
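/*
 * Failsafe callback, invoked by Xen when it cannot deliver an event or
 * exception normally: reload the %rcx and %r11 that Xen saved and
 * return to the interrupted context.
 */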
ENTRY(failsafe_callback)
        popq    %rcx
        popq    %r11
        iretq
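/*
 * Exception stubs: each vector routes through zeroentry or errorentry
 * (depending on whether the CPU supplies an error code) to the
 * corresponding do_* handler in C.
 */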
ENTRY(coprocessor_error)
        zeroentry do_coprocessor_error
ENTRY(simd_coprocessor_error)
        zeroentry do_simd_coprocessor_error
ENTRY(device_not_available)
        zeroentry do_device_not_available
ENTRY(debug)
        zeroentry do_debug
ENTRY(int3)
        zeroentry do_int3
ENTRY(overflow)
        zeroentry do_overflow
ENTRY(bounds)
        zeroentry do_bounds
ENTRY(invalid_op)
        zeroentry do_invalid_op
ENTRY(coprocessor_segment_overrun)
        zeroentry do_coprocessor_segment_overrun
ENTRY(invalid_TSS)
        errorentry do_invalid_TSS
ENTRY(segment_not_present)
        errorentry do_segment_not_present
/* runs on exception stack */
ENTRY(stack_segment)
        errorentry do_stack_segment
ENTRY(general_protection)
        errorentry do_general_protection
ENTRY(alignment_check)
        errorentry do_alignment_check
ENTRY(divide_error)
        zeroentry do_divide_error
ENTRY(spurious_interrupt_bug)
        zeroentry do_spurious_interrupt_bug
ENTRY(machine_check)
        errorentry do_machine_check
ENTRY(page_fault)
        errorentry do_page_fault
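/*
 * The guest stack: HALVM_STACK_SIZE bytes, page-aligned. initial_tos
 * labels the top of the stack loaded into %rsp at _start (the stack
 * grows downward).
 */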
.data
        .align  0x1000
        .global stack,initial_tos
stack:
        .fill   HALVM_STACK_SIZE
        .align  0x1000
initial_tos:
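/*
 * For newer Xen interfaces (>= 0x00030205) the loader reads guest
 * metadata from these ELF notes in preference to the legacy
 * __xen_guest section above.
 */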
#if __XEN_LATEST_INTERFACE_VERSION__ >= 0x00030205
ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz, "HaLVM")
ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz, "xen-3.0")
ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, .long, 0x00400000)
ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long, 0x00400000)
ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .long, _start)
ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long, hypercall_page)
ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz, "generic")
#endif