Skip to content

Commit

Permalink
Fix compiler warnings in dmap and smpg
Browse files Browse the repository at this point in the history
  • Loading branch information
lxylxy123456 committed Nov 10, 2021
1 parent 7e15193 commit a55102a
Show file tree
Hide file tree
Showing 6 changed files with 45 additions and 39 deletions.
2 changes: 1 addition & 1 deletion xmhf/src/xmhf-core/include/arch/x86/_acpi.h
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ typedef struct {
u8 checksum;
u8 oemid[6];
u8 revision;
u32 rsdtaddress;
uintptr_t rsdtaddress;
u32 length;
u64 xsdtaddress;
u8 xchecksum;
Expand Down
2 changes: 1 addition & 1 deletion xmhf/src/xmhf-core/include/arch/x86/_svm_eap.h
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ struct _svm_eap {
u32 dev_hdr_reg; //DEV header register (32-bit)
u32 dev_fnidx_reg; //DEV function/index register (32-bit)
u32 dev_data_reg; //DEV data register (32-bit)
u32 dev_bitmap_vaddr; //DEV bitmap virtual address
uintptr_t dev_bitmap_vaddr; //DEV bitmap virtual address
};


Expand Down
8 changes: 7 additions & 1 deletion xmhf/src/xmhf-core/include/arch/x86/_vmx.h
Original file line number Diff line number Diff line change
Expand Up @@ -598,10 +598,16 @@ static inline u32 __vmx_vmwrite(u32 encoding, u32 value){
return status;
}

static inline void __vmx_vmread(u32 encoding, u32 *value){
/* Read a VMCS field into *value.
 *
 * encoding: VMCS field encoding (see Intel SDM Vol. 3, Appendix B).
 * value:    out-parameter receiving the field contents.
 *
 * Parameters are unsigned long (native register width) so that 64-bit
 * VMCS fields are not truncated when built for x86-64 — this is the
 * point of the change from u32 in this commit.
 *
 * NOTE(review): VMREAD reports failure via CF/ZF, which this wrapper
 * does not check — presumably callers guarantee a valid encoding and a
 * current VMCS; confirm against call sites.
 */
static inline void __vmx_vmread(unsigned long encoding, unsigned long *value){
#ifdef __X86_64__
/* AT&T operand order: source %rax holds the field encoding ("a" input),
 * destination %rbx receives the field value ("=b" output). */
__asm__ __volatile__("vmread %%rax, %%rbx\n\t"
: "=b"(*value)
: "a"(encoding));
#else /* !__X86_64__ */
/* 32-bit build: same operation using %eax/%ebx. */
__asm__ __volatile__("vmread %%eax, %%ebx\n\t"
: "=b"(*value)
: "a"(encoding));
#endif /* __X86_64__ */
}

static inline u32 __vmx_vmclear(u64 vmcs){
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -243,9 +243,9 @@ static u32 svm_eap_initialize(u32 dev_bitmap_paddr, u32 dev_bitmap_vaddr){
//that lies within the initially DMA-protected SL region to protect
//the runtime physical memory.
//The runtime then re-initializes DEV once it gets control
static u32 svm_eap_early_initialize(u32 protected_buffer_paddr,
u32 protected_buffer_vaddr, u32 memregion_paddr_start,
u32 memregion_size){
static u32 svm_eap_early_initialize(uintptr_t protected_buffer_paddr,
uintptr_t protected_buffer_vaddr, uintptr_t memregion_paddr_start,
uintptr_t memregion_size){
u32 dev_bitmap_paddr = 0;


Expand Down Expand Up @@ -305,7 +305,7 @@ static u32 svm_eap_early_initialize(u32 protected_buffer_paddr,
//now make sure the protected buffer (4K in our case) is set to all 1's
//effectively preventing DMA reads and writes from memregion_paligned_paddr_start
//to memregion_paligned_paddr_start + 128M
memset((void *)((u32)protected_buffer_vaddr+(u32)offset), 0xFF, PAGE_SIZE_4K);
memset((void *)(protected_buffer_vaddr+(u32)offset), 0xFF, PAGE_SIZE_4K);
}

return dev_bitmap_paddr;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -72,16 +72,16 @@ static u32 l_vtd_pts_vaddr=0;

//------------------------------------------------------------------------------
//setup VT-d DMA protection page tables
static void _vtd_setuppagetables(u32 vtd_pdpt_paddr, u32 vtd_pdpt_vaddr,
u32 vtd_pdts_paddr, u32 vtd_pdts_vaddr,
u32 vtd_pts_paddr, u32 vtd_pts_vaddr){
static void _vtd_setuppagetables(uintptr_t vtd_pdpt_paddr, uintptr_t vtd_pdpt_vaddr,
uintptr_t vtd_pdts_paddr, uintptr_t vtd_pdts_vaddr,
uintptr_t vtd_pts_paddr, uintptr_t vtd_pts_vaddr){

u32 pdptphysaddr, pdtphysaddr, ptphysaddr;
uintptr_t pdptphysaddr, pdtphysaddr, ptphysaddr;
u32 i,j,k;
pdpt_t pdpt;
pdt_t pdt;
pt_t pt;
u32 physaddr=0;
uintptr_t physaddr=0;

pdptphysaddr=vtd_pdpt_paddr;
pdtphysaddr=vtd_pdts_paddr;
Expand Down Expand Up @@ -135,8 +135,8 @@ static void _vtd_setuppagetables(u32 vtd_pdpt_paddr, u32 vtd_pdpt_vaddr,
//each CE points to a PDPT type paging structure.
//in our implementation, every CE will point to a single PDPT type paging
//structure for the whole system
static void _vtd_setupRETCET(u32 vtd_pdpt_paddr, u32 vtd_ret_paddr, u32 vtd_ret_vaddr, u32 vtd_cet_paddr, u32 vtd_cet_vaddr){
u32 retphysaddr, cetphysaddr;
static void _vtd_setupRETCET(uintptr_t vtd_pdpt_paddr, uintptr_t vtd_ret_paddr, uintptr_t vtd_ret_vaddr, uintptr_t vtd_cet_paddr, uintptr_t vtd_cet_vaddr){
uintptr_t retphysaddr, cetphysaddr;
u32 i, j;
u64 *value;

Expand Down Expand Up @@ -186,7 +186,7 @@ static void _vtd_setupRETCET(u32 vtd_pdpt_paddr, u32 vtd_ret_paddr, u32 vtd_ret_
//we ensure that every entry in the RET is 0 which means that the DRHD will
//not allow any DMA requests for PCI bus 0-255 (Sec 3.3.2, IVTD Spec. v1.2)
//we zero out the CET just for sanity
static void _vtd_setupRETCET_bootstrap(u32 vtd_ret_paddr, u32 vtd_ret_vaddr, u32 vtd_cet_paddr, u32 vtd_cet_vaddr){
static void _vtd_setupRETCET_bootstrap(uintptr_t vtd_ret_paddr, uintptr_t vtd_ret_vaddr, uintptr_t vtd_cet_paddr, uintptr_t vtd_cet_vaddr){

//sanity check that RET and CET are page-aligned
HALT_ON_ERRORCOND( !(vtd_ret_paddr & 0x00000FFFUL) && !(vtd_cet_paddr & 0x00000FFFUL) );
Expand Down Expand Up @@ -705,11 +705,11 @@ static u32 vmx_eap_initialize(u32 vtd_pdpt_paddr, u32 vtd_pdpt_vaddr,
ACPI_RSDP rsdp;
ACPI_RSDT rsdt;
u32 num_rsdtentries;
u32 rsdtentries[ACPI_MAX_RSDT_ENTRIES];
uintptr_t rsdtentries[ACPI_MAX_RSDT_ENTRIES];
u32 status;
VTD_DMAR dmar;
u32 i, dmarfound;
u32 dmaraddrphys, remappingstructuresaddrphys;
uintptr_t dmaraddrphys, remappingstructuresaddrphys;

#ifndef __XMHF_VERIFICATION__
//zero out rsdp and rsdt structures
Expand All @@ -730,7 +730,7 @@ static u32 vmx_eap_initialize(u32 vtd_pdpt_paddr, u32 vtd_pdpt_vaddr,
num_rsdtentries = (rsdt.length - sizeof(ACPI_RSDT))/ sizeof(u32);
HALT_ON_ERRORCOND(num_rsdtentries < ACPI_MAX_RSDT_ENTRIES);
xmhf_baseplatform_arch_flat_copy((u8 *)&rsdtentries, (u8 *)(rsdp.rsdtaddress + sizeof(ACPI_RSDT)),
sizeof(u32)*num_rsdtentries);
sizeof(rsdtentries[0])*num_rsdtentries);
printf("\n%s: RSDT entry list at %08x, len=%u", __FUNCTION__,
(rsdp.rsdtaddress + sizeof(ACPI_RSDT)), num_rsdtentries);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ static u32 processSIPI(VCPU *vcpu, u32 icr_low_value, u32 icr_high_value){
}

printf("\nCPU(0x%02x): found AP to pass SIPI; id=0x%02x, vcpu=0x%08x",
vcpu->id, dest_vcpu->id, (u32)dest_vcpu);
vcpu->id, dest_vcpu->id, (uintptr_t)dest_vcpu);


//send the sipireceived flag to trigger the AP to start the HVM
Expand Down Expand Up @@ -275,13 +275,13 @@ void xmhf_smpguest_arch_x86vmx_eventhandler_dbexception(VCPU *vcpu, struct regs


if(g_vmx_lapic_op == LAPIC_OP_WRITE){ //LAPIC write
u32 src_registeraddress, dst_registeraddress;
uintptr_t src_registeraddress, dst_registeraddress;
u32 value_tobe_written;

HALT_ON_ERRORCOND( (g_vmx_lapic_reg == LAPIC_ICR_LOW) || (g_vmx_lapic_reg == LAPIC_ICR_HIGH) );

src_registeraddress = (u32)&g_vmx_virtual_LAPIC_base + g_vmx_lapic_reg;
dst_registeraddress = (u32)g_vmx_lapic_base + g_vmx_lapic_reg;
src_registeraddress = (uintptr_t)&g_vmx_virtual_LAPIC_base + g_vmx_lapic_reg;
dst_registeraddress = (uintptr_t)g_vmx_lapic_base + g_vmx_lapic_reg;

#ifdef __XMHF_VERIFICATION__
//TODO: hardware modeling
Expand All @@ -293,7 +293,7 @@ void xmhf_smpguest_arch_x86vmx_eventhandler_dbexception(VCPU *vcpu, struct regs
#endif

#else
value_tobe_written= *((u32 *)src_registeraddress);
value_tobe_written= *((uintptr_t *)src_registeraddress);
#endif


Expand All @@ -308,7 +308,7 @@ void xmhf_smpguest_arch_x86vmx_eventhandler_dbexception(VCPU *vcpu, struct regs

}else if( (value_tobe_written & 0x00000F00) == 0x600 ){
//this is a STARTUP IPI
u32 icr_value_high = *((u32 *)((u32)&g_vmx_virtual_LAPIC_base + (u32)LAPIC_ICR_HIGH));
u32 icr_value_high = *((u32 *)((uintptr_t)&g_vmx_virtual_LAPIC_base + (u32)LAPIC_ICR_HIGH));
printf("\n0x%04x:0x%08x -> (ICR=0x%08x write) STARTUP IPI detected, value=0x%08x",
(u16)vcpu->vmcs.guest_CS_selector, (u32)vcpu->vmcs.guest_RIP, g_vmx_lapic_reg, value_tobe_written);

Expand All @@ -332,11 +332,11 @@ void xmhf_smpguest_arch_x86vmx_eventhandler_dbexception(VCPU *vcpu, struct regs
}

}else if( g_vmx_lapic_op == LAPIC_OP_READ){ //LAPIC read
u32 src_registeraddress;
uintptr_t src_registeraddress;
u32 value_read __attribute__((unused));
HALT_ON_ERRORCOND( (g_vmx_lapic_reg == LAPIC_ICR_LOW) || (g_vmx_lapic_reg == LAPIC_ICR_HIGH) );

src_registeraddress = (u32)&g_vmx_virtual_LAPIC_base + g_vmx_lapic_reg;
src_registeraddress = (uintptr_t)&g_vmx_virtual_LAPIC_base + g_vmx_lapic_reg;

//TODO: hardware modeling
#ifndef __XMHF_VERIFICATION__
Expand Down Expand Up @@ -464,8 +464,8 @@ void xmhf_smpguest_arch_x86vmx_endquiesce(VCPU *vcpu){
//note: we are in atomic processing mode for this "vcpu"
void xmhf_smpguest_arch_x86vmx_eventhandler_nmiexception(VCPU *vcpu, struct regs *r){
u32 nmiinhvm; //1 if NMI originated from the HVM else 0 if within the hypervisor
u32 _vmx_vmcs_info_vmexit_interrupt_information;
u32 _vmx_vmcs_info_vmexit_reason;
unsigned long _vmx_vmcs_info_vmexit_interrupt_information;
unsigned long _vmx_vmcs_info_vmexit_reason;

(void)r;

Expand Down Expand Up @@ -542,7 +542,7 @@ u8 * xmhf_smpguest_arch_x86vmx_walk_pagetables(VCPU *vcpu, u32 vaddr){
pdt_t kpd;
pt_t kpt;
u32 pdpt_entry, pd_entry, pt_entry;
u32 tmp;
uintptr_t tmp;

// get fields from virtual addr
pdpt_index = pae_get_pdpt_index(vaddr);
Expand All @@ -552,18 +552,18 @@ u8 * xmhf_smpguest_arch_x86vmx_walk_pagetables(VCPU *vcpu, u32 vaddr){

//grab pdpt entry
tmp = pae_get_addr_from_32bit_cr3(kcr3);
kpdpt = (pdpt_t)((u32)tmp);
kpdpt = (pdpt_t)((uintptr_t)tmp);
pdpt_entry = kpdpt[pdpt_index];

//grab pd entry
tmp = pae_get_addr_from_pdpe(pdpt_entry);
kpd = (pdt_t)((u32)tmp);
kpd = (pdt_t)((uintptr_t)tmp);
pd_entry = kpd[pd_index];

if ( (pd_entry & _PAGE_PSE) == 0 ) {
// grab pt entry
tmp = (u32)pae_get_addr_from_pde(pd_entry);
kpt = (pt_t)((u32)tmp);
tmp = (uintptr_t)pae_get_addr_from_pde(pd_entry);
kpt = (pt_t)((uintptr_t)tmp);
pt_entry = kpt[pt_index];

// find physical page base addr from page table entry
Expand All @@ -575,7 +575,7 @@ u8 * xmhf_smpguest_arch_x86vmx_walk_pagetables(VCPU *vcpu, u32 vaddr){
paddr += (u64)offset;
}

return (u8 *)(u32)paddr;
return (u8 *)(uintptr_t)paddr;

}else{
//non-PAE 2 level paging used by guest
Expand All @@ -585,7 +585,7 @@ u8 * xmhf_smpguest_arch_x86vmx_walk_pagetables(VCPU *vcpu, u32 vaddr){
npdt_t kpd;
npt_t kpt;
u32 pd_entry, pt_entry;
u32 tmp;
uintptr_t tmp;

// get fields from virtual addr
pd_index = npae_get_pdt_index(vaddr);
Expand All @@ -594,13 +594,13 @@ u8 * xmhf_smpguest_arch_x86vmx_walk_pagetables(VCPU *vcpu, u32 vaddr){

// grab pd entry
tmp = npae_get_addr_from_32bit_cr3(kcr3);
kpd = (npdt_t)((u32)tmp);
kpd = (npdt_t)((uintptr_t)tmp);
pd_entry = kpd[pd_index];

if ( (pd_entry & _PAGE_PSE) == 0 ) {
// grab pt entry
tmp = (u32)npae_get_addr_from_pde(pd_entry);
kpt = (npt_t)((u32)tmp);
tmp = (uintptr_t)npae_get_addr_from_pde(pd_entry);
kpt = (npt_t)((uintptr_t)tmp);
pt_entry = kpt[pt_index];

// find physical page base addr from page table entry
Expand All @@ -612,6 +612,6 @@ u8 * xmhf_smpguest_arch_x86vmx_walk_pagetables(VCPU *vcpu, u32 vaddr){
paddr += (u64)offset;
}

return (u8 *)(u32)paddr;
return (u8 *)(uintptr_t)paddr;
}
}

0 comments on commit a55102a

Please sign in to comment.