diff --git a/riscv/insns/sb.h b/riscv/insns/sb.h
index ab10b309..06b426bf 100644
--- a/riscv/insns/sb.h
+++ b/riscv/insns/sb.h
@@ -39,6 +39,5 @@ if (capability_access) {
 } else {
   uint64_t tmp_addr = RS1 + insn.s_imm();
-  /*wip: rc update*/
   MMU.store_uint8(tmp_addr, RS2);
 }
 
diff --git a/riscv/insns/sd.h b/riscv/insns/sd.h
index e8e3066c..8b6563a9 100644
--- a/riscv/insns/sd.h
+++ b/riscv/insns/sd.h
@@ -40,6 +40,5 @@ if (capability_access) {
 } else {
   uint64_t tmp_addr = RS1 + insn.s_imm();
-  /*wip: rc update*/
   MMU.store_uint64(tmp_addr, RS2);
 }
 
diff --git a/riscv/insns/sh.h b/riscv/insns/sh.h
index ffdda8ee..77868f5c 100644
--- a/riscv/insns/sh.h
+++ b/riscv/insns/sh.h
@@ -39,6 +39,5 @@ if (capability_access) {
 } else {
   uint64_t tmp_addr = RS1 + insn.s_imm();
-  /*wip: rc update*/
   MMU.store_uint16(tmp_addr, RS2);
 }
 
diff --git a/riscv/insns/sw.h b/riscv/insns/sw.h
index f85eb357..f6ef8645 100644
--- a/riscv/insns/sw.h
+++ b/riscv/insns/sw.h
@@ -39,6 +39,5 @@ if (capability_access) {
 } else {
   uint64_t tmp_addr = RS1 + insn.s_imm();
-  /*wip: rc update*/
   MMU.store_uint32(tmp_addr, RS2);
 }
 
diff --git a/riscv/mmu.cc b/riscv/mmu.cc
index 5d87613d..2150b032 100644
--- a/riscv/mmu.cc
+++ b/riscv/mmu.cc
@@ -183,8 +183,9 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate
   }
 }
 
-void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags)
+bool mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, uint8_t* clen_bytes/*=NULL*/)
 {
+  bool virtual_mem_rc_update = false;
   reg_t paddr = translate(addr, len, STORE, xlate_flags);
 
   if (!matched_trigger) {
@@ -198,11 +199,23 @@ void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_
   }
 
   if (auto host_addr = sim->addr_to_mem(paddr)) {
-    /* if a clen-bit data is stored, we treat the data being stored as a capability.
+    /* if a CLEN-bit value is stored, we treat the value being stored as a capability.
      * set the tag for every store access.
+     * load the CLEN-aligned old value if the memory location holds a capability.
      */
     if (proc) {
-      bool set_as_cap = (len == 16)? true : false;
+      /* update reference count for virtual memory access.
+       * clen_bytes may be NULL (legacy 4-argument callers): those callers
+       * opt out of reference-count maintenance, so never write through NULL */
+      if (proc->is_normal_access() && clen_bytes != NULL) {
+        bool mem_tag_is_cap = proc->getTag(paddr);
+        if (mem_tag_is_cap) {
+          auto host_addr_aligned = host_addr - addr + (addr & ~(uint64_t(CLENBYTES - 1)));
+          memcpy(clen_bytes, host_addr_aligned, CLENBYTES);
+          virtual_mem_rc_update = true;
+        }
+      }
+      /*set memory tag*/
+      bool set_as_cap = (len == CLENBYTES)? true : false;
       proc->setTag(paddr, set_as_cap);
     }
     memcpy(host_addr, bytes, len);
@@ -216,6 +229,8 @@ void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_
   } else if (!mmio_store(paddr, len, bytes)) {
     throw trap_store_access_fault((proc) ? proc->state.v : false, addr, 0, 0);
   }
+
+  return virtual_mem_rc_update;
 }
 
 tlb_entry_t mmu_t::refill_tlb(reg_t vaddr, reg_t paddr, char* host_addr, access_type type)
diff --git a/riscv/mmu.h b/riscv/mmu.h
index 2c1ab497..6a4dce97 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -97,7 +97,6 @@ class mmu_t
 #endif
 
   // template for functions that load an aligned value from memory
-  // corner case: proc == NULL
   #define load_func(type, prefix, xlate_flags) \
     inline type##_t prefix##_##type(reg_t addr, bool require_alignment = false) { \
       /*secure world can't access normal memory*/ \
@@ -196,11 +195,21 @@ class mmu_t
       if (!proc || proc->is_normal_access()) { \
         if ((xlate_flags) == 0 && likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) { \
           if (proc) WRITE_MEM(addr, val, size); \
-          *(target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \
           if (proc) { \
             uint64_t paddr = tlb_data[vpn % TLB_ENTRIES].target_offset + addr; \
-            proc->setTag(paddr, false); \
+            bool mem_tag_is_cap = proc->getTag(paddr); \
+            if (mem_tag_is_cap) { \
+              /*clear the tag: the capability in memory is being overwritten*/ \
+              proc->setTag(paddr, false); \
+              /*decrement RC of the overwritten capability: read the full CLEN-aligned value (a type##_t-sized read would truncate the node id); NOTE(review): raw memcpy assumes a little-endian target*/ \
+              uint64_t aligned_addr = addr & ~(uint64_t(CLENBYTES - 1)); \
+              uint128_t tmp_val; \
+              memcpy(&tmp_val, tlb_data[vpn % TLB_ENTRIES].host_offset + aligned_addr, CLENBYTES); \
+              cap64_t tmp_cap; \
+              proc->updateRC(tmp_cap.get_node_id(tmp_val), -1); \
+            } \
           } \
+          *(target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \
         } \
         else if ((xlate_flags) == 0 && unlikely(tlb_store_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
           if (!matched_trigger) { \
@@ -209,15 +218,36 @@ class mmu_t
             throw *matched_trigger; \
           } \
           if (proc) WRITE_MEM(addr, val, size); \
-          *(target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \
           if (proc) { \
             uint64_t paddr = tlb_data[vpn % TLB_ENTRIES].target_offset + addr; \
-            proc->setTag(paddr, false); \
+            bool mem_tag_is_cap = proc->getTag(paddr); \
+            if (mem_tag_is_cap) { \
+              /*clear the tag: the capability in memory is being overwritten*/ \
+              proc->setTag(paddr, false); \
+              /*decrement RC of the overwritten capability (full CLEN-aligned read, see above)*/ \
+              uint64_t aligned_addr = addr & ~(uint64_t(CLENBYTES - 1)); \
+              uint128_t tmp_val; \
+              memcpy(&tmp_val, tlb_data[vpn % TLB_ENTRIES].host_offset + aligned_addr, CLENBYTES); \
+              cap64_t tmp_cap; \
+              proc->updateRC(tmp_cap.get_node_id(tmp_val), -1); \
+            } \
           } \
+          *(target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \
         } \
         else { \
           target_endian target_val = to_target(val); \
-          store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&target_val, (xlate_flags)); \
+          /*clen_val receives the CLEN-aligned old memory value when it held a capability; it must be CLENBYTES wide — a type##_t-sized variable here would be smashed by the CLENBYTES memcpy in store_slow_path*/ \
+          uint8_t clen_val[CLENBYTES]; \
+          bool virtual_mem_rc_update = store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&target_val, (xlate_flags), clen_val); \
+          if (virtual_mem_rc_update) { \
+            if (proc) { \
+              /*decrement RC of the overwritten capability*/ \
+              uint128_t tmp_val; \
+              memcpy(&tmp_val, clen_val, CLENBYTES); \
+              cap64_t tmp_cap; \
+              proc->updateRC(tmp_cap.get_node_id(tmp_val), -1); \
+            } \
+          } \
           if (proc) WRITE_MEM(addr, val, size); \
         } \
@@ -471,7 +501,7 @@ class mmu_t
   // handle uncommon cases: TLB misses, page faults, MMIO
   tlb_entry_t fetch_slow_path(reg_t addr);
   void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags);
-  void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags);
+  bool store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, uint8_t* clen_bytes=NULL);
   bool mmio_load(reg_t addr, size_t len, uint8_t* bytes);
   bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes);
   bool mmio_ok(reg_t addr, access_type type);