#8 automatic rc update for virtual memory store operation
Mingkai-Li committed Sep 3, 2023
1 parent e3e3166 commit 886a394
Showing 6 changed files with 50 additions and 14 deletions.
1 change: 0 additions & 1 deletion riscv/insns/sb.h
@@ -39,6 +39,5 @@ if (capability_access) {
 }
 else {
   uint64_t tmp_addr = RS1 + insn.s_imm();
-  /*wip: rc update*/
   MMU.store_uint8(tmp_addr, RS2);
 }
1 change: 0 additions & 1 deletion riscv/insns/sd.h
@@ -40,6 +40,5 @@ if (capability_access) {
 }
 else {
   uint64_t tmp_addr = RS1 + insn.s_imm();
-  /*wip: rc update*/
   MMU.store_uint64(tmp_addr, RS2);
 }
1 change: 0 additions & 1 deletion riscv/insns/sh.h
@@ -39,6 +39,5 @@ if (capability_access) {
 }
 else {
   uint64_t tmp_addr = RS1 + insn.s_imm();
-  /*wip: rc update*/
   MMU.store_uint16(tmp_addr, RS2);
 }
1 change: 0 additions & 1 deletion riscv/insns/sw.h
@@ -39,6 +39,5 @@ if (capability_access) {
 }
 else {
   uint64_t tmp_addr = RS1 + insn.s_imm();
-  /*wip: rc update*/
   MMU.store_uint32(tmp_addr, RS2);
 }
20 changes: 17 additions & 3 deletions riscv/mmu.cc
@@ -183,8 +183,9 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate
   }
 }
 
-void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags)
+bool mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, uint8_t* clen_bytes/*=NULL*/)
 {
+  bool virtual_mem_rc_update = false;
   reg_t paddr = translate(addr, len, STORE, xlate_flags);
 
   if (!matched_trigger) {
@@ -198,11 +199,22 @@ void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_
   }
 
   if (auto host_addr = sim->addr_to_mem(paddr)) {
-    /* if a clen-bit data is stored, we treat the data being stored as a capability.
+    /* if a CLEN-bit value is stored, we treat the value being stored as a capability.
      * set the tag for every store access.
+     * load the CLEN-aligned value if the memory is a capability.
      */
     if (proc) {
-      bool set_as_cap = (len == 16)? true : false;
+      /*update reference count for virtual memory access*/
+      if (proc->is_normal_access()) {
+        bool mem_tag_is_cap = proc->getTag(paddr);
+        if (mem_tag_is_cap) {
+          auto host_addr_aligned = host_addr - addr + (addr & ~(uint64_t(CLENBYTES - 1)));
+          memcpy(clen_bytes, host_addr_aligned, CLENBYTES);
+          virtual_mem_rc_update = true;
+        }
+      }
+      /*set memory tag*/
+      bool set_as_cap = (len == CLENBYTES)? true : false;
       proc->setTag(paddr, set_as_cap);
     }
     memcpy(host_addr, bytes, len);
@@ -216,6 +228,8 @@ void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_
   } else if (!mmio_store(paddr, len, bytes)) {
     throw trap_store_access_fault((proc) ? proc->state.v : false, addr, 0, 0);
   }
+
+  return virtual_mem_rc_update;
 }
 
 tlb_entry_t mmu_t::refill_tlb(reg_t vaddr, reg_t paddr, char* host_addr, access_type type)
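The mmu.cc change above gives store_slow_path a second job besides performing the store. The commented declaration below restates that contract; it is an editorial summary, shown as a free function with a local reg_t typedef only so the snippet stands on its own (in the tree, store_slow_path is a member of mmu_t and reg_t comes from the simulator's headers).

#include <cstdint>
#include <cstddef>

typedef uint64_t reg_t;  // uint64_t in this code base; repeated here for self-containment

// Contract of store_slow_path after this commit:
//  1. perform the store of `len` bytes at virtual address `addr` as before
//     (translation, trigger checks, MMIO, store access faults);
//  2. if the store lands on normal memory whose CLEN-aligned word is tagged as
//     a capability, first copy that old word into `clen_bytes` and return true,
//     so the caller can decrement the reference count of the node the
//     overwritten capability referred to; otherwise return false.
bool store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes,
                     uint32_t xlate_flags, uint8_t* clen_bytes = NULL);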
40 changes: 33 additions & 7 deletions riscv/mmu.h
@@ -97,7 +97,6 @@ class mmu_t
 #endif
 
   // template for functions that load an aligned value from memory
-  // corner case: proc == NULL
 #define load_func(type, prefix, xlate_flags) \
   inline type##_t prefix##_##type(reg_t addr, bool require_alignment = false) { \
     /*secure world can't access normal memory*/ \
@@ -196,11 +195,20 @@ class mmu_t
     if (!proc || proc->is_normal_access()) { \
       if ((xlate_flags) == 0 && likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) { \
         if (proc) WRITE_MEM(addr, val, size); \
-        *(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \
         if (proc) { \
           uint64_t paddr = tlb_data[vpn % TLB_ENTRIES].target_offset + addr; \
-          proc->setTag(paddr, false); \
+          bool mem_tag_is_cap = proc->getTag(paddr); \
+          if (mem_tag_is_cap) { \
+            /*set memory tag*/ \
+            proc->setTag(paddr, false); \
+            /*update reference count*/ \
+            uint64_t aligned_addr = addr & ~(uint64_t(CLENBYTES - 1)); \
+            uint128_t tmp_val = from_target(*(target_endian<uint128_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + aligned_addr)); \
+            cap64_t tmp_cap; \
+            proc->updateRC(tmp_cap.get_node_id(tmp_val), -1); \
+          } \
         } \
+        *(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \
       } \
       else if ((xlate_flags) == 0 && unlikely(tlb_store_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
         if (!matched_trigger) { \
@@ -209,15 +217,33 @@ class mmu_t
             throw *matched_trigger; \
         } \
         if (proc) WRITE_MEM(addr, val, size); \
-        *(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \
         if (proc) { \
           uint64_t paddr = tlb_data[vpn % TLB_ENTRIES].target_offset + addr; \
-          proc->setTag(paddr, false); \
+          bool mem_tag_is_cap = proc->getTag(paddr); \
+          if (mem_tag_is_cap) { \
+            /*set memory tag*/ \
+            proc->setTag(paddr, false); \
+            /*update reference count*/ \
+            uint64_t aligned_addr = addr & ~(uint64_t(CLENBYTES - 1)); \
+            uint128_t tmp_val = from_target(*(target_endian<uint128_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + aligned_addr)); \
+            cap64_t tmp_cap; \
+            proc->updateRC(tmp_cap.get_node_id(tmp_val), -1); \
+          } \
         } \
+        *(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \
       } \
       else { \
         target_endian<type##_t> target_val = to_target(val); \
-        store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&target_val, (xlate_flags)); \
+        target_endian<uint128_t> clen_val; \
+        bool virtual_mem_rc_update = store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&target_val, (xlate_flags), (uint8_t*)&clen_val); \
+        if (virtual_mem_rc_update) { \
+          if (proc) { \
+            /*update reference count*/ \
+            uint128_t tmp_val = from_target(clen_val); \
+            cap64_t tmp_cap; \
+            proc->updateRC(tmp_cap.get_node_id(tmp_val), -1); \
+          } \
+        } \
         if (proc) WRITE_MEM(addr, val, size); \
       } \
     } \
@@ -471,7 +497,7 @@ class mmu_t
   // handle uncommon cases: TLB misses, page faults, MMIO
   tlb_entry_t fetch_slow_path(reg_t addr);
   void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags);
-  void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags);
+  bool store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, uint8_t* clen_bytes=NULL);
   bool mmio_load(reg_t addr, size_t len, uint8_t* bytes);
   bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes);
   bool mmio_ok(reg_t addr, access_type type);
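Taken together, the changes enforce one rule: a store that overwrites a CLEN-aligned memory word whose tag marks it as holding a capability must clear that tag and decrement the reference count of the node the old capability referred to, via proc->updateRC(cap.get_node_id(...), -1). The TLB-hit branches of store_func in riscv/mmu.h do this inline; TLB misses go through the new clen_bytes/bool contract of store_slow_path in riscv/mmu.cc. The sketch below is a self-contained toy model of that bookkeeping, not code from the repository: toy_memory_t, store_data, store_cap, and node_id_of are invented names, and the node id is stored directly in the word instead of being decoded by cap64_t::get_node_id.

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <unordered_map>

// Toy-model constant; in the repository CLENBYTES is the size of a 128-bit capability.
constexpr uint64_t CLENBYTES = 16;

// Minimal model of the bookkeeping this commit automates: flat memory plus one
// tag bit per CLEN-aligned word and a per-node reference count.
struct toy_memory_t {
  uint8_t data[4096] = {};
  std::unordered_map<uint64_t, bool>    tag;  // CLEN-aligned paddr -> "holds a capability"
  std::unordered_map<uint64_t, int64_t> rc;   // node id -> reference count

  // The simulator decodes the node id from the capability bits
  // (cap64_t::get_node_id); the toy encoding just stores it in the word.
  uint64_t node_id_of(uint64_t paddr_aligned) {
    uint64_t id;
    std::memcpy(&id, data + paddr_aligned, sizeof(id));
    return id;
  }

  // Store a plain (non-capability) value. If the destination word was tagged,
  // the old capability is being destroyed: drop one reference and clear the tag.
  void store_data(uint64_t paddr, const void* bytes, size_t len) {
    uint64_t aligned = paddr & ~(CLENBYTES - 1);
    if (tag[aligned]) {
      rc[node_id_of(aligned)] -= 1;  // the "automatic rc update" of this commit
      tag[aligned] = false;
    }
    std::memcpy(data + paddr, bytes, len);
  }

  // Store a capability: write the CLEN-aligned word, set its tag, bump the RC
  // (and release the capability it overwrites, if any).
  void store_cap(uint64_t paddr, uint64_t node_id) {
    uint64_t aligned = paddr & ~(CLENBYTES - 1);
    if (tag[aligned])
      rc[node_id_of(aligned)] -= 1;
    std::memcpy(data + aligned, &node_id, sizeof(node_id));
    tag[aligned] = true;
    rc[node_id] += 1;
  }
};

int main() {
  toy_memory_t mem;
  mem.store_cap(0x100, /*node_id=*/7);          // rc[7] == 1, word 0x100 tagged
  uint32_t word = 0xdeadbeef;
  mem.store_data(0x104, &word, sizeof(word));   // clobbers the capability: rc[7] == 0
  std::printf("rc[7] = %lld, tagged = %d\n",
              (long long)mem.rc[7], (int)mem.tag[0x100]);
  return 0;
}

Compiling and running the toy prints rc[7] = 0 and tagged = 0 once the plain store clobbers the capability at 0x100, which is exactly the accounting the commit performs automatically on every virtual memory store.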
