Commit 3fade7f

initial mmu support for sq remaps

Stefanos Kornilios Mitsis Poiitidis authored and inolen committed Jul 10, 2017
1 parent e06f450 commit 3fade7f
Showing 13 changed files with 312 additions and 22 deletions.
1 change: 1 addition & 0 deletions CMakeLists.txt
@@ -210,6 +210,7 @@ set(RELIB_SOURCES
src/guest/sh4/sh4_dbg.c
src/guest/sh4/sh4_dmac.c
src/guest/sh4/sh4_intc.c
src/guest/sh4/sh4_mmu.c
src/guest/sh4/sh4_tmu.c
src/guest/debugger.c
src/guest/dreamcast.c
30 changes: 28 additions & 2 deletions src/guest/sh4/sh4.c
@@ -165,6 +165,7 @@ static int sh4_init(struct device *dev) {
sh4->guest->mem = as_translate(sh4->memory_if->space, 0x0);
sh4->guest->space = sh4->memory_if->space;
sh4->guest->invalid_instr = &sh4_invalid_instr;
sh4->guest->load_tlb = &sh4_mmu_load_tlb;
sh4->guest->sq_prefetch = &sh4_ccn_sq_prefetch;
sh4->guest->sleep = &sh4_sleep;
sh4->guest->sr_updated = &sh4_sr_updated;
@@ -212,6 +213,10 @@ void sh4_reset(struct sh4 *sh4, uint32_t pc) {
#include "guest/sh4/sh4_regs.inc"
#undef SH4_REG

/* reset tlb */
memset(sh4->utlb_sq_map, 0, sizeof(sh4->utlb_sq_map));
memset(sh4->utlb, 0, sizeof(sh4->utlb));

/* reset interrupts */
sh4_intc_reprioritize(sh4);

@@ -351,16 +356,37 @@ AM_BEGIN(struct sh4, sh4_data_map)
AM_RANGE(0x80000000, 0x9fffffff) AM_MIRROR(0x00000000) /* p1 */
AM_RANGE(0xa0000000, 0xbfffffff) AM_MIRROR(0x00000000) /* p2 */
AM_RANGE(0xc0000000, 0xdfffffff) AM_MIRROR(0x00000000) /* p3 */
AM_RANGE(0xe0000000, 0xffffffff) AM_MIRROR(0x00000000) /* p4 */

/* internal cache and sq only accessible through p4 */
/* internal cache is only accessible through p0, not any of the mirrors */

skmp (Contributor) commented on Jul 10, 2017:

I think this is not P4; it is somehow the end of P3. I have no idea how it works on the hardware. I think if the cache index* mode is disabled, then this directs to area 7? (I guess something more to test.)

AM_RANGE(0x7c000000, 0x7fffffff) AM_HANDLE("sh4 cache",
(mmio_read_cb)&sh4_ccn_cache_read,
(mmio_write_cb)&sh4_ccn_cache_write,
NULL, NULL)

/* p4 area */
AM_RANGE(0xe0000000, 0xe3ffffff) AM_HANDLE("sh4 sq",
(mmio_read_cb)&sh4_ccn_sq_read,
(mmio_write_cb)&sh4_ccn_sq_write,
NULL, NULL)
AM_RANGE(0xf0000000, 0xf1ffffff) AM_HANDLE("sh4 icache",
(mmio_read_cb)&sh4_ccn_icache_read,
(mmio_write_cb)&sh4_ccn_icache_write,
NULL, NULL)
AM_RANGE(0xf2000000, 0xf3ffffff) AM_HANDLE("sh4 itlb",
(mmio_read_cb)&sh4_mmu_itlb_read,
(mmio_write_cb)&sh4_mmu_itlb_write,
NULL, NULL)
AM_RANGE(0xf4000000, 0xf5ffffff) AM_HANDLE("sh4 ocache",
(mmio_read_cb)&sh4_ccn_ocache_read,
(mmio_write_cb)&sh4_ccn_ocache_write,
NULL, NULL)
AM_RANGE(0xf6000000, 0xf7ffffff) AM_HANDLE("sh4 utlb",
(mmio_read_cb)&sh4_mmu_utlb_read,
(mmio_write_cb)&sh4_mmu_utlb_write,
NULL, NULL)
AM_RANGE(0xfc000000, 0xffffffff) AM_HANDLE("sh4 reg",
(mmio_read_cb)&sh4_reg_read,
(mmio_write_cb)&sh4_reg_write,
NULL, NULL)
AM_END();
/* clang-format on */
31 changes: 30 additions & 1 deletion src/guest/sh4/sh4.h
@@ -36,6 +36,11 @@ struct sh4_dtr {
int size;
};

struct sh4_tlb_entry {
union pteh hi;
union ptel lo;
};

struct sh4 {
struct device;

@@ -61,6 +66,10 @@ struct sh4 {
uint64_t requested_interrupts;
/* pending interrupts moved to context for fast jit access */

/* mmu */
uint32_t utlb_sq_map[64];

skmp (Contributor) commented on Jul 10, 2017:

Maybe it should be more obvious that this is not a full map?

struct sh4_tlb_entry utlb[64];

/* tmu */
struct timer *tmu_timers[3];
};
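
One way to address skmp's point above would be a comment on the field itself; a possible wording (mine, not the commit's):

  /* utlb_sq_map is not a full translation map: it only caches the physical
     base address for UTLB entries whose virtual page falls inside the store
     queue area [0xe0000000, 0xe3ffffff], indexed by bits 25:20 of the vaddr */
  uint32_t utlb_sq_map[64];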
@@ -71,14 +80,24 @@ DECLARE_COUNTER(sh4_instrs);

AM_DECLARE(sh4_data_map);

/* ccn */
void sh4_ccn_sq_prefetch(void *data, uint32_t addr);
uint32_t sh4_ccn_cache_read(struct sh4 *sh4, uint32_t addr, uint32_t data_mask);
void sh4_ccn_cache_write(struct sh4 *sh4, uint32_t addr, uint32_t data,
uint32_t data_mask);
uint32_t sh4_ccn_sq_read(struct sh4 *sh4, uint32_t addr, uint32_t data_mask);
void sh4_ccn_sq_write(struct sh4 *sh4, uint32_t addr, uint32_t data,
uint32_t data_mask);

uint32_t sh4_ccn_icache_read(struct sh4 *sh4, uint32_t addr,

skmp (Contributor) commented on Jul 10, 2017:

I would name these "array_read/array_write" to make it clear they are not cached reads but read the cache arrays directly.

uint32_t data_mask);
void sh4_ccn_icache_write(struct sh4 *sh4, uint32_t addr, uint32_t data,
uint32_t data_mask);
uint32_t sh4_ccn_ocache_read(struct sh4 *sh4, uint32_t addr,
uint32_t data_mask);
void sh4_ccn_ocache_write(struct sh4 *sh4, uint32_t addr, uint32_t data,
uint32_t data_mask);

/* dbg */
int sh4_dbg_num_registers(struct device *dev);
void sh4_dbg_step(struct device *dev);
void sh4_dbg_add_breakpoint(struct device *dev, int type, uint32_t addr);
Expand All @@ -91,10 +110,20 @@ int sh4_dbg_invalid_instr(struct sh4 *sh4);

void sh4_dmac_ddt(struct sh4 *sh, struct sh4_dtr *dtr);

/* intc */
void sh4_intc_update_pending(struct sh4 *sh4);
void sh4_intc_check_pending(void *data);
void sh4_intc_reprioritize(struct sh4 *sh4);

/* mmu */
void sh4_mmu_load_tlb(void *data);
uint32_t sh4_mmu_itlb_read(struct sh4 *sh4, uint32_t addr, uint32_t data_mask);

skmp (Contributor) commented on Jul 10, 2017:

(Same comment as for array_read, to indicate that these read the tlb arrays, though the semantics are less confusing here.)

uint32_t sh4_mmu_utlb_read(struct sh4 *sh4, uint32_t addr, uint32_t data_mask);
void sh4_mmu_itlb_write(struct sh4 *sh4, uint32_t addr, uint32_t data,
uint32_t data_mask);
void sh4_mmu_utlb_write(struct sh4 *sh4, uint32_t addr, uint32_t data,
uint32_t data_mask);

struct sh4 *sh4_create(struct dreamcast *dc);
void sh4_destroy(struct sh4 *sh4);
void sh4_debug_menu(struct sh4 *sh4);
66 changes: 60 additions & 6 deletions src/guest/sh4/sh4_ccn.c
@@ -1,6 +1,12 @@
#include "guest/sh4/sh4.h"
#include "jit/jit.h"

#if 0
#define LOG_CCN LOG_INFO
#else
#define LOG_CCN(...)
#endif

/* with OIX, bit 25, rather than bit 13, determines which 4kb bank to use */
#define CACHE_OFFSET(addr, OIX) \
((OIX ? ((addr & 0x2000000) >> 13) : ((addr & 0x2000) >> 1)) | (addr & 0xfff))
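
A quick sanity check of the OIX behaviour described above; the asserts are illustrative and not part of the commit:

  /* with OIX, bit 25 of the address selects the 4kb bank; otherwise bit 13 does.
     both addresses below resolve to the same cache offset, 0x1123 */
  _Static_assert(CACHE_OFFSET(0x02000123, 1) == 0x1123, "OIX=1 uses bit 25");
  _Static_assert(CACHE_OFFSET(0x00002123, 0) == 0x1123, "OIX=0 uses bit 13");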
@@ -29,12 +35,27 @@ void sh4_ccn_sq_prefetch(void *data, uint32_t addr) {
DCHECK(addr >= 0xe0000000 && addr <= 0xe3ffffff);

struct sh4 *sh4 = data;
uint32_t dst = addr & 0x03ffffe0;

uint32_t dst = 0x0;
uint32_t sqi = (addr & 0x20) >> 5;
if (sqi) {
dst |= (*sh4->QACR1 & 0x1c) << 24;

if (sh4->MMUCR->AT) {
/* get upper 12 bits from UTLB */

skmp (Contributor) commented on Jul 10, 2017:

It might be worth noting that this is a hack / partial tlb support that works only in some cases. Normally a full tlb lookup needs to be performed here.

uint32_t vpn = addr >> 20;
dst = sh4->utlb_sq_map[vpn & 0x3f];

/* get lower 20 bits from original address */
dst |= addr & 0xfffe0;
} else {
dst |= (*sh4->QACR0 & 0x1c) << 24;
/* get upper 6 bits from QACR* registers */
if (sqi) {
dst = (*sh4->QACR1 & 0x1c) << 24;
} else {
dst = (*sh4->QACR0 & 0x1c) << 24;
}

/* get lower 26 bits from original address */
dst |= addr & 0x3ffffe0;
}

as_memcpy_to_guest(sh4->memory_if->space, dst, sh4->ctx.sq[sqi], 32);
@@ -69,10 +90,43 @@ void sh4_ccn_sq_write(struct sh4 *sh4, uint32_t addr, uint32_t data,
sh4->ctx.sq[sqi][idx] = data;
}

uint32_t sh4_ccn_icache_read(struct sh4 *sh4, uint32_t addr,
uint32_t data_mask) {
LOG_CCN("sh4_ccn_icache_read 0x%08x", addr);

/* return an invalid entry */
return 0;
}

void sh4_ccn_icache_write(struct sh4 *sh4, uint32_t addr, uint32_t data,
uint32_t data_mask) {
LOG_CCN("sh4_ccn_icache_write 0x%08x", addr);

/* ignore */
}

uint32_t sh4_ccn_ocache_read(struct sh4 *sh4, uint32_t addr,
uint32_t data_mask) {
LOG_CCN("sh4_ccn_ocache_read 0x%08x", addr);

/* return an invalid entry */
return 0;
}

void sh4_ccn_ocache_write(struct sh4 *sh4, uint32_t addr, uint32_t data,
uint32_t data_mask) {
LOG_CCN("sh4_ccn_ocache_write 0x%08x", addr);

/* ignore */
}

REG_W32(sh4_cb, MMUCR) {
struct sh4 *sh4 = dc->sh4;
if (value) {
LOG_FATAL("MMU not currently supported");

sh4->MMUCR->full = value;

if (sh4->MMUCR->AT) {
LOG_WARNING("MMU not fully supported");
}
}

123 changes: 123 additions & 0 deletions src/guest/sh4/sh4_mmu.c
@@ -0,0 +1,123 @@
#include "guest/sh4/sh4.h"

#if 0
#define LOG_MMU LOG_INFO
#else
#define LOG_MMU(...)
#endif

#define TLB_INDEX(addr) (((addr) >> 8) & 0x3f)

#define PAGE_SIZE(entry) (((entry)->lo.SZ1 << 1) | (entry)->lo.SZ0)

enum {
PAGE_SIZE_1KB,
PAGE_SIZE_4KB,
PAGE_SIZE_64KB,
PAGE_SIZE_1MB,
};

static void sh4_mmu_utlb_sync(struct sh4 *sh4, struct sh4_tlb_entry *entry) {
int n = entry - sh4->utlb;

/* check if entry maps to sq region [0xe0000000, 0xe3ffffff] */
if ((entry->hi.VPN & (0xfc000000 >> 10)) == (0xe0000000 >> 10)) {
/* assume page size is 1MB
FIXME support all page sizes */
uint32_t vpn = entry->hi.VPN >> 10;
uint32_t ppn = entry->lo.PPN << 10;

sh4->utlb_sq_map[vpn & 0x3f] = ppn;

LOG_INFO("sh4_mmu_utlb_sync sq map (%d) 0x%x -> 0x%x", n, vpn, ppn);
} else {
LOG_FATAL("sh4_mmu_utlb_sync memory mapping not supported");
}
}
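
To make the sq map indexing concrete, a worked example under the 1MB page assumption (the addresses are illustrative only):

  /* a UTLB entry mapping virtual 0xe2100000 to physical 0x0c100000:
       entry->hi.VPN = 0xe2100000 >> 10 = 0x388400
       entry->lo.PPN = 0x0c100000 >> 10 = 0x030400
     sh4_mmu_utlb_sync derives:
       vpn = 0x388400 >> 10 = 0xe21, so slot = 0xe21 & 0x3f = 0x21
       ppn = 0x030400 << 10 = 0x0c100000
     and a later store queue flush of 0xe2100020 in sh4_ccn_sq_prefetch resolves to
       utlb_sq_map[0x21] | (0xe2100020 & 0xfffe0) = 0x0c100020 */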

static void sh4_mmu_translate(struct sh4 *sh4, uint32_t addr) {}
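
The translate stub above is still empty, and sq_prefetch only uses the 1MB shortcut skmp flagged earlier. A minimal sketch of what a full UTLB walk could look like, reusing PAGE_SIZE and struct sh4_tlb_entry from this commit; the function itself, its name and the omission of ASID/protection checks are assumptions, not part of the change:

  static int sh4_mmu_utlb_lookup(struct sh4 *sh4, uint32_t vaddr, uint32_t *paddr) {
    /* per-page-size offset masks, in PAGE_SIZE_* enum order: 1kb, 4kb, 64kb, 1mb */
    static const uint32_t offset_mask[] = {0x3ff, 0xfff, 0xffff, 0xfffff};

    for (int i = 0; i < 64; i++) {
      struct sh4_tlb_entry *entry = &sh4->utlb[i];
      uint32_t mask = offset_mask[PAGE_SIZE(entry)];

      if (!entry->lo.V) {
        continue;
      }

      /* VPN holds bits 31:10 of the virtual address; compare above the page offset.
         ASID and protection checks are omitted in this sketch */
      if (((entry->hi.VPN << 10) & ~mask) != (vaddr & ~mask)) {
        continue;
      }

      *paddr = ((entry->lo.PPN << 10) & ~mask) | (vaddr & mask);
      return 1;
    }

    /* miss; a real implementation would raise a TLB miss exception here */
    return 0;
  }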

void sh4_mmu_load_tlb(void *data) {
struct sh4 *sh4 = data;

uint32_t n = sh4->MMUCR->URC;
struct sh4_tlb_entry *entry = &sh4->utlb[n];
entry->lo = *sh4->PTEL;
entry->hi = *sh4->PTEH;

sh4_mmu_utlb_sync(sh4, entry);
}

uint32_t sh4_mmu_itlb_read(struct sh4 *sh4, uint32_t addr, uint32_t data_mask) {
if (addr < 0x01000000) {
LOG_MMU("sh4_mmu_itlb_read address array %08x", addr);
} else {
LOG_MMU("sh4_mmu_itlb_read data array %08x", addr);
}

/* return an invalid entry */
return 0;
}

uint32_t sh4_mmu_utlb_read(struct sh4 *sh4, uint32_t addr, uint32_t data_mask) {
if (addr < 0x01000000) {
LOG_MMU("sh4_mmu_utlb_read address array %08x", addr);

struct sh4_tlb_entry *entry = &sh4->utlb[TLB_INDEX(addr)];
uint32_t data = entry->hi.full;
data |= entry->lo.D << 9;
data |= entry->lo.V << 8;
return data;
} else {
if (addr & 0x800000) {
LOG_FATAL("sh4_mmu_utlb_read data array 2 %08x", addr);
} else {
LOG_MMU("sh4_mmu_utlb_read data array 1 %08x", addr);

struct sh4_tlb_entry *entry = &sh4->utlb[TLB_INDEX(addr)];
uint32_t data = entry->lo.full;
return data;
}
}
}

void sh4_mmu_itlb_write(struct sh4 *sh4, uint32_t addr, uint32_t data,
uint32_t data_mask) {
if (addr < 0x01000000) {
LOG_MMU("sh4_mmu_itlb_write address array %08x %08x", addr, data);
} else {
LOG_MMU("sh4_mmu_itlb_write data array %08x %08x", addr, data);
}

/* ignore */
}

void sh4_mmu_utlb_write(struct sh4 *sh4, uint32_t addr, uint32_t data,

This comment has been minimized.

Copy link
@skmp

skmp Jul 10, 2017

Contributor

How about further splitting the read/write maps into address/data arrays from the memmap?

uint32_t data_mask) {
if (addr < 0x01000000) {
if (addr & 0x80) {
LOG_FATAL("sh4_mmu_utlb_write address array (associative) %08x %08x",
addr, data);
} else {
LOG_MMU("sh4_mmu_utlb_write address array %08x %08x", addr, data);

struct sh4_tlb_entry *entry = &sh4->utlb[TLB_INDEX(addr)];
entry->hi.full = data & 0xfffffcff;
entry->lo.D = (data >> 9) & 1;
entry->lo.V = (data >> 8) & 1;

sh4_mmu_utlb_sync(sh4, entry);
}
} else {
if (addr & 0x800000) {
LOG_FATAL("sh4_mmu_utlb_write data array 2 %08x %08x", addr, data);
} else {
LOG_MMU("sh4_mmu_utlb_write data array 1 %08x %08x", addr, data);

struct sh4_tlb_entry *entry = &sh4->utlb[TLB_INDEX(addr)];
entry->lo.full = data;

sh4_mmu_utlb_sync(sh4, entry);
}
}
}
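
Regarding skmp's splitting suggestion above, one way it could look in sh4_data_map (sh4.c); the address/data ranges follow the existing 0xf6000000/0xf7000000 layout, but the *_addr_* / *_data_* handler names are hypothetical and do not exist in this commit:

  AM_RANGE(0xf6000000, 0xf6ffffff) AM_HANDLE("sh4 utlb addr",
                                             (mmio_read_cb)&sh4_mmu_utlb_addr_read,
                                             (mmio_write_cb)&sh4_mmu_utlb_addr_write,
                                             NULL, NULL)
  AM_RANGE(0xf7000000, 0xf7ffffff) AM_HANDLE("sh4 utlb data",
                                             (mmio_read_cb)&sh4_mmu_utlb_data_read,
                                             (mmio_write_cb)&sh4_mmu_utlb_data_write,
                                             NULL, NULL)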
6 changes: 3 additions & 3 deletions src/guest/sh4/sh4_regs.inc
@@ -1,9 +1,9 @@
/* ADDR NAME DEFAULT TYPE */
SH4_REG(0xff000000, PTEH, 0x00000000, uint32_t)
SH4_REG(0xff000004, PTEL, 0x00000000, uint32_t)
SH4_REG(0xff000000, PTEH, 0x00000000, union pteh)
SH4_REG(0xff000004, PTEL, 0x00000000, union ptel)
SH4_REG(0xff000008, TTB, 0x00000000, uint32_t)
SH4_REG(0xff00000c, TEA, 0x00000000, uint32_t)
SH4_REG(0xff000010, MMUCR, 0x00000000, uint32_t)
SH4_REG(0xff000010, MMUCR, 0x00000000, union mmucr)
SH4_REG(0xff000014, BASRA, 0x00000000, uint32_t)
SH4_REG(0xff000018, BASRB, 0x00000000, uint32_t)
SH4_REG(0xff00001c, CCR, 0x00000000, union ccr)