sparc32 does not support unaligned 32 bit reads or writes.
Our bytecode is 2-byte aligned, but we need to read 4-byte values. sparc will
SIGBUS if asked to perform a misaligned read.

We also need to add the same hack as s390 to get a definition of
AO_fetch_compare_and_swap_full.

The memcpy() makes (almost) no difference on architectures that can read
32-bit values "misaligned" - optimisers know how to inline the memcpy as a
regular register load. Checking the assembler output on x86_64, it causes a
code change in one place, which seems to be the register allocator picking
different registers. On ARM, it causes a lot of loads to be commented
"unaligned", and causes one LDM to be replaced by two LDRs, an LDRH to
be replaced by LDR and UXTH, and seems to perturb the register allocator in
one other place.
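For readers unfamiliar with the pattern, here is a minimal standalone sketch of the technique the diff below applies: reading a 32-bit value out of a buffer that is only guaranteed 2-byte alignment by going through memcpy instead of a cast-and-dereference. The helper name read_u32 and the sample buffer are illustrative, not part of the commit.

```c
#include <stdint.h>
#include <string.h>

/* On strict-alignment targets such as sparc32, casting a 2-byte-aligned
 * pointer to uint32_t * and dereferencing it is undefined behaviour and can
 * raise SIGBUS. Copying through memcpy is always well defined, and compilers
 * for lax-alignment targets (x86_64, modern ARM) inline it as a plain load. */
static inline uint32_t read_u32(const uint8_t *pc, size_t idx) {
    uint32_t retval;
    memcpy(&retval, pc + idx, sizeof(retval));
    return retval;
}

int main(void) {
    /* 2-byte-aligned "bytecode": a 16-bit opcode followed by a 32-bit operand. */
    uint8_t code[6] = { 0x01, 0x00, 0x78, 0x56, 0x34, 0x12 };
    return read_u32(code, 2) == 0x12345678u ? 0 : 1;  /* little-endian example */
}
```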
nwc10 committed Sep 5, 2020
1 parent 590bac4 commit 92ef5d7
Showing 5 changed files with 40 additions and 12 deletions.
15 changes: 12 additions & 3 deletions src/core/interp.c
@@ -37,9 +37,18 @@ MVM_STATIC_INLINE MVMuint16 check_lex(MVMThreadContext *tc, MVMFrame *f, MVMuint
#endif
#define GET_I16(pc, idx) *((MVMint16 *)(pc + idx))
#define GET_UI16(pc, idx) *((MVMuint16 *)(pc + idx))
#define GET_I32(pc, idx) *((MVMint32 *)(pc + idx))
#define GET_UI32(pc, idx) *((MVMuint32 *)(pc + idx))
#define GET_N32(pc, idx) *((MVMnum32 *)(pc + idx))

MVM_STATIC_INLINE MVMint32 GET_I32(const MVMuint8 *pc, MVMint32 idx) {
    MVMint32 retval;
    memcpy(&retval, pc + idx, sizeof(retval));
    return retval;
}

MVM_STATIC_INLINE MVMuint32 GET_UI32(const MVMuint8 *pc, MVMint32 idx) {
    MVMuint32 retval;
    memcpy(&retval, pc + idx, sizeof(retval));
    return retval;
}

#define NEXT_OP (op = *(MVMuint16 *)(cur_op), cur_op += 2, op)

9 changes: 6 additions & 3 deletions src/core/validation.c
@@ -11,9 +11,12 @@
#define GET_REG(pc, idx) *((MVMuint16 *)(pc + idx))
#define GET_I16(pc, idx) *((MVMint16 *)(pc + idx))
#define GET_UI16(pc, idx) *((MVMuint16 *)(pc + idx))
#define GET_I32(pc, idx) *((MVMint32 *)(pc + idx))
#define GET_UI32(pc, idx) *((MVMuint32 *)(pc + idx))
#define GET_N32(pc, idx) *((MVMnum32 *)(pc + idx))

MVM_STATIC_INLINE MVMuint32 GET_UI32(const MVMuint8 *pc, MVMint32 idx) {
    MVMuint32 retval;
    memcpy(&retval, pc + idx, sizeof(retval));
    return retval;
}

#define MSG(val, msg) "Bytecode validation error at offset %" PRIu32 \
", instruction %" PRIu32 ":\n" msg, \
3 changes: 2 additions & 1 deletion src/moar.h
@@ -259,7 +259,8 @@ MVM_PUBLIC void MVM_vm_event_subscription_configure(MVMThreadContext *tc, MVMObj
/* Returns absolute executable path. */
MVM_PUBLIC int MVM_exepath(char* buffer, size_t* size);

#if defined(__s390__)
/* Seems that both 32 and 64 bit sparc need this crutch */
#if defined(__s390__) || defined(__sparc__)
AO_t AO_fetch_compare_and_swap_emulation(volatile AO_t *addr, AO_t old_val, AO_t new_val);
# define AO_fetch_compare_and_swap_full(addr, old, newval) \
    AO_fetch_compare_and_swap_emulation(addr, old, newval)
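The moar.h hunk above only redirects the libatomic_ops macro; callers are unchanged. A hedged illustration of the contract, assuming a generic caller (try_claim and slot are illustrative names, not MoarVM API): AO_fetch_compare_and_swap_full returns the value previously held at the address, whether it comes from libatomic_ops directly or from MoarVM's emulation function on s390 and sparc.

```c
#include <atomic_ops.h>  /* libatomic_ops */

/* Compare-and-swap via the libatomic_ops interface. On most targets the
 * library supplies AO_fetch_compare_and_swap_full itself; on s390 and sparc
 * the macro in moar.h routes the same call to
 * AO_fetch_compare_and_swap_emulation instead. */
static int try_claim(volatile AO_t *slot, AO_t expected, AO_t desired) {
    /* The swap happened iff the returned (previous) value equals 'expected'. */
    return AO_fetch_compare_and_swap_full(slot, expected, desired) == expected;
}
```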
4 changes: 2 additions & 2 deletions src/spesh/codegen.c
@@ -380,8 +380,8 @@ MVMSpeshCode * MVM_spesh_codegen(MVMThreadContext *tc, MVMSpeshGraph *g) {

    /* Fixup labels we were too early for. */
    for (i = 0; i < ws->num_fixups; i++)
        *((MVMuint32 *)(ws->bytecode + ws->fixup_locations[i])) =
            ws->bb_offsets[ws->fixup_bbs[i]->idx];
        memcpy((ws->bytecode + ws->fixup_locations[i]),
            ws->bb_offsets + ws->fixup_bbs[i]->idx, sizeof(MVMuint32));

    /* Ensure all handlers that are reachable got fixed up. */
    for (i = 0; i < g->num_handlers; i++) {
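The codegen.c hunk above is the write side of the same rule: a 32-bit branch offset is stored into 2-byte-aligned bytecode through memcpy rather than a cast-and-assign. A minimal sketch under the same assumptions (write_u32 and the sample buffer are illustrative, not the MoarVM code):

```c
#include <stdint.h>
#include <string.h>

/* Store a 32-bit value at an offset that is only guaranteed 2-byte alignment.
 * A direct *(uint32_t *)(buf + off) = val store can SIGBUS on sparc32;
 * memcpy compiles to a plain store on lax-alignment targets. */
static inline void write_u32(uint8_t *buf, size_t off, uint32_t val) {
    memcpy(buf + off, &val, sizeof(val));
}

int main(void) {
    uint8_t bytecode[6] = { 0 };
    uint32_t back;
    write_u32(bytecode, 2, 0xDEADBEEFu);       /* offset 2: only 2-byte aligned */
    memcpy(&back, bytecode + 2, sizeof(back)); /* read it back the same way */
    return back == 0xDEADBEEFu ? 0 : 1;
}
```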
21 changes: 18 additions & 3 deletions src/spesh/graph.c
@@ -10,9 +10,24 @@
#define GET_UI8(pc, idx) *((MVMuint8 *)((pc) + (idx)))
#define GET_I16(pc, idx) *((MVMint16 *)((pc) + (idx)))
#define GET_UI16(pc, idx) *((MVMuint16 *)((pc) + (idx)))
#define GET_I32(pc, idx) *((MVMint32 *)((pc) + (idx)))
#define GET_UI32(pc, idx) *((MVMuint32 *)((pc) + (idx)))
#define GET_N32(pc, idx) *((MVMnum32 *)((pc) + (idx)))

MVM_STATIC_INLINE MVMint32 GET_I32(const MVMuint8 *pc, MVMint32 idx) {
    MVMint32 retval;
    memcpy(&retval, pc + idx, sizeof(retval));
    return retval;
}

MVM_STATIC_INLINE MVMuint32 GET_UI32(const MVMuint8 *pc, MVMint32 idx) {
    MVMuint32 retval;
    memcpy(&retval, pc + idx, sizeof(retval));
    return retval;
}

MVM_STATIC_INLINE MVMnum32 GET_N32(const MVMuint8 *pc, MVMint32 idx) {
    MVMnum32 retval;
    memcpy(&retval, pc + idx, sizeof(retval));
    return retval;
}

/* Allocate a piece of memory from the spesh graph's region
* allocator. Deallocated when the spesh graph is. */
