From a6cec097850700a78378a1f520c863c4044aaa76 Mon Sep 17 00:00:00 2001
From: John Hawthorn
Date: Wed, 14 Jul 2021 12:42:29 -0700
Subject: [PATCH 1/8] Return if fixnums impossible

---
 yjit_codegen.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/yjit_codegen.c b/yjit_codegen.c
index 2b07eed54f2f9d..87227cfc08a984 100644
--- a/yjit_codegen.c
+++ b/yjit_codegen.c
@@ -1665,6 +1665,26 @@ guard_two_fixnums(ctx_t* ctx, uint8_t* side_exit)
     val_type_t arg1_type = ctx_get_opnd_type(ctx, OPND_STACK(0));
     val_type_t arg0_type = ctx_get_opnd_type(ctx, OPND_STACK(1));

+    if (arg0_type.is_heap || arg1_type.is_heap) {
+        jmp_ptr(cb, side_exit);
+        return;
+    }
+
+    if (arg0_type.type != ETYPE_FIXNUM && arg0_type.type != ETYPE_UNKNOWN) {
+        jmp_ptr(cb, side_exit);
+        return;
+    }
+
+    if (arg1_type.type != ETYPE_FIXNUM && arg1_type.type != ETYPE_UNKNOWN) {
+        jmp_ptr(cb, side_exit);
+        return;
+    }
+
+    RUBY_ASSERT(!arg0_type.is_heap);
+    RUBY_ASSERT(!arg1_type.is_heap);
+    RUBY_ASSERT(arg0_type.type == ETYPE_FIXNUM || arg0_type.type == ETYPE_UNKNOWN);
+    RUBY_ASSERT(arg1_type.type == ETYPE_FIXNUM || arg1_type.type == ETYPE_UNKNOWN);
+
     // Get stack operands without popping them
     x86opnd_t arg1 = ctx_stack_opnd(ctx, 0);
     x86opnd_t arg0 = ctx_stack_opnd(ctx, 1);

From c3d4fd93006a0d315a72b2cd39aeefdfb883fe1e Mon Sep 17 00:00:00 2001
From: John Hawthorn
Date: Wed, 14 Jul 2021 11:52:00 -0700
Subject: [PATCH 2/8] Rename to ctx_upgrade_opnd_type

---
 yjit_codegen.c | 22 +++++++++++-----------
 yjit_core.c    | 18 +++++++++++++-----
 yjit_core.h    |  2 +-
 3 files changed, 25 insertions(+), 17 deletions(-)

diff --git a/yjit_codegen.c b/yjit_codegen.c
index 87227cfc08a984..ab6bdfd7d6e9cc 100644
--- a/yjit_codegen.c
+++ b/yjit_codegen.c
@@ -1235,7 +1235,7 @@ gen_set_ivar(jitstate_t *jit, ctx_t *ctx, const int max_chain_depth, VALUE compt
         ADD_COMMENT(cb, "guard value is immediate");
         test(cb, REG1, imm_opnd(RUBY_IMMEDIATE_MASK));
         jz_ptr(cb, COUNTED_EXIT(side_exit, setivar_val_heapobject));
-        ctx_set_opnd_type(ctx, OPND_STACK(0), TYPE_IMM);
+        ctx_upgrade_opnd_type(ctx, OPND_STACK(0), TYPE_IMM);
     }

     // Pop the value to write
@@ -1702,8 +1702,8 @@ guard_two_fixnums(ctx_t* ctx, uint8_t* side_exit)
     }

     // Set stack types in context
-    ctx_set_opnd_type(ctx, OPND_STACK(0), TYPE_FIXNUM);
-    ctx_set_opnd_type(ctx, OPND_STACK(1), TYPE_FIXNUM);
+    ctx_upgrade_opnd_type(ctx, OPND_STACK(0), TYPE_FIXNUM);
+    ctx_upgrade_opnd_type(ctx, OPND_STACK(1), TYPE_FIXNUM);
 }

 // Conditional move operation used by comparison operators
@@ -2439,7 +2439,7 @@ jit_guard_known_klass(jitstate_t *jit, ctx_t *ctx, VALUE known_klass, insn_opnd_
             cmp(cb, REG0, imm_opnd(Qnil));
             jit_chain_guard(JCC_JNE, jit, ctx, max_chain_depth, side_exit);

-            ctx_set_opnd_type(ctx, insn_opnd, TYPE_NIL);
+            ctx_upgrade_opnd_type(ctx, insn_opnd, TYPE_NIL);
         }
     }
     else if (known_klass == rb_cTrueClass) {
@@ -2451,7 +2451,7 @@ jit_guard_known_klass(jitstate_t *jit, ctx_t *ctx, VALUE known_klass, insn_opnd_
             cmp(cb, REG0, imm_opnd(Qtrue));
             jit_chain_guard(JCC_JNE, jit, ctx, max_chain_depth, side_exit);

-            ctx_set_opnd_type(ctx, insn_opnd, TYPE_TRUE);
+            ctx_upgrade_opnd_type(ctx, insn_opnd, TYPE_TRUE);
         }
     }
     else if (known_klass == rb_cFalseClass) {
@@ -2464,7 +2464,7 @@ jit_guard_known_klass(jitstate_t *jit, ctx_t *ctx, VALUE known_klass, insn_opnd_
             test(cb, REG0, REG0);
             jit_chain_guard(JCC_JNZ, jit, ctx, max_chain_depth, side_exit);

-            ctx_set_opnd_type(ctx, insn_opnd, TYPE_FALSE);
+            ctx_upgrade_opnd_type(ctx, insn_opnd, TYPE_FALSE);
         }
     }
     else if (known_klass == rb_cInteger && FIXNUM_P(sample_instance)) {
@@ -2477,7 +2477,7 @@ jit_guard_known_klass(jitstate_t *jit, ctx_t *ctx, VALUE known_klass, insn_opnd_
             ADD_COMMENT(cb, "guard object is fixnum");
             test(cb, REG0, imm_opnd(RUBY_FIXNUM_FLAG));
             jit_chain_guard(JCC_JZ, jit, ctx, max_chain_depth, side_exit);
-            ctx_set_opnd_type(ctx, insn_opnd, TYPE_FIXNUM);
+            ctx_upgrade_opnd_type(ctx, insn_opnd, TYPE_FIXNUM);
         }
     }
     else if (known_klass == rb_cSymbol && STATIC_SYM_P(sample_instance)) {
@@ -2491,7 +2491,7 @@ jit_guard_known_klass(jitstate_t *jit, ctx_t *ctx, VALUE known_klass, insn_opnd_
             STATIC_ASSERT(special_shift_is_8, RUBY_SPECIAL_SHIFT == 8);
             cmp(cb, REG0_8, imm_opnd(RUBY_SYMBOL_FLAG));
             jit_chain_guard(JCC_JNE, jit, ctx, max_chain_depth, side_exit);
-            ctx_set_opnd_type(ctx, insn_opnd, TYPE_STATIC_SYMBOL);
+            ctx_upgrade_opnd_type(ctx, insn_opnd, TYPE_STATIC_SYMBOL);
         }
     }
     else if (known_klass == rb_cFloat && FLONUM_P(sample_instance)) {
@@ -2505,7 +2505,7 @@ jit_guard_known_klass(jitstate_t *jit, ctx_t *ctx, VALUE known_klass, insn_opnd_
             and(cb, REG1, imm_opnd(RUBY_FLONUM_MASK));
             cmp(cb, REG1, imm_opnd(RUBY_FLONUM_FLAG));
             jit_chain_guard(JCC_JNE, jit, ctx, max_chain_depth, side_exit);
-            ctx_set_opnd_type(ctx, insn_opnd, TYPE_FLONUM);
+            ctx_upgrade_opnd_type(ctx, insn_opnd, TYPE_FLONUM);
         }
     }
     else if (FL_TEST(known_klass, FL_SINGLETON) && sample_instance == rb_attr_get(known_klass, id__attached__)) {
@@ -2538,7 +2538,7 @@ jit_guard_known_klass(jitstate_t *jit, ctx_t *ctx, VALUE known_klass, insn_opnd_
             cmp(cb, REG0, imm_opnd(Qnil));
             jit_chain_guard(JCC_JBE, jit, ctx, max_chain_depth, side_exit);

-            ctx_set_opnd_type(ctx, insn_opnd, TYPE_HEAP);
+            ctx_upgrade_opnd_type(ctx, insn_opnd, TYPE_HEAP);
         }

         x86opnd_t klass_opnd = mem_opnd(64, REG0, offsetof(struct RBasic, klass));
@@ -3083,7 +3083,7 @@ gen_send_iseq(jitstate_t *jit, ctx_t *ctx, const struct rb_callinfo *ci, const r
         ctx_set_local_type(&callee_ctx, arg_idx, arg_type);
     }
     val_type_t recv_type = ctx_get_opnd_type(ctx, OPND_STACK(argc));
-    ctx_set_opnd_type(&callee_ctx, OPND_SELF, recv_type);
+    ctx_upgrade_opnd_type(&callee_ctx, OPND_SELF, recv_type);

     // The callee might change locals through Kernel#binding and other means.
     ctx_clear_local_types(ctx);
diff --git a/yjit_core.c b/yjit_core.c
index b3b8964f99ac12..e6010d71236578 100644
--- a/yjit_core.c
+++ b/yjit_core.c
@@ -154,13 +154,20 @@ ctx_get_opnd_type(const ctx_t* ctx, insn_opnd_t opnd)
     rb_bug("unreachable");
 }

+int type_diff(val_type_t src, val_type_t dst);
+#define UPGRADE_TYPE(dest, src) do { \
+    RUBY_ASSERT(type_diff((src), (dest)) != INT_MAX); \
+    (dest) = (src); \
+} while (false)
+
+
 /**
 Set the type of an instruction operand
 */
-void ctx_set_opnd_type(ctx_t* ctx, insn_opnd_t opnd, val_type_t type)
+void ctx_upgrade_opnd_type(ctx_t* ctx, insn_opnd_t opnd, val_type_t type)
 {
     if (opnd.is_self) {
-        ctx->self_type = type;
+        UPGRADE_TYPE(ctx->self_type, type);
         return;
     }

@@ -173,16 +180,17 @@ void ctx_set_opnd_type(ctx_t* ctx, insn_opnd_t opnd, val_type_t type)
     switch (mapping.kind)
     {
     case TEMP_SELF:
-        ctx->self_type = type;
+        UPGRADE_TYPE(ctx->self_type, type);
         break;

     case TEMP_STACK:
-        ctx->temp_types[ctx->stack_size - 1 - opnd.idx] = type;
+        int stack_index = ctx->stack_size - 1 - opnd.idx;
+        UPGRADE_TYPE(ctx->temp_types[stack_index], type);
         break;

     case TEMP_LOCAL:
         RUBY_ASSERT(mapping.idx < MAX_LOCAL_TYPES);
-        ctx->local_types[mapping.idx] = type;
+        UPGRADE_TYPE(ctx->local_types[mapping.idx], type);
         break;
     }
 }
diff --git a/yjit_core.h b/yjit_core.h
index 5f5383ace9b02a..4605923f50e89b 100644
--- a/yjit_core.h
+++ b/yjit_core.h
@@ -259,7 +259,7 @@ x86opnd_t ctx_stack_push_local(ctx_t* ctx, size_t local_idx);
 x86opnd_t ctx_stack_pop(ctx_t* ctx, size_t n);
 x86opnd_t ctx_stack_opnd(ctx_t* ctx, int32_t idx);
 val_type_t ctx_get_opnd_type(const ctx_t* ctx, insn_opnd_t opnd);
-void ctx_set_opnd_type(ctx_t* ctx, insn_opnd_t opnd, val_type_t type);
+void ctx_upgrade_opnd_type(ctx_t* ctx, insn_opnd_t opnd, val_type_t type);
 void ctx_set_local_type(ctx_t* ctx, size_t idx, val_type_t type);
 void ctx_clear_local_types(ctx_t* ctx);
 int ctx_diff(const ctx_t* src, const ctx_t* dst);

From 1c8fad6d5f3053ee5a73292b9bc27799c50d8ae7 Mon Sep 17 00:00:00 2001
From: John Hawthorn
Date: Wed, 14 Jul 2021 11:36:33 -0700
Subject: [PATCH 3/8] Introduce ctx_{get,set}_opnd_mapping

---
 yjit_codegen.c |  38 ++++++++---------
 yjit_core.c    | 108 +++++++++++++++++++++++++++++++++++--------------
 yjit_core.h    |  11 +++++
 3 files changed, 106 insertions(+), 51 deletions(-)

diff --git a/yjit_codegen.c b/yjit_codegen.c
index ab6bdfd7d6e9cc..828e6af418bdb3 100644
--- a/yjit_codegen.c
+++ b/yjit_codegen.c
@@ -551,11 +551,11 @@ static codegen_status_t
 gen_dup(jitstate_t* jit, ctx_t* ctx)
 {
     // Get the top value and its type
-    val_type_t dup_type = ctx_get_opnd_type(ctx, OPND_STACK(0));
     x86opnd_t dup_val = ctx_stack_pop(ctx, 0);
+    temp_type_mapping_t mapping = ctx_get_opnd_mapping(ctx, OPND_STACK(0));

     // Push the same value on top
-    x86opnd_t loc0 = ctx_stack_push(ctx, dup_type);
+    x86opnd_t loc0 = ctx_stack_push_mapping(ctx, mapping);
     mov(cb, REG0, dup_val);
     mov(cb, loc0, REG0);

@@ -573,17 +573,16 @@ gen_dupn(jitstate_t* jit, ctx_t* ctx)
         return YJIT_CANT_COMPILE;
     }

-    val_type_t type1 = ctx_get_opnd_type(ctx, OPND_STACK(1));
     x86opnd_t opnd1 = ctx_stack_opnd(ctx, 1);
-
-    val_type_t type0 = ctx_get_opnd_type(ctx, OPND_STACK(0));
     x86opnd_t opnd0 = ctx_stack_opnd(ctx, 0);
+    temp_type_mapping_t mapping1 = ctx_get_opnd_mapping(ctx, OPND_STACK(1));
+    temp_type_mapping_t mapping0 = ctx_get_opnd_mapping(ctx, OPND_STACK(0));

-    x86opnd_t dst1 = ctx_stack_push(ctx, type1);
+    x86opnd_t dst1 = ctx_stack_push_mapping(ctx, mapping1);
     mov(cb, REG0, opnd1);
     mov(cb, dst1, REG0);

-    x86opnd_t dst0 = ctx_stack_push(ctx, type0);
+    x86opnd_t dst0 = ctx_stack_push_mapping(ctx, mapping0);
     mov(cb, REG0, opnd0);
     mov(cb, dst0, REG0);

@@ -594,21 +593,19 @@
 static codegen_status_t
 gen_swap(jitstate_t* jit, ctx_t* ctx)
 {
-    val_type_t type0 = ctx_get_opnd_type(ctx, OPND_STACK(0));
     x86opnd_t opnd0 = ctx_stack_opnd(ctx, 0);
-
-    val_type_t type1 = ctx_get_opnd_type(ctx, OPND_STACK(1));
     x86opnd_t opnd1 = ctx_stack_opnd(ctx, 1);
+    temp_type_mapping_t mapping0 = ctx_get_opnd_mapping(ctx, OPND_STACK(0));
+    temp_type_mapping_t mapping1 = ctx_get_opnd_mapping(ctx, OPND_STACK(1));

     mov(cb, REG0, opnd0);
     mov(cb, REG1, opnd1);
-
-    ctx_set_opnd_type(ctx, OPND_STACK(0), type1);
-    ctx_set_opnd_type(ctx, OPND_STACK(1), type0);
-
     mov(cb, opnd0, REG1);
     mov(cb, opnd1, REG0);

+    ctx_set_opnd_mapping(ctx, OPND_STACK(0), mapping1);
+    ctx_set_opnd_mapping(ctx, OPND_STACK(1), mapping0);
+
     return YJIT_KEEP_COMPILING;
 }

@@ -618,16 +615,15 @@
 gen_setn(jitstate_t* jit, ctx_t* ctx)
 {
     rb_num_t n = (rb_num_t)jit_get_arg(jit, 0);

-    // Get the top value and its type
-    val_type_t top_type = ctx_get_opnd_type(ctx, OPND_STACK(0));
+    // Set the destination
     x86opnd_t top_val = ctx_stack_pop(ctx, 0);
-
-    // Set the destination and its type
-    ctx_set_opnd_type(ctx, OPND_STACK(n), top_type);
     x86opnd_t dst_opnd = ctx_stack_opnd(ctx, (int32_t)n);
     mov(cb, REG0, top_val);
     mov(cb, dst_opnd, REG0);

+    temp_type_mapping_t mapping = ctx_get_opnd_mapping(ctx, OPND_STACK(0));
+    ctx_set_opnd_mapping(ctx, OPND_STACK(n), mapping);
+
     return YJIT_KEEP_COMPILING;
 }

@@ -638,10 +634,10 @@ gen_topn(jitstate_t* jit, ctx_t* ctx)
     int32_t n = (int32_t)jit_get_arg(jit, 0);

     // Get top n type / operand
-    val_type_t top_n_type = ctx_get_opnd_type(ctx, OPND_STACK(n));
     x86opnd_t top_n_val = ctx_stack_opnd(ctx, n);
+    temp_type_mapping_t mapping = ctx_get_opnd_mapping(ctx, OPND_STACK(n));

-    x86opnd_t loc0 = ctx_stack_push(ctx, top_n_type);
+    x86opnd_t loc0 = ctx_stack_push_mapping(ctx, mapping);
     mov(cb, REG0, top_n_val);
     mov(cb, loc0, REG0);

diff --git a/yjit_core.c b/yjit_core.c
index e6010d71236578..b241f438c9d5a4 100644
--- a/yjit_core.c
+++ b/yjit_core.c
@@ -22,16 +22,18 @@ ctx_sp_opnd(ctx_t* ctx, int32_t offset_bytes)
 }

 /*
-Push one new value on the temp stack
+Push one new value on the temp stack with an explicit mapping
 Return a pointer to the new stack top
 */
 x86opnd_t
-ctx_stack_push(ctx_t* ctx, val_type_t type)
+ctx_stack_push_mapping(ctx_t* ctx, temp_type_mapping_t mapping)
 {
-    // Keep track of the type of the value
+    // Keep track of the type and mapping of the value
     if (ctx->stack_size < MAX_TEMP_TYPES) {
-        ctx->temp_mapping[ctx->stack_size] = MAP_STACK;
-        ctx->temp_types[ctx->stack_size] = type;
+        ctx->temp_mapping[ctx->stack_size] = mapping.mapping;
+        ctx->temp_types[ctx->stack_size] = mapping.type;
+
+        RUBY_ASSERT(mapping.mapping.kind != TEMP_LOCAL || mapping.mapping.idx < MAX_LOCAL_TYPES);
     }

     ctx->stack_size += 1;
@@ -42,24 +44,26 @@ ctx_stack_push(ctx_t* ctx, val_type_t type)
     return mem_opnd(64, REG_SP, offset);
 }

+
+/*
+Push one new value on the temp stack
+Return a pointer to the new stack top
+*/
+x86opnd_t
+ctx_stack_push(ctx_t* ctx, val_type_t type)
+{
+    temp_type_mapping_t mapping = { MAP_STACK, type };
+    return ctx_stack_push_mapping(ctx, mapping);
+}
+
 /*
 Push the self value on the stack
 */
 x86opnd_t
 ctx_stack_push_self(ctx_t* ctx)
 {
-    // Keep track of the type of the value
-    if (ctx->stack_size < MAX_TEMP_TYPES) {
-        ctx->temp_mapping[ctx->stack_size] = MAP_SELF;
-        ctx->temp_types[ctx->stack_size] = ctx->self_type;
-    }
-
-    ctx->stack_size += 1;
-    ctx->sp_offset += 1;
-
-    // SP points just above the topmost value
-    int32_t offset = (ctx->sp_offset - 1) * sizeof(VALUE);
-    return mem_opnd(64, REG_SP, offset);
+    temp_type_mapping_t mapping = { MAP_SELF, TYPE_UNKNOWN };
+    return ctx_stack_push_mapping(ctx, mapping);
 }

 /*
@@ -68,17 +72,15 @@
 Push a local variable on the stack
 */
 x86opnd_t
 ctx_stack_push_local(ctx_t* ctx, size_t local_idx)
 {
-    // Keep track of the type of the value
-    if (ctx->stack_size < MAX_TEMP_TYPES && local_idx < MAX_LOCAL_TYPES) {
-        ctx->temp_mapping[ctx->stack_size] = (temp_mapping_t){ .kind = TEMP_LOCAL, .idx = local_idx };
+    if (local_idx >= MAX_LOCAL_TYPES) {
+        return ctx_stack_push(ctx, TYPE_UNKNOWN);
     }

-    ctx->stack_size += 1;
-    ctx->sp_offset += 1;
-
-    // SP points just above the topmost value
-    int32_t offset = (ctx->sp_offset - 1) * sizeof(VALUE);
-    return mem_opnd(64, REG_SP, offset);
+    temp_type_mapping_t mapping = {
+        (temp_mapping_t){ .kind = TEMP_LOCAL, .idx = local_idx },
+        TYPE_UNKNOWN
+    };
+    return ctx_stack_push_mapping(ctx, mapping);
 }

@@ -132,7 +134,7 @@ ctx_get_opnd_type(const ctx_t* ctx, insn_opnd_t opnd)
     if (opnd.is_self)
         return ctx->self_type;

-    if (ctx->stack_size > MAX_TEMP_TYPES)
+    if (ctx->stack_size >= MAX_TEMP_TYPES)
         return TYPE_UNKNOWN;

     RUBY_ASSERT(opnd.idx < ctx->stack_size);
@@ -171,11 +173,13 @@ void ctx_upgrade_opnd_type(ctx_t* ctx, insn_opnd_t opnd, val_type_t type)
         return;
     }

-    if (ctx->stack_size > MAX_TEMP_TYPES)
+    if (ctx->stack_size >= MAX_TEMP_TYPES)
         return;

     RUBY_ASSERT(opnd.idx < ctx->stack_size);
-    temp_mapping_t mapping = ctx->temp_mapping[ctx->stack_size - 1 - opnd.idx];
+    int stack_index = ctx->stack_size - 1 - opnd.idx;
+    RUBY_ASSERT(stack_index < MAX_TEMP_TYPES);
+    temp_mapping_t mapping = ctx->temp_mapping[stack_index];

     switch (mapping.kind)
     {
@@ -184,7 +188,6 @@ void ctx_upgrade_opnd_type(ctx_t* ctx, insn_opnd_t opnd, val_type_t type)
         break;

     case TEMP_STACK:
-        int stack_index = ctx->stack_size - 1 - opnd.idx;
         UPGRADE_TYPE(ctx->temp_types[stack_index], type);
         break;

@@ -195,6 +198,51 @@ void ctx_upgrade_opnd_type(ctx_t* ctx, insn_opnd_t opnd, val_type_t type)
     }
 }

+temp_type_mapping_t
+ctx_get_opnd_mapping(const ctx_t* ctx, insn_opnd_t opnd)
+{
+    temp_type_mapping_t type_mapping;
+    type_mapping.type = ctx_get_opnd_type(ctx, opnd);
+
+    if (opnd.is_self) {
+        type_mapping.mapping = MAP_SELF;
+        return type_mapping;
+    }
+
+    RUBY_ASSERT(opnd.idx < ctx->stack_size);
+    int stack_idx = ctx->stack_size - 1 - opnd.idx;
+
+    if (stack_idx < MAX_TEMP_TYPES) {
+        type_mapping.mapping = ctx->temp_mapping[stack_idx];
+    } else {
+        // We can't know the source of this stack operand, so we assume it is
+        // a stack-only temporary. type will be UNKNOWN
+        RUBY_ASSERT(type_mapping.type.type == ETYPE_UNKNOWN);
+        type_mapping.mapping = MAP_STACK;
+    }
+
+    return type_mapping;
+}
+
+void
+ctx_set_opnd_mapping(ctx_t* ctx, insn_opnd_t opnd, temp_type_mapping_t type_mapping)
+{
+    // self is always MAP_SELF
+    RUBY_ASSERT(!opnd.is_self);
+
+    RUBY_ASSERT(opnd.idx < ctx->stack_size);
+    int stack_idx = ctx->stack_size - 1 - opnd.idx;
+
+    // If outside of tracked range, do nothing
+    if (stack_idx >= MAX_TEMP_TYPES)
+        return;
+
+    ctx->temp_mapping[stack_idx] = type_mapping.mapping;
+
+    // Only used when mapping == MAP_STACK
+    ctx->temp_types[stack_idx] = type_mapping.type;
+}
+
 /**
 Set the type of a local variable
 */
diff --git a/yjit_core.h b/yjit_core.h
index 4605923f50e89b..031327a1ffdb66 100644
--- a/yjit_core.h
+++ b/yjit_core.h
@@ -105,6 +105,13 @@ STATIC_ASSERT(temp_mapping_size, sizeof(temp_mapping_t) == 1);
 // Temp value is actually self
 #define MAP_SELF ( (temp_mapping_t) { .kind = TEMP_SELF } )

+// Represents both the type and mapping
+typedef struct {
+    temp_mapping_t mapping;
+    val_type_t type;
+} temp_type_mapping_t;
+STATIC_ASSERT(temp_type_mapping_size, sizeof(temp_type_mapping_t) == 2);
+
 // Operand to a bytecode instruction
 typedef struct yjit_insn_opnd
 {
@@ -253,6 +260,7 @@ typedef struct yjit_block_version

 // Context object methods
 x86opnd_t ctx_sp_opnd(ctx_t* ctx, int32_t offset_bytes);
+x86opnd_t ctx_stack_push_mapping(ctx_t* ctx, temp_type_mapping_t mapping);
 x86opnd_t ctx_stack_push(ctx_t* ctx, val_type_t type);
 x86opnd_t ctx_stack_push_self(ctx_t* ctx);
 x86opnd_t ctx_stack_push_local(ctx_t* ctx, size_t local_idx);
@@ -264,6 +272,9 @@ void ctx_set_local_type(ctx_t* ctx, size_t idx, val_type_t type);
 void ctx_clear_local_types(ctx_t* ctx);
 int ctx_diff(const ctx_t* src, const ctx_t* dst);

+temp_type_mapping_t ctx_get_opnd_mapping(const ctx_t* ctx, insn_opnd_t opnd);
+void ctx_set_opnd_mapping(ctx_t* ctx, insn_opnd_t opnd, temp_type_mapping_t type_mapping);
+
 block_t* find_block_version(blockid_t blockid, const ctx_t* ctx);
 block_t* gen_block_version(blockid_t blockid, const ctx_t* ctx, rb_execution_context_t *ec);
 uint8_t* gen_entry_point(const rb_iseq_t *iseq, uint32_t insn_idx, rb_execution_context_t *ec);

From d69868f9c6fdbe63206e58ed14c2caa227323a70 Mon Sep 17 00:00:00 2001
From: John Hawthorn
Date: Tue, 27 Jul 2021 23:35:01 -0700
Subject: [PATCH 4/8] Make ctx_diff aware of mappings

---
 yjit_core.c | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/yjit_core.c b/yjit_core.c
index b241f438c9d5a4..a4fe04d4aa41a5 100644
--- a/yjit_core.c
+++ b/yjit_core.c
@@ -356,9 +356,22 @@ int ctx_diff(const ctx_t* src, const ctx_t* dst)

     // For each value on the temp stack
     for (size_t i = 0; i < src->stack_size; ++i)
     {
-        val_type_t t_src = ctx_get_opnd_type(src, OPND_STACK(i));
-        val_type_t t_dst = ctx_get_opnd_type(dst, OPND_STACK(i));
-        int temp_diff = type_diff(t_src, t_dst);
+        temp_type_mapping_t m_src = ctx_get_opnd_mapping(src, OPND_STACK(i));
+        temp_type_mapping_t m_dst = ctx_get_opnd_mapping(dst, OPND_STACK(i));
+
+        if (m_dst.mapping.kind != m_src.mapping.kind) {
+            if (m_dst.mapping.kind == TEMP_STACK) {
+                // We can safely drop information about the source of the temp
+                // stack operand.
+                diff += 1;
+            } else {
+                return INT_MAX;
+            }
+        } else if (m_dst.mapping.idx != m_src.mapping.idx) {
+            return INT_MAX;
+        }
+
+        int temp_diff = type_diff(m_src.type, m_dst.type);
         if (temp_diff == INT_MAX)
             return INT_MAX;

From fa4d3a091466b5cdd8c8672486778f0fe59558cc Mon Sep 17 00:00:00 2001
From: John Hawthorn
Date: Tue, 27 Jul 2021 23:38:24 -0700
Subject: [PATCH 5/8] Fix ctx_clear_local_types

---
 yjit_core.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/yjit_core.c b/yjit_core.c
index a4fe04d4aa41a5..6df84608ff4ad3 100644
--- a/yjit_core.c
+++ b/yjit_core.c
@@ -260,13 +260,14 @@ void ctx_clear_local_types(ctx_t* ctx)
 {
     // When clearing local types we must detach any stack mappings to those
     // locals. Even if local values may have changed, stack values will not.
-    for (int i = 0; i < ctx->stack_size && i < MAX_LOCAL_TYPES; i++) {
+    for (int i = 0; i < MAX_TEMP_TYPES; i++) {
         temp_mapping_t *mapping = &ctx->temp_mapping[i];
         if (mapping->kind == TEMP_LOCAL) {
             RUBY_ASSERT(mapping->idx < MAX_LOCAL_TYPES);
             ctx->temp_types[i] = ctx->local_types[mapping->idx];
             *mapping = MAP_STACK;
         }
+        RUBY_ASSERT(mapping->kind == TEMP_STACK || mapping->kind == TEMP_SELF);
     }
     memset(&ctx->local_types, 0, sizeof(ctx->local_types));
 }

From 508dee20a7560be2fd6cf90a8a1db50a4e6ba74a Mon Sep 17 00:00:00 2001
From: John Hawthorn
Date: Wed, 28 Jul 2021 20:05:54 -0700
Subject: [PATCH 6/8] Add regression test of invalid mapping merge

This should have referenced MAX_TEMP_TYPES, not MAX_LOCAL_TYPES.
---
 test/ruby/test_yjit.rb | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/test/ruby/test_yjit.rb b/test/ruby/test_yjit.rb
index 22b78a4e2d214a..145305ad09c740 100644
--- a/test/ruby/test_yjit.rb
+++ b/test/ruby/test_yjit.rb
@@ -141,12 +141,24 @@ def fib(n)
     RUBY
   end

+  def test_ctx_different_mappings
+    # regression test simplified from URI::Generic#hostname=
+    assert_compiles(<<~'RUBY', frozen_string_literal: true)
+      def foo(v)
+        !(v&.start_with?('[')) && v&.index(':')
+      end
+
+      foo(nil)
+      foo("example.com")
+    RUBY
+  end
+
   def assert_no_exits(script)
     assert_compiles(script)
   end

   ANY = Object.new
-  def assert_compiles(test_script, insns: [], min_calls: 1, stdout: nil, exits: {}, result: ANY)
+  def assert_compiles(test_script, insns: [], min_calls: 1, stdout: nil, exits: {}, result: ANY, frozen_string_literal: nil)
     reset_stats = <<~RUBY
       YJIT.runtime_stats
       YJIT.reset_stats!
@@ -183,6 +195,7 @@ def collect_iseqs(iseq)
     RUBY

     script = <<~RUBY
+      #{"# frozen_string_literal: true" if frozen_string_literal}
       _test_proc = proc {
         #{test_script}
       }

From 66ca75936e93a71a5c6269ab67a65990ef8abba9 Mon Sep 17 00:00:00 2001
From: John Hawthorn
Date: Thu, 29 Jul 2021 16:40:07 -0700
Subject: [PATCH 7/8] Improve comments for mapping functions

---
 yjit_core.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/yjit_core.c b/yjit_core.c
index 6df84608ff4ad3..b9a2e67d2b8739 100644
--- a/yjit_core.c
+++ b/yjit_core.c
@@ -164,7 +164,10 @@ int type_diff(val_type_t src, val_type_t dst);


 /**
-Set the type of an instruction operand
+Upgrade (or "learn") the type of an instruction operand
+This value must be compatible and at least as specific as the previously known type.
+If this value originated from self, or an lvar, the learned type will be
+propagated back to its source.
 */
 void ctx_upgrade_opnd_type(ctx_t* ctx, insn_opnd_t opnd, val_type_t type)
 {
@@ -198,6 +201,11 @@ void ctx_upgrade_opnd_type(ctx_t* ctx, insn_opnd_t opnd, val_type_t type)
     }
 }

+/*
+Get both the type and mapping (where the value originates) of an operand.
+This can be used with ctx_stack_push_mapping or ctx_set_opnd_mapping to copy
+a stack value's type while maintaining the mapping.
+*/
 temp_type_mapping_t
 ctx_get_opnd_mapping(const ctx_t* ctx, insn_opnd_t opnd)
 {
@@ -224,6 +232,9 @@ ctx_get_opnd_mapping(const ctx_t* ctx, insn_opnd_t opnd)
     return type_mapping;
 }

+/*
+Overwrite both the type and mapping of a stack operand.
+*/
 void
 ctx_set_opnd_mapping(ctx_t* ctx, insn_opnd_t opnd, temp_type_mapping_t type_mapping)
 {

From 9cd04b0ffa7b79f181700d02cefa3b02b7173ebc Mon Sep 17 00:00:00 2001
From: John Hawthorn
Date: Sun, 1 Aug 2021 13:05:51 -0700
Subject: [PATCH 8/8] Allow upgrading first N types when stack is large

---
 yjit_core.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/yjit_core.c b/yjit_core.c
index b9a2e67d2b8739..ff6c220562c08f 100644
--- a/yjit_core.c
+++ b/yjit_core.c
@@ -176,13 +176,14 @@ void ctx_upgrade_opnd_type(ctx_t* ctx, insn_opnd_t opnd, val_type_t type)
         return;
     }

-    if (ctx->stack_size >= MAX_TEMP_TYPES)
+    RUBY_ASSERT(opnd.idx < ctx->stack_size);
+    int stack_idx = ctx->stack_size - 1 - opnd.idx;
+
+    // If outside of tracked range, do nothing
+    if (stack_idx >= MAX_TEMP_TYPES)
         return;

-    RUBY_ASSERT(opnd.idx < ctx->stack_size);
-    int stack_index = ctx->stack_size - 1 - opnd.idx;
-    RUBY_ASSERT(stack_index < MAX_TEMP_TYPES);
-    temp_mapping_t mapping = ctx->temp_mapping[stack_index];
+    temp_mapping_t mapping = ctx->temp_mapping[stack_idx];

     switch (mapping.kind)
     {
@@ -191,7 +192,7 @@ void ctx_upgrade_opnd_type(ctx_t* ctx, insn_opnd_t opnd, val_type_t type)
         break;

     case TEMP_STACK:
-        UPGRADE_TYPE(ctx->temp_types[stack_index], type);
+        UPGRADE_TYPE(ctx->temp_types[stack_idx], type);
         break;

     case TEMP_LOCAL: