diff --git a/lib/ruby_vm/rjit/compiler.rb b/lib/ruby_vm/rjit/compiler.rb
index fb8932c45840f3..357fe7d734e446 100644
--- a/lib/ruby_vm/rjit/compiler.rb
+++ b/lib/ruby_vm/rjit/compiler.rb
@@ -257,7 +257,7 @@ def compile_block(asm, jit:, pc: jit.iseq.body.iseq_encoded.to_i, ctx: Context.n
       end
 
       incr_counter(:compiled_block_count)
-      set_block(iseq, block)
+      add_block(iseq, block)
     end
 
     def leave_exit
@@ -274,35 +274,52 @@ def incr_counter(name)
     end
 
     def list_blocks(iseq, pc)
-      rjit_blocks(iseq)[pc].values
+      rjit_blocks(iseq)[pc]
     end
 
     # @param [Integer] pc
     # @param [RubyVM::RJIT::Context] ctx
     # @return [RubyVM::RJIT::Block,NilClass]
     def find_block(iseq, pc, ctx)
-      rjit_blocks(iseq)[pc][ctx]
+      src = ctx
+      rjit_blocks(iseq)[pc].find do |block|
+        dst = block.ctx
+
+        # Can only lookup the first version in the chain
+        if dst.chain_depth != 0
+          next false
+        end
+
+        # Blocks with depth > 0 always produce new versions
+        # Sidechains cannot overlap
+        if src.chain_depth != 0
+          next false
+        end
+
+        src.stack_size == dst.stack_size &&
+          src.sp_offset == dst.sp_offset
+      end
     end
 
     # @param [RubyVM::RJIT::Block] block
-    def set_block(iseq, block)
-      rjit_blocks(iseq)[block.pc][block.ctx] = block
+    def add_block(iseq, block)
+      rjit_blocks(iseq)[block.pc] << block
     end
 
     # @param [RubyVM::RJIT::Block] block
     def remove_block(iseq, block)
-      rjit_blocks(iseq)[block.pc].delete(block.ctx)
+      rjit_blocks(iseq)[block.pc].delete(block)
     end
 
     def rjit_blocks(iseq)
       # Guard against ISEQ GC at random moments
       unless C.imemo_type_p(iseq, C.imemo_iseq)
-        return Hash.new { |h, k| h[k] = {} }
+        return Hash.new { |h, k| h[k] = [] }
       end
 
       unless iseq.body.rjit_blocks
-        iseq.body.rjit_blocks = Hash.new { |h, k| h[k] = {} }
+        iseq.body.rjit_blocks = Hash.new { |blocks, pc| blocks[pc] = [] }
         # For some reason, rb_rjit_iseq_mark didn't protect this Hash
         # from being freed. So we rely on GC_REFS to keep the Hash.
         GC_REFS << iseq.body.rjit_blocks
diff --git a/lib/ruby_vm/rjit/insn_compiler.rb b/lib/ruby_vm/rjit/insn_compiler.rb
index 5afcc173fd052f..a9226f9e41cce2 100644
--- a/lib/ruby_vm/rjit/insn_compiler.rb
+++ b/lib/ruby_vm/rjit/insn_compiler.rb
@@ -1249,7 +1249,7 @@ def jump(jit, ctx, asm)
       end
 
       pc = jit.pc + C.VALUE.size * (jit.insn.len + jump_offset)
-      stub_next_block(jit.iseq, pc, ctx, asm)
+      jit_direct_jump(jit.iseq, pc, ctx, asm)
       EndBlock
     end
 
@@ -2782,7 +2782,7 @@ def jump_to_next_insn(jit, ctx, asm)
        jit.record_boundary_patch_point = false
      end
 
-      stub_next_block(jit.iseq, next_pc, reset_depth, asm, comment: 'jump_to_next_insn')
+      jit_direct_jump(jit.iseq, next_pc, reset_depth, asm, comment: 'jump_to_next_insn')
     end
 
     # rb_vm_check_ints
@@ -3246,7 +3246,7 @@ def jit_call_iseq_setup_normal(jit, ctx, asm, cme, flags, argc, iseq, block_hand
       # Jump to a stub for the callee ISEQ
       callee_ctx = Context.new
       pc = (iseq.body.iseq_encoded + opt_pc).to_i
-      stub_next_block(iseq, pc, callee_ctx, asm)
+      jit_direct_jump(iseq, pc, callee_ctx, asm)
       EndBlock
     end
 
@@ -3898,10 +3898,14 @@ def shape_too_complex?(obj)
     # @param asm [RubyVM::RJIT::Assembler]
     def defer_compilation(jit, ctx, asm)
       # Make a stub to compile the current insn
-      stub_next_block(jit.iseq, jit.pc, ctx, asm, comment: 'defer_compilation')
+      if ctx.chain_depth != 0
+        raise "double defer!"
+      end
+      ctx.chain_depth += 1
+      jit_direct_jump(jit.iseq, jit.pc, ctx, asm, comment: 'defer_compilation')
     end
 
-    def stub_next_block(iseq, pc, ctx, asm, comment: 'stub_next_block')
+    def jit_direct_jump(iseq, pc, ctx, asm, comment: 'jit_direct_jump')
       branch_stub = BranchStub.new(
         iseq:,
         shape: Default,
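
Standalone illustration (not part of the diff): a minimal Ruby sketch of the block-versioning scheme the patch switches to, where each PC maps to an Array of block versions instead of a Hash keyed by Context, and lookup only matches non-chained versions with equal stack_size and sp_offset. Block, Context, and blocks_by_pc below are simplified stand-ins, not the real RubyVM::RJIT classes.

# Hypothetical, simplified stand-ins for RJIT's Block and Context.
Block   = Struct.new(:pc, :ctx, keyword_init: true)
Context = Struct.new(:stack_size, :sp_offset, :chain_depth, keyword_init: true)

# pc => [Block, ...], mirroring the new rjit_blocks layout in the diff.
blocks_by_pc = Hash.new { |h, pc| h[pc] = [] }

def find_block(blocks_by_pc, pc, src)
  blocks_by_pc[pc].find do |block|
    dst = block.ctx
    # Only the first version in a chain is looked up; deferred contexts
    # (chain_depth > 0) always produce new versions, so sidechains never overlap.
    next false if dst.chain_depth != 0
    next false if src.chain_depth != 0
    src.stack_size == dst.stack_size && src.sp_offset == dst.sp_offset
  end
end

b1 = Block.new(pc: 0x1000, ctx: Context.new(stack_size: 2, sp_offset: 2, chain_depth: 0))
blocks_by_pc[b1.pc] << b1   # add_block: append a new version for this PC
p find_block(blocks_by_pc, 0x1000, Context.new(stack_size: 2, sp_offset: 2, chain_depth: 0)) # => b1
p find_block(blocks_by_pc, 0x1000, Context.new(stack_size: 3, sp_offset: 3, chain_depth: 0)) # => nil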