diff --git a/ext/objspace/objspace_dump.c b/ext/objspace/objspace_dump.c
index 80732d0282d384..f90ad89b5a6ed7 100644
--- a/ext/objspace/objspace_dump.c
+++ b/ext/objspace/objspace_dump.c
@@ -801,7 +801,7 @@ shape_id_i(shape_id_t shape_id, void *data)
 
     if (RSHAPE_TYPE(shape_id) != SHAPE_ROOT) {
         dump_append(dc, ", \"parent_id\":");
-        dump_append_lu(dc, RSHAPE_PARENT(shape_id));
+        dump_append_lu(dc, RSHAPE_PARENT_RAW_ID(shape_id));
    }
 
     dump_append(dc, ", \"depth\":");
diff --git a/jit.c b/jit.c
index 74a042d45d5b39..e68758368a45fb 100644
--- a/jit.c
+++ b/jit.c
@@ -442,3 +442,15 @@ rb_yarv_ary_entry_internal(VALUE ary, long offset)
 {
     return rb_ary_entry_internal(ary, offset);
 }
+
+void
+rb_set_cfp_pc(struct rb_control_frame_struct *cfp, const VALUE *pc)
+{
+    cfp->pc = pc;
+}
+
+void
+rb_set_cfp_sp(struct rb_control_frame_struct *cfp, VALUE *sp)
+{
+    cfp->sp = sp;
+}
diff --git a/shape.h b/shape.h
index a418dc78218693..2d13c9b762b615 100644
--- a/shape.h
+++ b/shape.h
@@ -271,11 +271,18 @@ rb_shape_root(size_t heap_id)
 }
 
 static inline shape_id_t
-RSHAPE_PARENT(shape_id_t shape_id)
+RSHAPE_PARENT_RAW_ID(shape_id_t shape_id)
 {
     return RSHAPE(shape_id)->parent_id;
 }
 
+static inline bool
+RSHAPE_DIRECT_CHILD_P(shape_id_t parent_id, shape_id_t child_id)
+{
+    return (parent_id & SHAPE_ID_FLAGS_MASK) == (child_id & SHAPE_ID_FLAGS_MASK) &&
+           RSHAPE(child_id)->parent_id == (parent_id & SHAPE_ID_OFFSET_MASK);
+}
+
 static inline enum shape_type
 RSHAPE_TYPE(shape_id_t shape_id)
 {
diff --git a/variable.c b/variable.c
index 2bd9b3de4921ca..4f924813fa6286 100644
--- a/variable.c
+++ b/variable.c
@@ -1774,7 +1774,9 @@ general_ivar_set(VALUE obj, ID id, VALUE val, void *data,
            shape_resize_fields_func(obj, RSHAPE_CAPACITY(current_shape_id), RSHAPE_CAPACITY(next_shape_id), data);
        }
 
-        RUBY_ASSERT(RSHAPE_TYPE_P(next_shape_id, SHAPE_IVAR));
+        RUBY_ASSERT(RSHAPE_TYPE_P(next_shape_id, SHAPE_IVAR),
+                    "next_shape_id: 0x%" PRIx32 " RSHAPE_TYPE(next_shape_id): %d",
+                    next_shape_id, (int)RSHAPE_TYPE(next_shape_id));
        RUBY_ASSERT(index == (RSHAPE_INDEX(next_shape_id)));
        set_shape_id_func(obj, next_shape_id, data);
    }
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index d5eb84e691d4b5..0fef0e0976981e 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -1473,7 +1473,7 @@ vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_i
        RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
    }
    else if (dest_shape_id != INVALID_SHAPE_ID) {
-        if (shape_id == RSHAPE_PARENT(dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
+        if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
            RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
        }
        else {
@@ -1514,14 +1514,11 @@ vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t i
            VM_ASSERT(!rb_ractor_shareable_p(obj));
        }
        else if (dest_shape_id != INVALID_SHAPE_ID) {
-            shape_id_t source_shape_id = RSHAPE_PARENT(dest_shape_id);
-
-            if (shape_id == source_shape_id && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
+            if (RSHAPE_DIRECT_CHILD_P(shape_id, dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) {
                RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
 
                RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
 
-                RUBY_ASSERT(rb_shape_get_next_iv_shape(source_shape_id, id) == dest_shape_id);
                RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id));
            }
            else {
diff --git a/yjit.c b/yjit.c
index 46f89e2020c3f1..f83a330bd6f927 100644
--- a/yjit.c
+++ b/yjit.c
@@ -499,18 +499,6 @@ rb_yjit_str_simple_append(VALUE str1, VALUE str2)
     return rb_str_cat(str1, RSTRING_PTR(str2), RSTRING_LEN(str2));
 }
 
-void
-rb_set_cfp_pc(struct rb_control_frame_struct *cfp, const VALUE *pc)
-{
-    cfp->pc = pc;
-}
-
-void
-rb_set_cfp_sp(struct rb_control_frame_struct *cfp, VALUE *sp)
-{
-    cfp->sp = sp;
-}
-
 extern VALUE *rb_vm_base_ptr(struct rb_control_frame_struct *cfp);
 
 // YJIT needs this function to never allocate and never raise
diff --git a/yjit/src/cruby_bindings.inc.rs b/yjit/src/cruby_bindings.inc.rs
index eeabbf594df9bb..c8a58f424e385f 100644
--- a/yjit/src/cruby_bindings.inc.rs
+++ b/yjit/src/cruby_bindings.inc.rs
@@ -1202,8 +1202,6 @@ extern "C" {
     pub fn rb_yjit_iseq_builtin_attrs(iseq: *const rb_iseq_t) -> ::std::os::raw::c_uint;
     pub fn rb_yjit_builtin_function(iseq: *const rb_iseq_t) -> *const rb_builtin_function;
     pub fn rb_yjit_str_simple_append(str1: VALUE, str2: VALUE) -> VALUE;
-    pub fn rb_set_cfp_pc(cfp: *mut rb_control_frame_struct, pc: *const VALUE);
-    pub fn rb_set_cfp_sp(cfp: *mut rb_control_frame_struct, sp: *mut VALUE);
     pub fn rb_vm_base_ptr(cfp: *mut rb_control_frame_struct) -> *mut VALUE;
     pub fn rb_yarv_str_eql_internal(str1: VALUE, str2: VALUE) -> VALUE;
     pub fn rb_str_neq_internal(str1: VALUE, str2: VALUE) -> VALUE;
@@ -1330,4 +1328,6 @@ extern "C" {
     pub fn rb_IMEMO_TYPE_P(imemo: VALUE, imemo_type: imemo_type) -> ::std::os::raw::c_int;
     pub fn rb_assert_cme_handle(handle: VALUE);
     pub fn rb_yarv_ary_entry_internal(ary: VALUE, offset: ::std::os::raw::c_long) -> VALUE;
+    pub fn rb_set_cfp_pc(cfp: *mut rb_control_frame_struct, pc: *const VALUE);
+    pub fn rb_set_cfp_sp(cfp: *mut rb_control_frame_struct, sp: *mut VALUE);
 }
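Note: the shape.h change above is the semantic core of the shape-related edits. A `shape_id_t` packs flag bits together with an offset into the shape table, while `RSHAPE(id)->parent_id` stores only the offset portion, so the old `shape_id == RSHAPE_PARENT(dest_shape_id)` comparison in vm_insnhelper.c dropped the flag bits from the equation. The Rust sketch below illustrates the logic of `RSHAPE_DIRECT_CHILD_P`; the mask values and shape table are hypothetical stand-ins, not CRuby's actual encoding.

```rust
// Hypothetical sketch of RSHAPE_DIRECT_CHILD_P's logic; the masks and the
// shape table here are illustrative stand-ins for CRuby's actual encoding.
type ShapeId = u32;

const SHAPE_ID_FLAGS_MASK: ShapeId = 0xF000_0000; // hypothetical flag bits
const SHAPE_ID_OFFSET_MASK: ShapeId = !SHAPE_ID_FLAGS_MASK;

/// Stand-in for RSHAPE(child_id)->parent_id: a raw offset with no flag bits.
fn raw_parent_id(child_id: ShapeId, shapes: &[ShapeId]) -> ShapeId {
    shapes[(child_id & SHAPE_ID_OFFSET_MASK) as usize]
}

/// Direct-child test: flag bits must match, and the child's stored parent
/// offset must equal the parent's offset bits.
fn direct_child_p(parent_id: ShapeId, child_id: ShapeId, shapes: &[ShapeId]) -> bool {
    (parent_id & SHAPE_ID_FLAGS_MASK) == (child_id & SHAPE_ID_FLAGS_MASK)
        && raw_parent_id(child_id, shapes) == (parent_id & SHAPE_ID_OFFSET_MASK)
}

fn main() {
    // shapes[offset] = parent offset; the shape at offset 1 has parent offset 0
    let shapes: Vec<ShapeId> = vec![0, 0];
    let parent: ShapeId = 0x1000_0000; // flags = 1, offset = 0
    let child: ShapeId = 0x1000_0001; // flags = 1, offset = 1
    assert!(direct_child_p(parent, child, &shapes));
    // Same offsets but different flag bits: comparing raw parent ids alone
    // would accept this pair, while the flag check correctly rejects it.
    assert!(!direct_child_p(parent & SHAPE_ID_OFFSET_MASK, child, &shapes));
}
```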
diff --git a/zjit/src/codegen.rs b/zjit/src/codegen.rs
index 607bc560d2f7d5..e7ebc1414af86e 100644
--- a/zjit/src/codegen.rs
+++ b/zjit/src/codegen.rs
@@ -1,6 +1,6 @@
 use std::cell::Cell;
 use std::rc::Rc;
-use std::ffi::{c_int};
+use std::ffi::{c_int, c_void};
 
 use crate::asm::Label;
 use crate::backend::current::{Reg, ALLOC_REGS};
@@ -105,53 +105,33 @@ fn gen_iseq_entry_point(iseq: IseqPtr) -> *const u8 {
     let code_ptr = gen_iseq_entry_point_body(cb, iseq);
 
     // Always mark the code region executable if asm.compile() has been used.
-    // We need to do this even if code_ptr is null because, whether gen_entry()
-    // or gen_iseq() fails or not, gen_function() has already used asm.compile().
+    // We need to do this even if code_ptr is null because, whether gen_entry() or
+    // gen_function_stub() fails or not, gen_function() has already used asm.compile().
     cb.mark_all_executable();
 
-    code_ptr
+    code_ptr.map_or(std::ptr::null(), |ptr| ptr.raw_ptr(cb))
 }
 
 /// Compile an entry point for a given ISEQ
-fn gen_iseq_entry_point_body(cb: &mut CodeBlock, iseq: IseqPtr) -> *const u8 {
+fn gen_iseq_entry_point_body(cb: &mut CodeBlock, iseq: IseqPtr) -> Option<CodePtr> {
     // Compile ISEQ into High-level IR
-    let function = match compile_iseq(iseq) {
-        Some(function) => function,
-        None => return std::ptr::null(),
-    };
+    let function = compile_iseq(iseq)?;
 
     // Compile the High-level IR
     let Some((start_ptr, gc_offsets, jit)) = gen_function(cb, iseq, &function) else {
         debug!("Failed to compile iseq: gen_function failed: {}", iseq_get_location(iseq, 0));
-        return std::ptr::null();
+        return None;
     };
 
     // Compile an entry point to the JIT code
     let Some(entry_ptr) = gen_entry(cb, iseq, &function, start_ptr) else {
         debug!("Failed to compile iseq: gen_entry failed: {}", iseq_get_location(iseq, 0));
-        return std::ptr::null();
+        return None;
     };
 
-    let mut branch_iseqs = jit.branch_iseqs;
-
-    // Recursively compile callee ISEQs
-    let caller_iseq = iseq;
-    while let Some((branch, iseq)) = branch_iseqs.pop() {
-        // Disable profiling. This will be the last use of the profiling information for the ISEQ.
-        unsafe { rb_zjit_profile_disable(iseq); }
-
-        // Compile the ISEQ
-        let Some((callee_ptr, callee_branch_iseqs)) = gen_iseq(cb, iseq) else {
-            // Failed to compile the callee. Bail out of compiling this graph of ISEQs.
-            debug!("Failed to compile iseq: could not compile callee: {} -> {}",
-                   iseq_get_location(caller_iseq, 0), iseq_get_location(iseq, 0));
-            return std::ptr::null();
-        };
-        let callee_addr = callee_ptr.raw_ptr(cb);
-        branch.regenerate(cb, |asm| {
-            asm.ccall(callee_addr, vec![]);
-        });
-        branch_iseqs.extend(callee_branch_iseqs);
+    // Stub callee ISEQs for JIT-to-JIT calls
+    for (branch, callee_iseq) in jit.branch_iseqs.into_iter() {
+        gen_iseq_branch(cb, callee_iseq, iseq, branch)?;
     }
 
     // Remember the block address to reuse it later
@@ -160,7 +140,27 @@ fn gen_iseq_entry_point_body(cb: &mut CodeBlock, iseq: IseqPtr) -> *const u8 {
     append_gc_offsets(iseq, &gc_offsets);
 
     // Return a JIT code address
-    entry_ptr.raw_ptr(cb)
+    Some(entry_ptr)
+}
+
+/// Stub a branch for a JIT-to-JIT call
+fn gen_iseq_branch(cb: &mut CodeBlock, iseq: IseqPtr, caller_iseq: IseqPtr, branch: Rc<Branch>) -> Option<()> {
+    // Compile a function stub
+    let Some((stub_ptr, gc_offsets)) = gen_function_stub(cb, iseq, branch.clone()) else {
+        // Failed to compile the stub. Bail out of compiling the caller ISEQ.
+        debug!("Failed to compile iseq: could not compile stub: {} -> {}",
+               iseq_get_location(caller_iseq, 0), iseq_get_location(iseq, 0));
+        return None;
+    };
+    append_gc_offsets(iseq, &gc_offsets);
+
+    // Update the JIT-to-JIT call to call the stub
+    let stub_addr = stub_ptr.raw_ptr(cb);
+    branch.regenerate(cb, |asm| {
+        asm_comment!(asm, "call function stub: {}", iseq_get_location(iseq, 0));
+        asm.ccall(stub_addr, vec![]);
+    });
+    Some(())
 }
 
 /// Write an entry to the perf map in /tmp
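Note: `gen_function_stub` (further down in this file's diff) embeds the call-site `Branch` into generated code as a raw pointer via `Rc::into_raw`, and `function_stub_hit_body` reclaims ownership with `Rc::from_raw` once the stub fires. A self-contained sketch of that handoff pattern follows; the `Branch` struct and names here are stand-ins, not ZJIT's real types.

```rust
use std::ffi::c_void;
use std::rc::Rc;

/// Stand-in for the call-site metadata ZJIT attaches to a branch.
struct Branch {
    name: String,
}

/// Producer side (cf. gen_function_stub): leak the Rc into a raw pointer
/// that can be baked into generated machine code as a plain immediate.
fn leak_branch(branch: Rc<Branch>) -> *const c_void {
    Rc::into_raw(branch) as *const c_void
}

/// Consumer side (cf. function_stub_hit_body): reclaim ownership exactly once.
///
/// Safety: `ptr` must come from `leak_branch` and must not be reclaimed twice.
unsafe fn reclaim_branch(ptr: *const c_void) -> Rc<Branch> {
    unsafe { Rc::from_raw(ptr as *const Branch) }
}

fn main() {
    let branch = Rc::new(Branch { name: "send_site".to_string() });
    let raw = leak_branch(branch); // refcount is parked; nothing is dropped
    let branch = unsafe { reclaim_branch(raw) }; // balanced again; drop works
    println!("reclaimed branch for {}", branch.name);
}
```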
+ debug!("Failed to compile iseq: could not compile stub: {} -> {}", + iseq_get_location(caller_iseq, 0), iseq_get_location(iseq, 0)); + return None; + }; + append_gc_offsets(iseq, &gc_offsets); + + // Update the JIT-to-JIT call to call the stub + let stub_addr = stub_ptr.raw_ptr(cb); + branch.regenerate(cb, |asm| { + asm_comment!(asm, "call function stub: {}", iseq_get_location(iseq, 0)); + asm.ccall(stub_addr, vec![]); + }); + Some(()) } /// Write an entry to the perf map in /tmp @@ -244,7 +244,11 @@ fn gen_function(cb: &mut CodeBlock, iseq: IseqPtr, function: &Function) -> Optio let reverse_post_order = function.rpo(); for &block_id in reverse_post_order.iter() { let block = function.block(block_id); - asm_comment!(asm, "Block: {block_id}({})", block.params().map(|param| format!("{param}")).collect::>().join(", ")); + asm_comment!( + asm, "{block_id}({}): {}", + block.params().map(|param| format!("{param}")).collect::>().join(", "), + iseq_get_location(iseq, block.insn_idx), + ); // Write a label to jump to the basic block let label = jit.get_label(&mut asm, block_id); @@ -1263,6 +1267,127 @@ fn max_num_params(function: &Function) -> usize { }).max().unwrap_or(0) } +#[cfg(target_arch = "x86_64")] +macro_rules! c_callable { + ($(#[$outer:meta])* + fn $f:ident $args:tt $(-> $ret:ty)? $body:block) => { + $(#[$outer])* + extern "sysv64" fn $f $args $(-> $ret)? $body + }; +} +#[cfg(target_arch = "aarch64")] +macro_rules! c_callable { + ($(#[$outer:meta])* + fn $f:ident $args:tt $(-> $ret:ty)? $body:block) => { + $(#[$outer])* + extern "C" fn $f $args $(-> $ret)? $body + }; +} +pub(crate) use c_callable; + +c_callable! { + /// Generated code calls this function with the SysV calling convention. + /// See [gen_function_stub]. + fn function_stub_hit(iseq: IseqPtr, branch_ptr: *const c_void, ec: EcPtr, sp: *mut VALUE) -> *const u8 { + with_vm_lock(src_loc!(), || { + // Get a pointer to compiled code or the side-exit trampoline + let cb = ZJITState::get_code_block(); + let code_ptr = if let Some(code_ptr) = function_stub_hit_body(cb, iseq, branch_ptr) { + code_ptr + } else { + // gen_push_frame() doesn't set PC and SP, so we need to set them for side-exit + // TODO: We could generate code that sets PC/SP. Note that we'd still need to handle OOM. 
diff --git a/zjit/src/cruby.rs b/zjit/src/cruby.rs
index 582bd49c965ddb..afa3ddfb4989c8 100644
--- a/zjit/src/cruby.rs
+++ b/zjit/src/cruby.rs
@@ -715,7 +715,7 @@ pub fn iseq_name(iseq: IseqPtr) -> String {
 // Location is the file defining the method, colon, method name.
 // Filenames are sometimes internal strings supplied to eval,
 // so be careful with them.
-pub fn iseq_get_location(iseq: IseqPtr, pos: u16) -> String {
+pub fn iseq_get_location(iseq: IseqPtr, pos: u32) -> String {
     let iseq_path = unsafe { rb_iseq_path(iseq) };
     let iseq_lineno = unsafe { rb_iseq_line_no(iseq, pos as usize) };
diff --git a/zjit/src/cruby_bindings.inc.rs b/zjit/src/cruby_bindings.inc.rs
index 10f12798f66dad..7fe1a0406ad9d8 100644
--- a/zjit/src/cruby_bindings.inc.rs
+++ b/zjit/src/cruby_bindings.inc.rs
@@ -1015,4 +1015,6 @@ unsafe extern "C" {
     pub fn rb_IMEMO_TYPE_P(imemo: VALUE, imemo_type: imemo_type) -> ::std::os::raw::c_int;
     pub fn rb_assert_cme_handle(handle: VALUE);
     pub fn rb_yarv_ary_entry_internal(ary: VALUE, offset: ::std::os::raw::c_long) -> VALUE;
+    pub fn rb_set_cfp_pc(cfp: *mut rb_control_frame_struct, pc: *const VALUE);
+    pub fn rb_set_cfp_sp(cfp: *mut rb_control_frame_struct, sp: *mut VALUE);
 }
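Note: `pos` widens from u16 to u32 here because `gen_function` now calls `iseq_get_location(iseq, block.insn_idx)` and `Block::insn_idx` is a u32. A trivial sketch of the reasoning, with simplified, hypothetical signatures:

```rust
// Simplified, hypothetical signature: with pos: u16, passing a u32 instruction
// index would need a lossy cast for ISEQs longer than 65535 instructions;
// widening the parameter removes that edge case.
fn iseq_get_location(pos: u32) -> String {
    format!("<iseq>@{pos}") // stand-in for the real path:name@line lookup
}

fn main() {
    let insn_idx: u32 = 70_000; // exceeds u16::MAX
    println!("{}", iseq_get_location(insn_idx));
}
```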
diff --git a/zjit/src/hir.rs b/zjit/src/hir.rs
index 6d45383713643b..61514c88769d87 100644
--- a/zjit/src/hir.rs
+++ b/zjit/src/hir.rs
@@ -796,6 +796,8 @@ impl std::fmt::Display for Insn {
 /// An extended basic block in a [`Function`].
 #[derive(Default, Debug)]
 pub struct Block {
+    /// The index of the first YARV instruction for the Block in the ISEQ
+    pub insn_idx: u32,
     params: Vec<InsnId>,
     insns: Vec<InsnId>,
 }
@@ -1024,9 +1026,11 @@ impl Function {
         }
     }
 
-    fn new_block(&mut self) -> BlockId {
+    fn new_block(&mut self, insn_idx: u32) -> BlockId {
         let id = BlockId(self.blocks.len());
-        self.blocks.push(Block::default());
+        let mut block = Block::default();
+        block.insn_idx = insn_idx;
+        self.blocks.push(block);
         id
     }
 
@@ -2543,7 +2547,7 @@ pub fn iseq_to_hir(iseq: *const rb_iseq_t) -> Result<Function, ParseError> {
         if insn_idx == 0 {
             todo!("Separate entry block for param/self/...");
         }
-        insn_idx_to_block.insert(insn_idx, fun.new_block());
+        insn_idx_to_block.insert(insn_idx, fun.new_block(insn_idx));
     }
 
     // Iteratively fill out basic blocks using a queue
@@ -3243,7 +3247,7 @@ fn jump() {
         let mut function = Function::new(std::ptr::null());
         let entry = function.entry_block;
-        let exit = function.new_block();
+        let exit = function.new_block(0);
         function.push_insn(entry, Insn::Jump(BranchEdge { target: exit, args: vec![] }));
         let val = function.push_insn(entry, Insn::Const { val: Const::Value(Qnil) });
         function.push_insn(entry, Insn::Return { val });
@@ -3254,8 +3258,8 @@ fn diamond_iftrue() {
         let mut function = Function::new(std::ptr::null());
         let entry = function.entry_block;
-        let side = function.new_block();
-        let exit = function.new_block();
+        let side = function.new_block(0);
+        let exit = function.new_block(0);
         function.push_insn(side, Insn::Jump(BranchEdge { target: exit, args: vec![] }));
         let val = function.push_insn(entry, Insn::Const { val: Const::Value(Qnil) });
         function.push_insn(entry, Insn::IfTrue { val, target: BranchEdge { target: side, args: vec![] } });
@@ -3269,8 +3273,8 @@ fn diamond_iffalse() {
         let mut function = Function::new(std::ptr::null());
         let entry = function.entry_block;
-        let side = function.new_block();
-        let exit = function.new_block();
+        let side = function.new_block(0);
+        let exit = function.new_block(0);
         function.push_insn(side, Insn::Jump(BranchEdge { target: exit, args: vec![] }));
         let val = function.push_insn(entry, Insn::Const { val: Const::Value(Qnil) });
         function.push_insn(entry, Insn::IfFalse { val, target: BranchEdge { target: side, args: vec![] } });
@@ -3325,7 +3329,7 @@ fn iftrue_mismatch_args() {
         let mut function = Function::new(std::ptr::null());
         let entry = function.entry_block;
-        let side = function.new_block();
+        let side = function.new_block(0);
         let val = function.push_insn(entry, Insn::Const { val: Const::Value(Qnil) });
         function.push_insn(entry, Insn::IfTrue { val, target: BranchEdge { target: side, args: vec![val, val, val] } });
         assert_matches_err(function.validate(), ValidationError::MismatchedBlockArity(entry, 0, 3));
@@ -3335,7 +3339,7 @@ fn iffalse_mismatch_args() {
         let mut function = Function::new(std::ptr::null());
         let entry = function.entry_block;
-        let side = function.new_block();
+        let side = function.new_block(0);
         let val = function.push_insn(entry, Insn::Const { val: Const::Value(Qnil) });
         function.push_insn(entry, Insn::IfFalse { val, target: BranchEdge { target: side, args: vec![val, val, val] } });
         assert_matches_err(function.validate(), ValidationError::MismatchedBlockArity(entry, 0, 3));
@@ -3345,7 +3349,7 @@ fn jump_mismatch_args() {
         let mut function = Function::new(std::ptr::null());
         let entry = function.entry_block;
-        let side = function.new_block();
+        let side = function.new_block(0);
         let val = function.push_insn(entry, Insn::Const { val: Const::Value(Qnil) });
         function.push_insn(entry, Insn::Jump ( BranchEdge { target: side, args: vec![val, val, val] } ));
         assert_matches_err(function.validate(), ValidationError::MismatchedBlockArity(entry, 0, 3));
@@ -3377,8 +3381,8 @@
         // This tests that one branch is missing a definition which fails.
         let mut function = Function::new(std::ptr::null());
         let entry = function.entry_block;
-        let side = function.new_block();
-        let exit = function.new_block();
+        let side = function.new_block(0);
+        let exit = function.new_block(0);
         let v0 = function.push_insn(side, Insn::Const { val: Const::Value(VALUE::fixnum_from_usize(3)) });
         function.push_insn(side, Insn::Jump(BranchEdge { target: exit, args: vec![] }));
         let val1 = function.push_insn(entry, Insn::Const { val: Const::CBool(false) });
@@ -3396,8 +3400,8 @@
         // This tests that both branches with a definition succeeds.
         let mut function = Function::new(std::ptr::null());
         let entry = function.entry_block;
-        let side = function.new_block();
-        let exit = function.new_block();
+        let side = function.new_block(0);
+        let exit = function.new_block(0);
         let v0 = function.push_insn(entry, Insn::Const { val: Const::Value(VALUE::fixnum_from_usize(3)) });
         function.push_insn(side, Insn::Jump(BranchEdge { target: exit, args: vec![] }));
         let val = function.push_insn(entry, Insn::Const { val: Const::CBool(false) });
@@ -3437,7 +3441,7 @@
         let mut function = Function::new(std::ptr::null());
         let entry = function.entry_block;
         let val = function.push_insn(entry, Insn::Const { val: Const::Value(Qnil) });
-        let exit = function.new_block();
+        let exit = function.new_block(0);
         function.push_insn(entry, Insn::Jump(BranchEdge { target: exit, args: vec![] }));
         function.push_insn_id(exit, val);
         function.push_insn(exit, Insn::Return { val });
@@ -3532,8 +3536,8 @@ fn diamond_iffalse_merge_fixnum() {
         let mut function = Function::new(std::ptr::null());
         let entry = function.entry_block;
-        let side = function.new_block();
-        let exit = function.new_block();
+        let side = function.new_block(0);
+        let exit = function.new_block(0);
         let v0 = function.push_insn(side, Insn::Const { val: Const::Value(VALUE::fixnum_from_usize(3)) });
         function.push_insn(side, Insn::Jump(BranchEdge { target: exit, args: vec![v0] }));
         let val = function.push_insn(entry, Insn::Const { val: Const::CBool(false) });
@@ -3551,8 +3555,8 @@ fn diamond_iffalse_merge_bool() {
         let mut function = Function::new(std::ptr::null());
         let entry = function.entry_block;
-        let side = function.new_block();
-        let exit = function.new_block();
+        let side = function.new_block(0);
+        let exit = function.new_block(0);
         let v0 = function.push_insn(side, Insn::Const { val: Const::Value(Qtrue) });
         function.push_insn(side, Insn::Jump(BranchEdge { target: exit, args: vec![v0] }));
         let val = function.push_insn(entry, Insn::Const { val: Const::CBool(false) });
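Note: `Block::insn_idx` is the link between the HIR and codegen halves of this diff: each block records the YARV instruction index it starts at, so `gen_function` can label blocks with a source location instead of a bare block id. A reduced sketch of the new constructor shape, with `InsnId` and the real `Function` simplified to plain types:

```rust
/// Reduced stand-ins for ZJIT's Block/Function; InsnId is simplified to usize.
#[derive(Default, Debug)]
struct Block {
    insn_idx: u32, // index of the block's first YARV instruction in the ISEQ
    params: Vec<usize>,
    insns: Vec<usize>,
}

#[derive(Default)]
struct Function {
    blocks: Vec<Block>,
}

impl Function {
    /// Every caller now supplies the starting instruction index up front.
    fn new_block(&mut self, insn_idx: u32) -> usize {
        let id = self.blocks.len();
        self.blocks.push(Block { insn_idx, ..Block::default() });
        id
    }
}

fn main() {
    let mut f = Function::default();
    let entry = f.new_block(0); // block starting at YARV insn 0
    let target = f.new_block(7); // hypothetical jump target at insn 7
    assert_eq!(f.blocks[entry].insn_idx, 0);
    println!("{:?}", f.blocks[target]);
}
```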
diff --git a/zjit/src/state.rs b/zjit/src/state.rs
index cd39e07c57aa42..79be91fd85e5c6 100644
--- a/zjit/src/state.rs
+++ b/zjit/src/state.rs
@@ -1,9 +1,11 @@
+use crate::codegen::gen_stub_exit;
 use crate::cruby::{self, rb_bug_panic_hook, rb_vm_insns_count, EcPtr, Qnil, VALUE};
 use crate::cruby_methods;
 use crate::invariants::Invariants;
 use crate::asm::CodeBlock;
 use crate::options::get_option;
 use crate::stats::Counters;
+use crate::virtualmem::CodePtr;
 
 #[allow(non_upper_case_globals)]
 #[unsafe(no_mangle)]
@@ -30,6 +32,9 @@ pub struct ZJITState {
 
     /// Properties of core library methods
     method_annotations: cruby_methods::Annotations,
+
+    /// Side-exit trampoline used when it fails to compile the ISEQ for a function stub
+    stub_exit: CodePtr,
 }
 
 /// Private singleton instance of the codegen globals
@@ -39,7 +44,7 @@ impl ZJITState {
     /// Initialize the ZJIT globals
     pub fn init() {
         #[cfg(not(test))]
-        let cb = {
+        let mut cb = {
             use crate::cruby::*;
             use crate::options::*;
@@ -76,7 +81,9 @@ impl ZJITState {
             CodeBlock::new(mem_block.clone(), get_option!(dump_disasm))
         };
         #[cfg(test)]
-        let cb = CodeBlock::new_dummy();
+        let mut cb = CodeBlock::new_dummy();
+
+        let stub_exit = gen_stub_exit(&mut cb).unwrap();
 
         // Initialize the codegen globals instance
         let zjit_state = ZJITState {
@@ -85,6 +92,7 @@ impl ZJITState {
             invariants: Invariants::default(),
             assert_compiles: false,
             method_annotations: cruby_methods::init(),
+            stub_exit,
         };
         unsafe { ZJIT_STATE = Some(zjit_state); }
     }
@@ -160,6 +168,11 @@ impl ZJITState {
             true // If no restrictions, allow all ISEQs
         }
     }
+
+    /// Return a code pointer to the side-exit trampoline for function stubs
+    pub fn get_stub_exit() -> CodePtr {
+        ZJITState::get_instance().stub_exit
+    }
 }
 
 /// Initialize ZJIT
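Note: taken together, the codegen.rs and state.rs changes replace the old eager, recursive compilation of callees with lazily patched call sites. The schematic below summarizes the call-site lifecycle; it is pseudo-code with illustrative names and states, not ZJIT's real API.

```rust
/// Illustrative model of a JIT-to-JIT call site under this patch:
///   1. gen_iseq_entry_point_body emits a function stub per callee (gen_iseq_branch).
///   2. First call: the stub reaches function_stub_hit, which compiles the callee,
///      patches the call site, and jumps to the fresh code.
///   3. Later calls: the patched site calls the compiled callee directly.
///   4. If compilation fails, the stub side-exits to the interpreter (stub_exit).
#[derive(Debug)]
enum CallSite {
    Stub,            // freshly emitted by gen_function_stub
    Compiled(usize), // patched with the callee's code address
}

fn call(site: &mut CallSite, compile: impl Fn() -> Option<usize>) -> &'static str {
    match site {
        CallSite::Compiled(_) => "direct JIT-to-JIT call",
        CallSite::Stub => match compile() {
            Some(addr) => {
                *site = CallSite::Compiled(addr); // cf. branch.regenerate
                "compiled and patched"
            }
            None => "side exit to interpreter", // cf. ZJITState::get_stub_exit
        },
    }
}

fn main() {
    let mut site = CallSite::Stub;
    assert_eq!(call(&mut site, || Some(0x1000)), "compiled and patched");
    assert_eq!(call(&mut site, || Some(0x1000)), "direct JIT-to-JIT call");
}
```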