diff --git a/doc/zjit.md b/doc/zjit.md index a45128adbdd438..0c50bd44120639 100644 --- a/doc/zjit.md +++ b/doc/zjit.md @@ -189,6 +189,33 @@ To run code snippets with ZJIT: You can also try https://www.rubyexplorer.xyz/ to view Ruby YARV disasm output with syntax highlighting in a way that can be easily shared with other team members. +## Understanding Ruby Stacks + +Ruby execution involves three distinct stacks and understanding them will help you understand ZJIT's implementation: + +### 1. Native Stack + +- **Purpose**: Return addresses and saved registers. ZJIT also uses it for some C functions' argument arrays +- **Management**: OS-managed, one per native thread +- **Growth**: Downward from high addresses +- **Constants**: `NATIVE_STACK_PTR`, `NATIVE_BASE_PTR` + +### 2. Ruby VM Stack + +The Ruby VM uses a single contiguous memory region (`ec->vm_stack`) containing two sub-stacks that grow toward each other. When they meet, stack overflow occurs. + +**Control Frame Stack:** + +- **Stores**: Frame metadata (`rb_control_frame_t` structures) +- **Growth**: Downward from `vm_stack + size` (high addresses) +- **Constants**: `CFP` + +**Value Stack:** + +- **Stores**: YARV bytecode operands (self, arguments, locals, temporaries) +- **Growth**: Upward from `vm_stack` (low addresses) +- **Constants**: `SP` + ## ZJIT Glossary This glossary contains terms that are helpful for understanding ZJIT. 
diff --git a/lib/error_highlight/core_ext.rb b/lib/error_highlight/core_ext.rb index d4f91e118567bc..e6cacbaf9e832a 100644 --- a/lib/error_highlight/core_ext.rb +++ b/lib/error_highlight/core_ext.rb @@ -3,7 +3,7 @@ module ErrorHighlight module CoreExt private def generate_snippet - if ArgumentError === self && message =~ /\A(?:wrong number of arguments|missing keyword|unknown keyword|no keywords accepted)\b/ + if ArgumentError === self && message =~ /\A(?:wrong number of arguments|missing keyword[s]?|unknown keyword[s]?|no keywords accepted)\b/ locs = self.backtrace_locations return "" if locs.size < 2 callee_loc, caller_loc = locs diff --git a/test/error_highlight/test_error_highlight.rb b/test/error_highlight/test_error_highlight.rb index 208dea8ea97128..43e232071f9371 100644 --- a/test/error_highlight/test_error_highlight.rb +++ b/test/error_highlight/test_error_highlight.rb @@ -44,7 +44,7 @@ def preprocess(msg) def assert_error_message(klass, expected_msg, &blk) omit unless klass < ErrorHighlight::CoreExt err = assert_raise(klass, &blk) - unless klass == ArgumentError && err.message =~ /\A(?:wrong number of arguments|missing keyword|unknown keyword|no keywords accepted)\b/ + unless klass == ArgumentError && err.message =~ /\A(?:wrong number of arguments|missing keyword[s]?|unknown keyword[s]?|no keywords accepted)\b/ spot = ErrorHighlight.spot(err) if spot assert_kind_of(Integer, spot[:first_lineno]) @@ -1502,6 +1502,27 @@ def test_missing_keyword end end + def test_missing_keywords # multiple missing keywords + lineno = __LINE__ + assert_error_message(ArgumentError, <<~END) do +missing keywords: :kw2, :kw3 (ArgumentError) + + caller: #{ __FILE__ }:#{ lineno + 16 } + | keyword_test(kw1: 1) + ^^^^^^^^^^^^ + callee: #{ __FILE__ }:#{ KEYWORD_TEST_LINENO } + #{ + MethodDefLocationSupported ? 
+ "| def keyword_test(kw1:, kw2:, kw3:) + ^^^^^^^^^^^^" : + "(cannot highlight method definition; try Ruby 3.5 or later)" + } + END + + keyword_test(kw1: 1) + end + end + def test_unknown_keyword lineno = __LINE__ assert_error_message(ArgumentError, <<~END) do @@ -1523,6 +1544,27 @@ def test_unknown_keyword end end + def test_unknown_keywords + lineno = __LINE__ + assert_error_message(ArgumentError, <<~END) do +unknown keywords: :kw4, :kw5 (ArgumentError) + + caller: #{ __FILE__ }:#{ lineno + 16 } + | keyword_test(kw1: 1, kw2: 2, kw3: 3, kw4: 4, kw5: 5) + ^^^^^^^^^^^^ + callee: #{ __FILE__ }:#{ KEYWORD_TEST_LINENO } + #{ + MethodDefLocationSupported ? + "| def keyword_test(kw1:, kw2:, kw3:) + ^^^^^^^^^^^^" : + "(cannot highlight method definition; try Ruby 3.5 or later)" + } + END + + keyword_test(kw1: 1, kw2: 2, kw3: 3, kw4: 4, kw5: 5) + end + end + WRONG_NUBMER_OF_ARGUMENTS_TEST2_LINENO = __LINE__ + 1 def wrong_number_of_arguments_test2( long_argument_name_x, diff --git a/test/ruby/test_string.rb b/test/ruby/test_string.rb index f6adef6efc16da..adbfe328cccfbf 100644 --- a/test/ruby/test_string.rb +++ b/test/ruby/test_string.rb @@ -3777,6 +3777,36 @@ class MyError < StandardError; end end end + def test_encode_fallback_too_big_memory_leak + { + "hash" => <<~RUBY, + fallback = Hash.new { "\\uffee" } + RUBY + "proc" => <<~RUBY, + fallback = proc { "\\uffee" } + RUBY + "method" => <<~RUBY, + def my_method(_str) = "\\uffee" + fallback = method(:my_method) + RUBY + "aref" => <<~RUBY, + fallback = Object.new + def fallback.[](_str) = "\\uffee" + RUBY + }.each do |type, code| + assert_no_memory_leak([], '', <<~RUBY, "fallback type is #{type}", rss: true) + class MyError < StandardError; end + + #{code} + + 100_000.times do |i| + "\\ufffd".encode(Encoding::US_ASCII, fallback:) + rescue ArgumentError + end + RUBY + end + end + private def assert_bytesplice_result(expected, s, *args) diff --git a/transcode.c b/transcode.c index 09ecb1c6513171..20b92b66f7ae21 100644 --- 
a/transcode.c +++ b/transcode.c @@ -2432,6 +2432,7 @@ transcode_loop(const unsigned char **in_pos, unsigned char **out_pos, ret = rb_econv_insert_output(ec, (const unsigned char *)RSTRING_PTR(rep), RSTRING_LEN(rep), rb_enc_name(rb_enc_get(rep))); if ((int)ret == -1) { + rb_econv_close(ec); rb_raise(rb_eArgError, "too big fallback string"); } goto resume; diff --git a/zjit/src/backend/arm64/mod.rs b/zjit/src/backend/arm64/mod.rs index d7f929a0219653..d762b14c911503 100644 --- a/zjit/src/backend/arm64/mod.rs +++ b/zjit/src/backend/arm64/mod.rs @@ -687,7 +687,7 @@ impl Assembler { /// Split instructions using scratch registers. To maximize the use of the register pool for /// VRegs, most splits should happen in [`Self::arm64_split`]. However, some instructions - /// need to be split with registers after `alloc_regs`, e.g. for `compile_side_exits`, so this + /// need to be split with registers after `alloc_regs`, e.g. for `compile_exits`, so this /// splits them and uses scratch registers for it. fn arm64_scratch_split(self) -> Assembler { let mut asm = Assembler::new_with_asm(&self); @@ -706,7 +706,7 @@ impl Assembler { asm.push_insn(Insn::RShift { out: SCRATCH0_OPND, opnd: out, shift: Opnd::UImm(63) }); } } - // For compile_side_exits, support splitting simple C arguments here + // For compile_exits, support splitting simple C arguments here Insn::CCall { opnds, .. } if !opnds.is_empty() => { for (i, opnd) in opnds.iter().enumerate() { asm.load_into(C_ARG_OPNDS[i], *opnd); @@ -716,7 +716,7 @@ impl Assembler { } &mut Insn::Lea { opnd, out } => { match (opnd, out) { - // Split here for compile_side_exits + // Split here for compile_exits (Opnd::Mem(_), Opnd::Mem(_)) => { asm.lea_into(SCRATCH0_OPND, opnd); asm.store(out, SCRATCH0_OPND); @@ -728,7 +728,7 @@ impl Assembler { } &mut Insn::IncrCounter { mem, value } => { // Convert Opnd::const_ptr into Opnd::Mem. - // It's split here to support IncrCounter in compile_side_exits. 
+ // It's split here to support IncrCounter in compile_exits. assert!(matches!(mem, Opnd::UImm(_))); asm.load_into(SCRATCH0_OPND, mem); asm.lea_into(SCRATCH0_OPND, Opnd::mem(64, SCRATCH0_OPND, 0)); @@ -872,7 +872,7 @@ impl Assembler { }); }, Target::SideExit { .. } => { - unreachable!("Target::SideExit should have been compiled by compile_side_exits") + unreachable!("Target::SideExit should have been compiled by compile_exits") }, }; } @@ -974,11 +974,14 @@ impl Assembler { // The write_pos for the last Insn::PatchPoint, if any let mut last_patch_pos: Option = None; + // Install a panic hook to dump Assembler with insn_idx on dev builds + let (_hook, mut hook_insn_idx) = AssemblerPanicHook::new(self, 0); + // For each instruction let mut insn_idx: usize = 0; while let Some(insn) = self.insns.get(insn_idx) { - // Dump Assembler with insn_idx if --zjit-dump-lir=panic is given - let _hook = AssemblerPanicHook::new(self, insn_idx); + // Update insn_idx that is shown on panic + hook_insn_idx.as_mut().map(|idx| idx.lock().map(|mut idx| *idx = insn_idx).unwrap()); match insn { Insn::Comment(text) => { @@ -1346,7 +1349,7 @@ impl Assembler { }); }, Target::SideExit { .. } => { - unreachable!("Target::SideExit should have been compiled by compile_side_exits") + unreachable!("Target::SideExit should have been compiled by compile_exits") }, }; }, @@ -1468,9 +1471,9 @@ impl Assembler { let mut asm = asm.alloc_regs(regs)?; asm_dump!(asm, alloc_regs); - // We put compile_side_exits after alloc_regs to avoid extending live ranges for VRegs spilled on side exits. - asm.compile_side_exits(); - asm_dump!(asm, compile_side_exits); + // We put compile_exits after alloc_regs to avoid extending live ranges for VRegs spilled on side exits. 
+ asm.compile_exits(); + asm_dump!(asm, compile_exits); if use_scratch_reg { asm = asm.arm64_scratch_split(); @@ -1561,9 +1564,11 @@ mod tests { asm.store(Opnd::mem(64, SP, 0x10), val64); let side_exit = Target::SideExit { reason: SideExitReason::Interrupt, pc: 0 as _, stack: vec![], locals: vec![], label: None }; asm.push_insn(Insn::Joz(val64, side_exit)); + asm.parallel_mov(vec![(C_ARG_OPNDS[0], C_RET_OPND.with_num_bits(32)), (C_ARG_OPNDS[1], Opnd::mem(64, SP, -8))]); let val32 = asm.sub(Opnd::Value(Qtrue), Opnd::Imm(1)); asm.store(Opnd::mem(64, EC, 0x10).with_num_bits(32), val32.with_num_bits(32)); + asm.je(label); asm.cret(val64); asm.frame_teardown(JIT_PRESERVED_REGS); @@ -1574,8 +1579,10 @@ v0 = Add x19, 0x40 Store [x21 + 0x10], v0 Joz Exit(Interrupt), v0 + ParallelMov x0 <- w0, x1 <- [x21 - 8] v1 = Sub Value(0x14), Imm(1) Store Mem32[x20 + 0x10], VReg32(v1) + Je bb0 CRet v0 FrameTeardown x19, x21, x20 "); diff --git a/zjit/src/backend/lir.rs b/zjit/src/backend/lir.rs index e5707beb51bcf6..005df140b8a613 100644 --- a/zjit/src/backend/lir.rs +++ b/zjit/src/backend/lir.rs @@ -3,11 +3,11 @@ use std::fmt; use std::mem::take; use std::panic; use std::rc::Rc; -use std::sync::Arc; +use std::sync::{Arc, Mutex}; use crate::codegen::local_size_and_idx_to_ep_offset; use crate::cruby::{Qundef, RUBY_OFFSET_CFP_PC, RUBY_OFFSET_CFP_SP, SIZEOF_VALUE_I32, vm_stack_canary}; use crate::hir::SideExitReason; -use crate::options::{TraceExits, debug, dump_lir_option, get_option}; +use crate::options::{TraceExits, debug, get_option}; use crate::cruby::VALUE; use crate::stats::{exit_counter_ptr, exit_counter_ptr_for_opcode, side_exit_counter, CompileError}; use crate::virtualmem::CodePtr; @@ -1701,7 +1701,11 @@ impl Assembler } /// Compile Target::SideExit and convert it into Target::CodePtr for all instructions - pub fn compile_side_exits(&mut self) { + pub fn compile_exits(&mut self) { + fn join_opnds(opnds: &Vec<Opnd>, delimiter: &str) -> String { + opnds.iter().map(|opnd| 
format!("{opnd}")).collect::<Vec<String>>().join(delimiter) } + let mut targets = HashMap::new(); for (idx, insn) in self.insns.iter().enumerate() { if let Some(target @ Target::SideExit { .. }) = insn.target() { @@ -1723,12 +1727,12 @@ impl Assembler // Restore the PC and the stack for regular side exits. We don't do this for // side exits right after JIT-to-JIT calls, which restore them before the call. - asm_comment!(self, "write stack slots: {stack:?}"); + asm_comment!(self, "write stack slots: {}", join_opnds(&stack, ", ")); for (idx, &opnd) in stack.iter().enumerate() { self.store(Opnd::mem(64, SP, idx as i32 * SIZEOF_VALUE_I32), opnd); } - asm_comment!(self, "write locals: {locals:?}"); + asm_comment!(self, "write locals: {}", join_opnds(&locals, ", ")); for (idx, &opnd) in locals.iter().enumerate() { self.store(Opnd::mem(64, SP, (-local_size_and_idx_to_ep_offset(locals.len(), idx) - 1) * SIZEOF_VALUE_I32), opnd); } @@ -1789,6 +1793,24 @@ pub fn lir_string(asm: &Assembler) -> String { impl fmt::Display for Assembler { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + // Count the number of duplicated label names to disambiguate them if needed + let mut label_counts: HashMap<&String, usize> = HashMap::new(); + for label_name in self.label_names.iter() { + let counter = label_counts.entry(label_name).or_insert(0); + *counter += 1; + } + + /// Return a label name String. Suffix "_{label_idx}" if the label name is used multiple times. 
+ fn label_name(asm: &Assembler, label_idx: usize, label_counts: &HashMap<&String, usize>) -> String { + let label_name = &asm.label_names[label_idx]; + let label_count = label_counts.get(&label_name).unwrap_or(&0); + if *label_count > 1 { + format!("{label_name}_{label_idx}") + } else { + label_name.to_string() + } + } + for insn in self.insns.iter() { match insn { Insn::Comment(comment) => { @@ -1798,7 +1820,7 @@ impl fmt::Display for Assembler { let &Target::Label(Label(label_idx)) = target else { panic!("unexpected target for Insn::Label: {target:?}"); }; - writeln!(f, " {}:", self.label_names[label_idx])?; + writeln!(f, " {}:", label_name(self, label_idx, &label_counts))?; } _ => { write!(f, " ")?; @@ -1808,6 +1830,7 @@ impl fmt::Display for Assembler { write!(f, "{out} = ")?; } + // Print the instruction name write!(f, "{}", insn.op())?; // Show slot_count for FrameSetup @@ -1822,7 +1845,7 @@ impl fmt::Display for Assembler { if let Some(target) = insn.target() { match target { Target::CodePtr(code_ptr) => write!(f, " {code_ptr:?}")?, - Target::Label(Label(label_idx)) => write!(f, " {}", self.label_names[*label_idx])?, + Target::Label(Label(label_idx)) => write!(f, " {}", label_name(self, *label_idx, &label_counts))?, Target::SideExit { reason, .. 
} => write!(f, " Exit({reason})")?, } } @@ -1841,21 +1864,9 @@ impl fmt::Display for Assembler { } } else if let Insn::ParallelMov { moves } = insn { // Print operands with a special syntax for ParallelMov - let mut moves_iter = moves.iter(); - if let Some((first_dst, first_src)) = moves_iter.next() { - write!(f, " {first_dst} <- {first_src}")?; - } - for (dst, src) in moves_iter { - write!(f, ", {dst} <- {src}")?; - } - } else { - let mut opnd_iter = insn.opnd_iter(); - if let Some(first_opnd) = opnd_iter.next() { - write!(f, " {first_opnd}")?; - } - for opnd in opnd_iter { - write!(f, ", {opnd}")?; - } + moves.iter().try_fold(" ", |prefix, (dst, src)| write!(f, "{prefix}{dst} <- {src}").and(Ok(", ")))?; + } else if insn.opnd_iter().count() > 0 { + insn.opnd_iter().try_fold(" ", |prefix, opnd| write!(f, "{prefix}{opnd}").and(Ok(", ")))?; } write!(f, "\n")?; @@ -2272,29 +2283,38 @@ pub struct AssemblerPanicHook { } impl AssemblerPanicHook { - pub fn new(asm: &Assembler, insn_idx: usize) -> Option<Arc<Self>> { - // Install a panic hook if --zjit-dump-lir=panic is specified. - if dump_lir_option!(panic) { + /// Install a panic hook to dump Assembler with insn_idx on dev builds. + /// This returns shared references to the previous hook and insn_idx. + /// It takes insn_idx as an argument so that you can manually use it + /// on non-emit passes that keep mutating the Assembler to be dumped. + pub fn new(asm: &Assembler, insn_idx: usize) -> (Option<Arc<Self>>, Option<Arc<Mutex<usize>>>) { + if cfg!(debug_assertions) { // Wrap prev_hook with Arc to share it among the new hook and Self to be dropped. let prev_hook = panic::take_hook(); let panic_hook_ref = Arc::new(Self { prev_hook }); let weak_hook = Arc::downgrade(&panic_hook_ref); + // Wrap insn_idx with Arc to share it among the new hook and the caller mutating it. 
+ let insn_idx = Arc::new(Mutex::new(insn_idx)); + let insn_idx_ref = insn_idx.clone(); + // Install a new hook to dump Assembler with insn_idx let asm = asm.clone(); panic::set_hook(Box::new(move |panic_info| { if let Some(panic_hook) = weak_hook.upgrade() { - // Dump Assembler, highlighting the insn_idx line - Self::dump_asm(&asm, insn_idx); + if let Ok(insn_idx) = insn_idx_ref.lock() { + // Dump Assembler, highlighting the insn_idx line + Self::dump_asm(&asm, *insn_idx); + } // Call the previous panic hook (panic_hook.prev_hook)(panic_info); } })); - Some(panic_hook_ref) + (Some(panic_hook_ref), Some(insn_idx)) } else { - None + (None, None) } } diff --git a/zjit/src/backend/x86_64/mod.rs b/zjit/src/backend/x86_64/mod.rs index a2e27028e6a90a..14c0df8dd02e04 100644 --- a/zjit/src/backend/x86_64/mod.rs +++ b/zjit/src/backend/x86_64/mod.rs @@ -388,7 +388,7 @@ impl Assembler { /// Split instructions using scratch registers. To maximize the use of the register pool /// for VRegs, most splits should happen in [`Self::x86_split`]. However, some instructions - /// need to be split with registers after `alloc_regs`, e.g. for `compile_side_exits`, so + /// need to be split with registers after `alloc_regs`, e.g. for `compile_exits`, so /// this splits them and uses scratch registers for it. pub fn x86_scratch_split(self) -> Assembler { /// For some instructions, we want to be able to lower a 64-bit operand @@ -454,7 +454,7 @@ impl Assembler { } asm.push_insn(insn); } - // For compile_side_exits, support splitting simple C arguments here + // For compile_exits, support splitting simple C arguments here Insn::CCall { opnds, .. 
} if !opnds.is_empty() => { for (i, opnd) in opnds.iter().enumerate() { asm.load_into(C_ARG_OPNDS[i], *opnd); @@ -464,7 +464,7 @@ impl Assembler { } &mut Insn::Lea { opnd, out } => { match (opnd, out) { - // Split here for compile_side_exits + // Split here for compile_exits (Opnd::Mem(_), Opnd::Mem(_)) => { asm.lea_into(SCRATCH0_OPND, opnd); asm.store(out, SCRATCH0_OPND); @@ -481,7 +481,7 @@ impl Assembler { } } // Convert Opnd::const_ptr into Opnd::Mem. This split is done here to give - // a register for compile_side_exits. + // a register for compile_exits. &mut Insn::IncrCounter { mem, value } => { assert!(matches!(mem, Opnd::UImm(_))); asm.load_into(SCRATCH0_OPND, mem); @@ -493,7 +493,7 @@ impl Assembler { asm.mov(dst, src) } } - // Handle various operand combinations for spills on compile_side_exits. + // Handle various operand combinations for spills on compile_exits. &mut Insn::Store { dest, src } => { let Opnd::Mem(Mem { num_bits, .. }) = dest else { panic!("Unexpected Insn::Store destination in x86_scratch_split: {dest:?}"); @@ -590,11 +590,14 @@ impl Assembler { // The write_pos for the last Insn::PatchPoint, if any let mut last_patch_pos: Option = None; + // Install a panic hook to dump Assembler with insn_idx on dev builds + let (_hook, mut hook_insn_idx) = AssemblerPanicHook::new(self, 0); + // For each instruction let mut insn_idx: usize = 0; while let Some(insn) = self.insns.get(insn_idx) { - // Dump Assembler with insn_idx if --zjit-dump-lir=panic is given - let _hook = AssemblerPanicHook::new(self, insn_idx); + // Update insn_idx that is shown on panic + hook_insn_idx.as_mut().map(|idx| idx.lock().map(|mut idx| *idx = insn_idx).unwrap()); match insn { Insn::Comment(text) => { @@ -798,7 +801,7 @@ impl Assembler { match *target { Target::CodePtr(code_ptr) => jmp_ptr(cb, code_ptr), Target::Label(label) => jmp_label(cb, label), - Target::SideExit { .. 
} => unreachable!("Target::SideExit should have been compiled by compile_side_exits"), + Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_exits"), } } @@ -806,7 +809,7 @@ impl Assembler { match *target { Target::CodePtr(code_ptr) => je_ptr(cb, code_ptr), Target::Label(label) => je_label(cb, label), - Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exits"), + Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_exits"), } } @@ -814,7 +817,7 @@ impl Assembler { match *target { Target::CodePtr(code_ptr) => jne_ptr(cb, code_ptr), Target::Label(label) => jne_label(cb, label), - Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exits"), + Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_exits"), } } @@ -822,7 +825,7 @@ impl Assembler { match *target { Target::CodePtr(code_ptr) => jl_ptr(cb, code_ptr), Target::Label(label) => jl_label(cb, label), - Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exits"), + Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_exits"), } }, @@ -830,7 +833,7 @@ impl Assembler { match *target { Target::CodePtr(code_ptr) => jg_ptr(cb, code_ptr), Target::Label(label) => jg_label(cb, label), - Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exits"), + Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_exits"), } }, @@ -838,7 +841,7 @@ impl Assembler { match *target { Target::CodePtr(code_ptr) => jge_ptr(cb, code_ptr), Target::Label(label) => jge_label(cb, label), - Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exits"), + Target::SideExit { .. 
} => unreachable!("Target::SideExit should have been compiled by compile_exits"), } }, @@ -846,7 +849,7 @@ impl Assembler { match *target { Target::CodePtr(code_ptr) => jbe_ptr(cb, code_ptr), Target::Label(label) => jbe_label(cb, label), - Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exits"), + Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_exits"), } }, @@ -854,7 +857,7 @@ impl Assembler { match *target { Target::CodePtr(code_ptr) => jb_ptr(cb, code_ptr), Target::Label(label) => jb_label(cb, label), - Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exits"), + Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_exits"), } }, @@ -862,7 +865,7 @@ impl Assembler { match *target { Target::CodePtr(code_ptr) => jz_ptr(cb, code_ptr), Target::Label(label) => jz_label(cb, label), - Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exits"), + Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_exits"), } } @@ -870,7 +873,7 @@ impl Assembler { match *target { Target::CodePtr(code_ptr) => jnz_ptr(cb, code_ptr), Target::Label(label) => jnz_label(cb, label), - Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exits"), + Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_exits"), } } @@ -879,7 +882,7 @@ impl Assembler { match *target { Target::CodePtr(code_ptr) => jo_ptr(cb, code_ptr), Target::Label(label) => jo_label(cb, label), - Target::SideExit { .. } => unreachable!("Target::SideExit should have been compiled by compile_side_exits"), + Target::SideExit { .. 
} => unreachable!("Target::SideExit should have been compiled by compile_exits"), } } @@ -966,9 +969,9 @@ impl Assembler { let mut asm = asm.alloc_regs(regs)?; asm_dump!(asm, alloc_regs); - // We put compile_side_exits after alloc_regs to avoid extending live ranges for VRegs spilled on side exits. - asm.compile_side_exits(); - asm_dump!(asm, compile_side_exits); + // We put compile_exits after alloc_regs to avoid extending live ranges for VRegs spilled on side exits. + asm.compile_exits(); + asm_dump!(asm, compile_exits); if use_scratch_regs { asm = asm.x86_scratch_split(); @@ -1025,9 +1028,11 @@ mod tests { asm.store(Opnd::mem(64, SP, 0x10), val64); let side_exit = Target::SideExit { reason: SideExitReason::Interrupt, pc: 0 as _, stack: vec![], locals: vec![], label: None }; asm.push_insn(Insn::Joz(val64, side_exit)); + asm.parallel_mov(vec![(C_ARG_OPNDS[0], C_RET_OPND.with_num_bits(32)), (C_ARG_OPNDS[1], Opnd::mem(64, SP, -8))]); let val32 = asm.sub(Opnd::Value(Qtrue), Opnd::Imm(1)); asm.store(Opnd::mem(64, EC, 0x10).with_num_bits(32), val32.with_num_bits(32)); + asm.je(label); asm.cret(val64); asm.frame_teardown(JIT_PRESERVED_REGS); @@ -1038,8 +1043,10 @@ mod tests { v0 = Add r13, 0x40 Store [rbx + 0x10], v0 Joz Exit(Interrupt), v0 + ParallelMov rdi <- eax, rsi <- [rbx - 8] v1 = Sub Value(0x14), Imm(1) Store Mem32[r12 + 0x10], VReg32(v1) + Je bb0 CRet v0 FrameTeardown r13, rbx, r12 "); diff --git a/zjit/src/options.rs b/zjit/src/options.rs index 44f4dbc7a475be..f6471b5461497f 100644 --- a/zjit/src/options.rs +++ b/zjit/src/options.rs @@ -160,44 +160,32 @@ pub enum DumpLIR { split, /// Dump LIR after alloc_regs alloc_regs, - /// Dump LIR after compile_side_exits - compile_side_exits, + /// Dump LIR after compile_exits + compile_exits, /// Dump LIR after {arch}_scratch_split scratch_split, - /// Dump LIR at panic on {arch}_emit - panic, } -/// All compiler stages for --zjit-dump-lir=all. This does NOT include DumpLIR::panic. 
+/// All compiler stages for --zjit-dump-lir=all. const DUMP_LIR_ALL: &[DumpLIR] = &[ DumpLIR::init, DumpLIR::split, DumpLIR::alloc_regs, - DumpLIR::compile_side_exits, + DumpLIR::compile_exits, DumpLIR::scratch_split, ]; /// Macro to dump LIR if --zjit-dump-lir is specified macro_rules! asm_dump { ($asm:expr, $target:ident) => { - if crate::options::dump_lir_option!($target) { - println!("LIR {}:\n{}", stringify!($target), $asm); - } - }; -} -pub(crate) use asm_dump; - -/// Macro to check if a particular dump_lir option is enabled -macro_rules! dump_lir_option { - ($target:ident) => { if let Some(crate::options::Options { dump_lir: Some(dump_lirs), .. }) = unsafe { crate::options::OPTIONS.as_ref() } { - dump_lirs.contains(&crate::options::DumpLIR::$target) - } else { - false + if dump_lirs.contains(&crate::options::DumpLIR::$target) { + println!("LIR {}:\n{}", stringify!($target), $asm); + } } }; } -pub(crate) use dump_lir_option; +pub(crate) use asm_dump; /// Macro to get an option value by name macro_rules! get_option { @@ -364,13 +352,12 @@ fn parse_option(str_ptr: *const std::os::raw::c_char) -> Option<()> { "init" => DumpLIR::init, "split" => DumpLIR::split, "alloc_regs" => DumpLIR::alloc_regs, - "compile_side_exits" => DumpLIR::compile_side_exits, + "compile_exits" => DumpLIR::compile_exits, "scratch_split" => DumpLIR::scratch_split, - "panic" => DumpLIR::panic, _ => { let valid_options = DUMP_LIR_ALL.iter().map(|opt| format!("{opt:?}")).collect::<Vec<String>>().join(", "); eprintln!("invalid --zjit-dump-lir option: '{filter}'"); - eprintln!("valid --zjit-dump-lir options: all, {}, panic", valid_options); + eprintln!("valid --zjit-dump-lir options: all, {}", valid_options); return None; } };