diff --git a/NEWS.md b/NEWS.md
index 964bafacd65aa2..b3d04feacf6208 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -243,6 +243,22 @@ The following bundled gems are updated.
## JIT
+* YJIT
+  * YJIT stats
+    * `ratio_in_yjit` no longer works in the default build. Build with
+      `--enable-yjit=stats` at `configure` time to make it available under
+      `--yjit-stats`.
+    * Add `invalidate_everything` to the default stats; it is incremented
+      when all code is invalidated by TracePoint.
+  * Add `mem_size:` and `call_threshold:` options to `RubyVM::YJIT.enable`
+    (see the sketch after this list).
+* ZJIT
+  * Add an experimental method-based JIT compiler.
+    Build with `--enable-zjit` at `configure` time to enable `--zjit` support.
+  * As of Ruby 3.5.0-preview2, ZJIT is not yet ready to speed up most benchmarks.
+    Please refrain from evaluating ZJIT just yet. Stay tuned for the Ruby 3.5 release.
+* RJIT
+  * `--rjit` is removed. The implementation of the third-party JIT API will move
+    to the [ruby/rjit](https://github.com/ruby/rjit) repository.
+
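A quick sketch of the new `RubyVM::YJIT.enable` options (illustrative values; this assumes `mem_size:` is in MiB and `call_threshold:` is a call count, mirroring the `--yjit-mem-size` and `--yjit-call-threshold` command-line flags):

```ruby
# Enable YJIT at runtime with a 64 MiB code budget (assumed unit) and
# compilation after 30 calls (assumed semantics).
RubyVM::YJIT.enable(mem_size: 64, call_threshold: 30)
```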
[Feature #17473]: https://bugs.ruby-lang.org/issues/17473
[Feature #18455]: https://bugs.ruby-lang.org/issues/18455
[Feature #19908]: https://bugs.ruby-lang.org/issues/19908
diff --git a/doc/string/insert.rdoc b/doc/string/insert.rdoc
new file mode 100644
index 00000000000000..d8252d5ec5fd81
--- /dev/null
+++ b/doc/string/insert.rdoc
@@ -0,0 +1,16 @@
+Inserts the given +other_string+ into +self+; returns +self+.
+
+If the given +offset+ is non-negative, inserts +other_string+ at that offset:
+
+ 'foo'.insert(0, 'bar') # => "barfoo"
+ 'foo'.insert(1, 'bar') # => "fbaroo"
+ 'foo'.insert(3, 'bar') # => "foobar"
+ 'тест'.insert(2, 'bar') # => "теbarст" # Characters, not bytes.
+ 'こんにちは'.insert(2, 'bar') # => "こんbarにちは"
+
+If the given +offset+ is negative, counts backward from the end of +self+
+and inserts +other_string+ _after_ the character at that offset:
+
+ 'foo'.insert(-2, 'bar') # => "fobaro"
+
+Related: see {Modifying}[rdoc-ref:String@Modifying].
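A worked illustration of the negative-offset rule above (a sketch, not part of the patch): for a negative offset, the insertion point works out to `length + offset + 1`.

```ruby
s = 'foo'
s.dup.insert(-2, 'bar')                  # => "fobaro"
s.dup.insert(s.length + (-2) + 1, 'bar') # => "fobaro" (same insertion point, 2)
```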
diff --git a/string.c b/string.c
index 0329d2845aa22a..d873d93d8fdbd3 100644
--- a/string.c
+++ b/string.c
@@ -6056,19 +6056,9 @@ rb_str_aset_m(int argc, VALUE *argv, VALUE str)
/*
* call-seq:
- * insert(index, other_string) -> self
+ * insert(offset, other_string) -> self
*
- * Inserts the given +other_string+ into +self+; returns +self+.
- *
- * If the Integer +index+ is positive, inserts +other_string+ at offset +index+:
- *
- * 'foo'.insert(1, 'bar') # => "fbaroo"
- *
- * If the Integer +index+ is negative, counts backward from the end of +self+
- * and inserts +other_string+ at offset index+1
- * (that is, _after_ self[index]):
- *
- * 'foo'.insert(-2, 'bar') # => "fobaro"
+ * :include: doc/string/insert.rdoc
*
*/
diff --git a/test/ruby/test_zjit.rb b/test/ruby/test_zjit.rb
index b5b5a6984797d4..879aaf322577c3 100644
--- a/test/ruby/test_zjit.rb
+++ b/test/ruby/test_zjit.rb
@@ -220,6 +220,22 @@ def entry(a1, a2, a3, a4, a5, a6, a7, a8, a9)
}, call_threshold: 2
end
+ def test_send_exit_with_uninitialized_locals
+ assert_runs 'nil', %q{
+ def entry(init)
+ function_stub_exit(init)
+ end
+
+ def function_stub_exit(init)
+ uninitialized_local = 1 if init
+ uninitialized_local
+ end
+
+ entry(true) # profile the call and write 1 to the local slot
+ entry(false)
+ }, call_threshold: 2, allowed_iseqs: 'entry@-e:2'
+ end
+
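The new test relies on a Ruby scoping detail worth making explicit: a local assigned in a branch that is not taken is still defined for the rest of the method body and reads as `nil`. A standalone sketch:

```ruby
def demo(init)
  x = 1 if init  # the parser defines `x` whether or not the branch runs
  x              # nil when init is falsy
end
demo(true)   # => 1
demo(false)  # => nil
```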
def test_invokebuiltin
omit 'Test fails at the moment due to not handling optional parameters'
assert_compiles '["."]', %q{
@@ -1927,6 +1943,7 @@ def eval_with_jit(
zjit: true,
stats: false,
debug: true,
+ allowed_iseqs: nil,
timeout: 1000,
pipe_fd:
)
@@ -1936,6 +1953,12 @@ def eval_with_jit(
args << "--zjit-num-profiles=#{num_profiles}"
args << "--zjit-stats" if stats
args << "--zjit-debug" if debug
+ if allowed_iseqs
+ jitlist = Tempfile.new("jitlist")
+ jitlist.write(allowed_iseqs)
+ jitlist.close
+ args << "--zjit-allowed-iseqs=#{jitlist.path}"
+ end
end
args << "-e" << script_shell_encode(script)
pipe_r, pipe_w = IO.pipe
@@ -1955,6 +1978,7 @@ def eval_with_jit(
pipe_reader&.join(timeout)
pipe_r&.close
pipe_w&.close
+ jitlist&.unlink
end
def script_shell_encode(s)
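For context, a hedged sketch of what the `allowed_iseqs` plumbing above amounts to at the command line. The `name@-e:line` label format is inferred from the test's `'entry@-e:2'`; the script below defines `entry` on line 1, so the label uses `:1`, and the threshold flag is assumed to match the harness's `call_threshold:` keyword.

```ruby
require 'tempfile'

jitlist = Tempfile.new("jitlist")
jitlist.write("entry@-e:1")  # restrict ZJIT to the `entry` ISEQ defined at -e line 1
jitlist.close

# Call `entry` twice so it crosses the (assumed) call threshold.
system("ruby", "--zjit-call-threshold=2",
       "--zjit-allowed-iseqs=#{jitlist.path}",
       "-e", "def entry = nil; entry; entry")
jitlist.unlink
```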
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index 3aca1bc24f35eb..e186c57745df07 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -2324,14 +2324,15 @@ vm_search_method_fastpath(VALUE cd_owner, struct rb_call_data *cd, VALUE klass)
return vm_search_method_slowpath0(cd_owner, cd, klass);
}
-static const struct rb_callcache *
+static const struct rb_callable_method_entry_struct *
vm_search_method(VALUE cd_owner, struct rb_call_data *cd, VALUE recv)
{
VALUE klass = CLASS_OF(recv);
VM_ASSERT(klass != Qfalse);
VM_ASSERT(RBASIC_CLASS(klass) == 0 || rb_obj_is_kind_of(klass, rb_cClass));
- return vm_search_method_fastpath(cd_owner, cd, klass);
+ const struct rb_callcache *cc = vm_search_method_fastpath(cd_owner, cd, klass);
+ return vm_cc_cme(cc);
}
#if __has_attribute(transparent_union)
@@ -2394,8 +2395,8 @@ static inline int
vm_method_cfunc_is(const rb_iseq_t *iseq, CALL_DATA cd, VALUE recv, cfunc_type func)
{
VM_ASSERT(iseq != NULL);
- const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
- return check_cfunc(vm_cc_cme(cc), func);
+ const struct rb_callable_method_entry_struct *cme = vm_search_method((VALUE)iseq, cd, recv);
+ return check_cfunc(cme, func);
}
#define check_cfunc(me, func) check_cfunc(me, make_cfunc_type(func))
@@ -6161,11 +6162,11 @@ vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
return recv;
}
- const struct rb_callcache *cc = vm_search_method((VALUE)iseq, cd, recv);
+ const struct rb_callable_method_entry_struct *cme = vm_search_method((VALUE)iseq, cd, recv);
switch (type) {
case T_SYMBOL:
- if (check_method_basic_definition(vm_cc_cme(cc))) {
+ if (check_method_basic_definition(cme)) {
// rb_sym_to_s() allocates a mutable string, but since we are only
// going to use this string for interpolation, it's fine to use the
// frozen string.
@@ -6174,7 +6175,7 @@ vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
break;
case T_MODULE:
case T_CLASS:
- if (check_cfunc(vm_cc_cme(cc), rb_mod_to_s)) {
+ if (check_cfunc(cme, rb_mod_to_s)) {
// rb_mod_to_s() allocates a mutable string, but since we are only
// going to use this string for interpolation, it's fine to use the
// frozen string.
@@ -6186,22 +6187,22 @@ vm_objtostring(const rb_iseq_t *iseq, VALUE recv, CALL_DATA cd)
}
break;
case T_NIL:
- if (check_cfunc(vm_cc_cme(cc), rb_nil_to_s)) {
+ if (check_cfunc(cme, rb_nil_to_s)) {
return rb_nil_to_s(recv);
}
break;
case T_TRUE:
- if (check_cfunc(vm_cc_cme(cc), rb_true_to_s)) {
+ if (check_cfunc(cme, rb_true_to_s)) {
return rb_true_to_s(recv);
}
break;
case T_FALSE:
- if (check_cfunc(vm_cc_cme(cc), rb_false_to_s)) {
+ if (check_cfunc(cme, rb_false_to_s)) {
return rb_false_to_s(recv);
}
break;
case T_FIXNUM:
- if (check_cfunc(vm_cc_cme(cc), rb_int_to_s)) {
+ if (check_cfunc(cme, rb_int_to_s)) {
return rb_fix_to_s(recv);
}
break;
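These `check_cfunc` gates implement a fast path for string interpolation that holds only while `to_s` is still the basic definition; redefining `to_s` makes the check fail, falling back to a normal method call. The Ruby-level behavior, as a sketch:

```ruby
"#{123}"  # fast path: Integer#to_s is still the C basic definition (rb_int_to_s)

class Integer
  def to_s = "custom"
end

"#{123}"  # => "custom"; the redefinition defeats the fast path
```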
diff --git a/vm_method.c b/vm_method.c
index 03fb79cddd0d87..fb217ef43de617 100644
--- a/vm_method.c
+++ b/vm_method.c
@@ -1822,6 +1822,25 @@ callable_method_entry_or_negative(VALUE klass, ID mid, VALUE *defined_class_ptr)
const rb_callable_method_entry_t *cme;
VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS);
+
+ /* Fast path: lock-free read from cache */
+ VALUE cc_tbl = RUBY_ATOMIC_VALUE_LOAD(RCLASS_WRITABLE_CC_TBL(klass));
+ if (cc_tbl) {
+ VALUE ccs_data;
+ if (rb_managed_id_table_lookup(cc_tbl, mid, &ccs_data)) {
+ struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_data;
+ VM_ASSERT(vm_ccs_p(ccs));
+
+ if (LIKELY(!METHOD_ENTRY_INVALIDATED(ccs->cme))) {
+ VM_ASSERT(ccs->cme->called_id == mid);
+ if (defined_class_ptr != NULL) *defined_class_ptr = ccs->cme->defined_class;
+ RB_DEBUG_COUNTER_INC(ccs_found);
+ return ccs->cme;
+ }
+ }
+ }
+
+ /* Slow path: need to lock and potentially populate cache */
RB_VM_LOCKING() {
cme = cached_callable_method_entry(klass, mid);
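The hunk above has the double-checked locking shape: an atomic, lock-free read on the hot path, with the lock taken only to populate or repair the cache. A minimal Ruby sketch of the same pattern (all names are hypothetical, not CRuby internals):

```ruby
class MethodCache
  def initialize(&resolver)
    @cache = {}  # mid => entry, read without the lock on the fast path
    @lock = Mutex.new
    @resolver = resolver
  end

  def lookup(mid)
    entry = @cache[mid]
    return entry if entry && !entry.invalidated?  # fast path: no lock

    @lock.synchronize do
      entry = @cache[mid]                         # re-check under the lock
      return entry if entry && !entry.invalidated?
      @cache[mid] = @resolver.call(mid)
    end
  end
end
```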
diff --git a/yjit/not_gmake.mk b/yjit/not_gmake.mk
index 3a2ca9281f9930..0d95d8ddf15947 100644
--- a/yjit/not_gmake.mk
+++ b/yjit/not_gmake.mk
@@ -12,21 +12,7 @@ yjit-static-lib:
$(Q) $(RUSTC) $(YJIT_RUSTC_ARGS)
# Assume GNU flavor LD and OBJCOPY. Works on FreeBSD 13, at least.
-$(YJIT_LIBOBJ): $(YJIT_LIBS)
+$(RUST_LIBOBJ): $(YJIT_LIBS)
$(ECHO) 'partial linking $(YJIT_LIBS) into $@'
$(Q) $(LD) -r -o $@ --whole-archive $(YJIT_LIBS)
-$(Q) $(OBJCOPY) --wildcard --keep-global-symbol='$(SYMBOL_PREFIX)rb_*' $(@)
-
-.PHONY: zjit-static-lib
-$(ZJIT_LIBS): zjit-static-lib
- $(empty)
-
-zjit-static-lib:
- $(ECHO) 'building Rust ZJIT (release mode)'
- $(Q) $(RUSTC) $(ZJIT_RUSTC_ARGS)
-
-# Assume GNU flavor LD and OBJCOPY. Works on FreeBSD 13, at least.
-$(ZJIT_LIBOBJ): $(ZJIT_LIBS)
- $(ECHO) 'partial linking $(ZJIT_LIBS) into $@'
- $(Q) $(LD) -r -o $@ --whole-archive $(ZJIT_LIBS)
- -$(Q) $(OBJCOPY) --wildcard --keep-global-symbol='$(SYMBOL_PREFIX)rb_*' $(@)
diff --git a/zjit/src/backend/arm64/mod.rs b/zjit/src/backend/arm64/mod.rs
index 3b7742f16e8e8e..c60ec532856849 100644
--- a/zjit/src/backend/arm64/mod.rs
+++ b/zjit/src/backend/arm64/mod.rs
@@ -2059,4 +2059,93 @@ mod tests {
0x4: adds x1, x0, #1
"});
}
+
+ #[test]
+ fn test_reorder_c_args_no_cycle() {
+ crate::options::rb_zjit_prepare_options();
+ let (mut asm, mut cb) = setup_asm();
+
+ asm.ccall(0 as _, vec![
+ C_ARG_OPNDS[0], // mov x0, x0 (optimized away)
+ C_ARG_OPNDS[1], // mov x1, x1 (optimized away)
+ ]);
+ asm.compile_with_num_regs(&mut cb, ALLOC_REGS.len());
+
+ assert_disasm!(cb, "100080d200023fd6", {"
+ 0x0: mov x16, #0
+ 0x4: blr x16
+ "});
+ }
+
+ #[test]
+ fn test_reorder_c_args_single_cycle() {
+ crate::options::rb_zjit_prepare_options();
+ let (mut asm, mut cb) = setup_asm();
+
+ // x0 and x1 form a cycle
+ asm.ccall(0 as _, vec![
+ C_ARG_OPNDS[1], // mov x0, x1
+ C_ARG_OPNDS[0], // mov x1, x0
+ C_ARG_OPNDS[2], // mov x2, x2 (optimized away)
+ ]);
+ asm.compile_with_num_regs(&mut cb, ALLOC_REGS.len());
+
+ assert_disasm!(cb, "f00300aae00301aae10310aa100080d200023fd6", {"
+ 0x0: mov x16, x0
+ 0x4: mov x0, x1
+ 0x8: mov x1, x16
+ 0xc: mov x16, #0
+ 0x10: blr x16
+ "});
+ }
+
+ #[test]
+ fn test_reorder_c_args_two_cycles() {
+ crate::options::rb_zjit_prepare_options();
+ let (mut asm, mut cb) = setup_asm();
+
+ // x0 and x1 form a cycle, and x2 and x3 form another cycle
+ asm.ccall(0 as _, vec![
+ C_ARG_OPNDS[1], // mov x0, x1
+ C_ARG_OPNDS[0], // mov x1, x0
+ C_ARG_OPNDS[3], // mov x2, x3
+ C_ARG_OPNDS[2], // mov x3, x2
+ ]);
+ asm.compile_with_num_regs(&mut cb, ALLOC_REGS.len());
+
+ assert_disasm!(cb, "f00302aae20303aae30310aaf00300aae00301aae10310aa100080d200023fd6", {"
+ 0x0: mov x16, x2
+ 0x4: mov x2, x3
+ 0x8: mov x3, x16
+ 0xc: mov x16, x0
+ 0x10: mov x0, x1
+ 0x14: mov x1, x16
+ 0x18: mov x16, #0
+ 0x1c: blr x16
+ "});
+ }
+
+ #[test]
+ fn test_reorder_c_args_large_cycle() {
+ crate::options::rb_zjit_prepare_options();
+ let (mut asm, mut cb) = setup_asm();
+
+ // x0, x1, and x2 form a cycle
+ asm.ccall(0 as _, vec![
+ C_ARG_OPNDS[1], // mov x0, x1
+ C_ARG_OPNDS[2], // mov x1, x2
+ C_ARG_OPNDS[0], // mov x2, x0
+ ]);
+ asm.compile_with_num_regs(&mut cb, ALLOC_REGS.len());
+
+ assert_disasm!(cb, "f00300aae00301aae10302aae20310aa100080d200023fd6", {"
+ 0x0: mov x16, x0
+ 0x4: mov x0, x1
+ 0x8: mov x1, x2
+ 0xc: mov x2, x16
+ 0x10: mov x16, #0
+ 0x14: blr x16
+ "});
+ }
+
}
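These tests pin down the strategy for resolving parallel argument moves: emit any move whose destination no remaining move still reads, and break pure cycles by rotating one value through the scratch register (x16 here, r11 in the x86_64 tests). A minimal Ruby sketch of that algorithm, reconstructed from the expected disassembly rather than taken from the backend:

```ruby
# moves maps destination register => source register.
def schedule_moves(moves, scratch)
  moves = moves.reject { |dst, src| dst == src }  # "mov x0, x0" is optimized away
  out = []
  until moves.empty?
    dst, src = moves.find { |d, _| !moves.values.include?(d) }
    if dst
      out << [dst, src]    # safe: no remaining move still reads dst
      moves.delete(dst)
    else
      d, s = moves.first   # everything left is cyclic
      out << [scratch, d]  # preserve d's value before clobbering it
      moves = moves.transform_values { |v| v == d ? scratch : v }
      out << [d, s]
      moves.delete(d)
    end
  end
  out
end

schedule_moves({ x0: :x1, x1: :x0, x2: :x2 }, :x16)
# => [[:x16, :x0], [:x0, :x1], [:x1, :x16]]
# matching test_reorder_c_args_single_cycle above
```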
diff --git a/zjit/src/backend/lir.rs b/zjit/src/backend/lir.rs
index be5bda052d1a5d..1bb4cd024b5295 100644
--- a/zjit/src/backend/lir.rs
+++ b/zjit/src/backend/lir.rs
@@ -1202,7 +1202,7 @@ impl Assembler
/// Append an instruction onto the current list of instructions and update
/// the live ranges of any instructions whose outputs are being used as
/// operands to this instruction.
- pub fn push_insn(&mut self, mut insn: Insn) {
+ pub fn push_insn(&mut self, insn: Insn) {
// Index of this instruction
let insn_idx = self.insns.len();
@@ -1214,7 +1214,7 @@ impl Assembler
}
// If we find any VReg from previous instructions, extend the live range to insn_idx
- let mut opnd_iter = insn.opnd_iter_mut();
+ let mut opnd_iter = insn.opnd_iter();
while let Some(opnd) = opnd_iter.next() {
match *opnd {
Opnd::VReg { idx, .. } |
@@ -1380,13 +1380,15 @@ impl Assembler
}
}
- // If the output VReg of this instruction is used by another instruction,
- // we need to allocate a register to it
+ // Allocate a register for the output operand if it exists
let vreg_idx = match insn.out_opnd() {
Some(Opnd::VReg { idx, .. }) => Some(*idx),
_ => None,
};
- if vreg_idx.is_some() && live_ranges[vreg_idx.unwrap()].end() != index {
+ if vreg_idx.is_some() {
+ if live_ranges[vreg_idx.unwrap()].end() == index {
+ debug!("Allocating a register for VReg({}) at instruction index {} even though it does not live past this index", vreg_idx.unwrap(), index);
+ }
// This is going to be the output operand that we will set on the
// instruction. CCall and LiveReg need to use a specific register.
let mut out_reg = match insn {
@@ -1466,6 +1468,18 @@ impl Assembler
}
}
+ // If we have an output that dies at its definition (it is unused), free up the
+ // register
+ if let Some(idx) = vreg_idx {
+ if live_ranges[idx].end() == index {
+ if let Some(reg) = reg_mapping[idx] {
+ pool.dealloc_reg(&reg);
+ } else {
+ unreachable!("no register allocated for insn {:?}", insn);
+ }
+ }
+ }
+
// Push instruction(s)
let is_ccall = matches!(insn, Insn::CCall { .. });
match insn {
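The dealloc hunk encodes a small invariant: every output VReg gets a register at its defining instruction, and if its live range ends right there (the value is never read), the register goes straight back to the pool. A self-contained sketch of that flow with hypothetical names, not the LIR types:

```ruby
class RegisterPool
  def initialize(regs)
    @free = regs.dup
  end

  def alloc_reg
    @free.shift or raise "out of registers"
  end

  def dealloc_reg(reg)
    @free.unshift(reg)
  end
end

pool = RegisterPool.new([:r0, :r1])
index = 5           # index of the defining instruction
live_range_end = 5  # last use == definition: the output is unused

reg = pool.alloc_reg                              # always allocate for the output
pool.dealloc_reg(reg) if live_range_end == index  # dead on arrival: free at once
```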
diff --git a/zjit/src/backend/x86_64/mod.rs b/zjit/src/backend/x86_64/mod.rs
index f15b32f9462ccb..2a02e1b725db14 100644
--- a/zjit/src/backend/x86_64/mod.rs
+++ b/zjit/src/backend/x86_64/mod.rs
@@ -1247,13 +1247,14 @@ mod tests {
#[test]
fn test_reorder_c_args_no_cycle() {
+ crate::options::rb_zjit_prepare_options();
let (mut asm, mut cb) = setup_asm();
asm.ccall(0 as _, vec![
C_ARG_OPNDS[0], // mov rdi, rdi (optimized away)
C_ARG_OPNDS[1], // mov rsi, rsi (optimized away)
]);
- asm.compile_with_num_regs(&mut cb, 0);
+ asm.compile_with_num_regs(&mut cb, ALLOC_REGS.len());
assert_disasm!(cb, "b800000000ffd0", {"
0x0: mov eax, 0
@@ -1263,6 +1264,7 @@ mod tests {
#[test]
fn test_reorder_c_args_single_cycle() {
+ crate::options::rb_zjit_prepare_options();
let (mut asm, mut cb) = setup_asm();
// rdi and rsi form a cycle
@@ -1271,7 +1273,7 @@ mod tests {
C_ARG_OPNDS[0], // mov rsi, rdi
C_ARG_OPNDS[2], // mov rdx, rdx (optimized away)
]);
- asm.compile_with_num_regs(&mut cb, 0);
+ asm.compile_with_num_regs(&mut cb, ALLOC_REGS.len());
assert_disasm!(cb, "4989f34889fe4c89dfb800000000ffd0", {"
0x0: mov r11, rsi
@@ -1284,6 +1286,7 @@ mod tests {
#[test]
fn test_reorder_c_args_two_cycles() {
+ crate::options::rb_zjit_prepare_options();
let (mut asm, mut cb) = setup_asm();
// rdi and rsi form a cycle, and rdx and rcx form another cycle
@@ -1293,7 +1296,7 @@ mod tests {
C_ARG_OPNDS[3], // mov rdx, rcx
C_ARG_OPNDS[2], // mov rcx, rdx
]);
- asm.compile_with_num_regs(&mut cb, 0);
+ asm.compile_with_num_regs(&mut cb, ALLOC_REGS.len());
assert_disasm!(cb, "4989f34889fe4c89df4989cb4889d14c89dab800000000ffd0", {"
0x0: mov r11, rsi
@@ -1309,6 +1312,7 @@ mod tests {
#[test]
fn test_reorder_c_args_large_cycle() {
+ crate::options::rb_zjit_prepare_options();
let (mut asm, mut cb) = setup_asm();
// rdi, rsi, and rdx form a cycle
@@ -1317,7 +1321,7 @@ mod tests {
C_ARG_OPNDS[2], // mov rsi, rdx
C_ARG_OPNDS[0], // mov rdx, rdi
]);
- asm.compile_with_num_regs(&mut cb, 0);
+ asm.compile_with_num_regs(&mut cb, ALLOC_REGS.len());
assert_disasm!(cb, "4989f34889d64889fa4c89dfb800000000ffd0", {"
0x0: mov r11, rsi
diff --git a/zjit/src/codegen.rs b/zjit/src/codegen.rs
index f502801aff8be2..0549365666e5b7 100644
--- a/zjit/src/codegen.rs
+++ b/zjit/src/codegen.rs
@@ -1,6 +1,7 @@
use std::cell::{Cell, RefCell};
use std::rc::Rc;
use std::ffi::{c_int, c_long, c_void};
+use std::slice;
use crate::asm::Label;
use crate::backend::current::{Reg, ALLOC_REGS};
@@ -1348,11 +1349,17 @@ fn local_idx_to_ep_offset(iseq: IseqPtr, local_idx: usize) -> i32 {
local_size_and_idx_to_ep_offset(local_size as usize, local_idx)
}
-/// Convert the number of locals and a local index to an offset in the EP
+/// Convert the number of locals and a local index to an offset from the EP
pub fn local_size_and_idx_to_ep_offset(local_size: usize, local_idx: usize) -> i32 {
local_size as i32 - local_idx as i32 - 1 + VM_ENV_DATA_SIZE as i32
}
+/// Convert the number of locals and a local index to an offset from the BP.
+/// We don't move the SP register after entry, so we often use SP as BP.
+pub fn local_size_and_idx_to_bp_offset(local_size: usize, local_idx: usize) -> i32 {
+ local_size_and_idx_to_ep_offset(local_size, local_idx) + 1
+}
+
/// Convert ISEQ into High-level IR
fn compile_iseq(iseq: IseqPtr) -> Option<CodePtr> {
let mut function = match iseq_to_hir(iseq) {
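The EP/BP arithmetic is easy to get off by one, so here is a worked instance mirroring the Rust formulas in Ruby (`VM_ENV_DATA_SIZE` is 3 on CRuby):

```ruby
VM_ENV_DATA_SIZE = 3

def ep_offset(local_size, local_idx) = local_size - local_idx - 1 + VM_ENV_DATA_SIZE
def bp_offset(local_size, local_idx) = ep_offset(local_size, local_idx) + 1

# An ISEQ with 2 locals: local 0 sits deepest in the frame.
ep_offset(2, 0)  # => 4, i.e. the local lives at *(ep - 4)
ep_offset(2, 1)  # => 3
bp_offset(2, 1)  # => 4, i.e. *(sp - 4) when SP doubles as BP
```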
@@ -1448,26 +1455,41 @@ c_callable! {
/// This function is expected to be called repeatedly when ZJIT fails to compile the stub.
/// We should be able to compile most (if not all) function stubs by side-exiting at unsupported
/// instructions, so this should be used primarily for cb.has_dropped_bytes() situations.
- fn function_stub_hit(iseq_call_ptr: *const c_void, ec: EcPtr, sp: *mut VALUE) -> *const u8 {
+ fn function_stub_hit(iseq_call_ptr: *const c_void, cfp: CfpPtr, sp: *mut VALUE) -> *const u8 {
with_vm_lock(src_loc!(), || {
- // gen_push_frame() doesn't set PC and SP, so we need to set them before exit.
+ // gen_push_frame() doesn't set PC, so we need to set it before exit.
// function_stub_hit_body() may allocate and call gc_validate_pc(), so we always set PC.
let iseq_call = unsafe { Rc::from_raw(iseq_call_ptr as *const RefCell<IseqCall>) };
- let cfp = unsafe { get_ec_cfp(ec) };
- let pc = unsafe { rb_iseq_pc_at_idx(iseq_call.borrow().iseq, 0) }; // TODO: handle opt_pc once supported
+ let iseq = iseq_call.borrow().iseq;
+ let pc = unsafe { rb_iseq_pc_at_idx(iseq, 0) }; // TODO: handle opt_pc once supported
unsafe { rb_set_cfp_pc(cfp, pc) };
- unsafe { rb_set_cfp_sp(cfp, sp) };
+
+ // JIT-to-JIT calls don't set SP or fill uninitialized (non-argument) locals with nil.
+ // We need to do both if we side-exit from function_stub_hit.
+ fn spill_stack(iseq: IseqPtr, cfp: CfpPtr, sp: *mut VALUE) {
+ unsafe {
+ // Set SP which gen_push_frame() doesn't set
+ rb_set_cfp_sp(cfp, sp);
+
+ // Fill uninitialized (non-argument) locals with nil
+ let local_size = get_iseq_body_local_table_size(iseq) as usize;
+ let num_params = get_iseq_body_param_size(iseq) as usize;
+ let base = sp.offset(-local_size_and_idx_to_bp_offset(local_size, num_params) as isize);
+ slice::from_raw_parts_mut(base, local_size - num_params).fill(Qnil);
+ }
+ }
// If we already know we can't compile the ISEQ, fail early without cb.mark_all_executable().
// TODO: Alan thinks the payload status part of this check can happen without the VM lock, since the whole
// code path can be made read-only. But you still need the check as is while holding the VM lock in any case.
let cb = ZJITState::get_code_block();
- let payload = get_or_create_iseq_payload(iseq_call.borrow().iseq);
+ let payload = get_or_create_iseq_payload(iseq);
if cb.has_dropped_bytes() || payload.status == IseqStatus::CantCompile {
// We'll use this Rc again, so increment the ref count decremented by from_raw.
unsafe { Rc::increment_strong_count(iseq_call_ptr as *const RefCell<IseqCall>); }
// Exit to the interpreter
+ spill_stack(iseq, cfp, sp);
return ZJITState::get_exit_trampoline().raw_ptr(cb);
}
@@ -1477,6 +1499,7 @@ c_callable! {
code_ptr
} else {
// Exit to the interpreter
+ spill_stack(iseq, cfp, sp);
ZJITState::get_exit_trampoline()
};
cb.mark_all_executable();
pub fn gen_function_stub_hit_trampoline(cb: &mut CodeBlock) -> Option<CodePtr> {
const { assert!(ALLOC_REGS.len() % 2 == 0, "x86_64 would need to push one more if we push an odd number of regs"); }
// Compile the stubbed ISEQ
- let jump_addr = asm_ccall!(asm, function_stub_hit, SCRATCH_OPND, EC, SP);
+ let jump_addr = asm_ccall!(asm, function_stub_hit, SCRATCH_OPND, CFP, SP);
asm.mov(SCRATCH_OPND, jump_addr);
asm_comment!(asm, "restore argument registers");