From d2c30a3bae908772c1de453aad8686000f6a5096 Mon Sep 17 00:00:00 2001 From: Luke Gruber Date: Mon, 10 Nov 2025 20:32:30 -0500 Subject: [PATCH 01/10] Fix `thread_sched_wait_events` race (#15067) This race condition was found when calling `Thread#join` with a timeout inside a ractor. The race is between the polling thread waking up the thread and the `ubf` getting called (`ubf_event_waiting`). The error was that the ubf or polling thread would set the thread as ready, but then the other function would do the same. Fixes [Bug #21614] --- thread_pthread.c | 32 ++++++++++--------- thread_pthread_mn.c | 76 +++++++++++++++++++++++++-------------------- 2 files changed, 60 insertions(+), 48 deletions(-) diff --git a/thread_pthread.c b/thread_pthread.c index 64912a58dacfff..2eaa407f10ca94 100644 --- a/thread_pthread.c +++ b/thread_pthread.c @@ -2592,16 +2592,14 @@ ubf_threads_empty(void) static void ubf_wakeup_all_threads(void) { - if (!ubf_threads_empty()) { - rb_thread_t *th; - rb_native_mutex_lock(&ubf_list_lock); - { - ccan_list_for_each(&ubf_list_head, th, sched.node.ubf) { - ubf_wakeup_thread(th); - } + rb_thread_t *th; + rb_native_mutex_lock(&ubf_list_lock); + { + ccan_list_for_each(&ubf_list_head, th, sched.node.ubf) { + ubf_wakeup_thread(th); } - rb_native_mutex_unlock(&ubf_list_lock); } + rb_native_mutex_unlock(&ubf_list_lock); } #else /* USE_UBF_LIST */ @@ -2995,6 +2993,17 @@ timer_thread_deq_wakeup(rb_vm_t *vm, rb_hrtime_t now) return NULL; } +static void +timer_thread_wakeup_thread_locked(struct rb_thread_sched *sched, rb_thread_t *th) +{ + if (sched->running != th) { + thread_sched_to_ready_common(sched, th, true, false); + } + else { + // will be release the execution right + } +} + static void timer_thread_wakeup_thread(rb_thread_t *th) { @@ -3003,12 +3012,7 @@ timer_thread_wakeup_thread(rb_thread_t *th) thread_sched_lock(sched, th); { - if (sched->running != th) { - thread_sched_to_ready_common(sched, th, true, false); - } - else { - // will be release the 
execution right - } + timer_thread_wakeup_thread_locked(sched, th); } thread_sched_unlock(sched, th); } diff --git a/thread_pthread_mn.c b/thread_pthread_mn.c index f3451eb3531328..8100fd534e461e 100644 --- a/thread_pthread_mn.c +++ b/thread_pthread_mn.c @@ -3,26 +3,25 @@ #if USE_MN_THREADS static void timer_thread_unregister_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags); +static void timer_thread_wakeup_thread_locked(struct rb_thread_sched *sched, rb_thread_t *th); static bool timer_thread_cancel_waiting(rb_thread_t *th) { bool canceled = false; - if (th->sched.waiting_reason.flags) { - rb_native_mutex_lock(&timer_th.waiting_lock); - { - if (th->sched.waiting_reason.flags) { - canceled = true; - ccan_list_del_init(&th->sched.waiting_reason.node); - if (th->sched.waiting_reason.flags & (thread_sched_waiting_io_read | thread_sched_waiting_io_write)) { - timer_thread_unregister_waiting(th, th->sched.waiting_reason.data.fd, th->sched.waiting_reason.flags); - } - th->sched.waiting_reason.flags = thread_sched_waiting_none; + rb_native_mutex_lock(&timer_th.waiting_lock); + { + if (th->sched.waiting_reason.flags) { + canceled = true; + ccan_list_del_init(&th->sched.waiting_reason.node); + if (th->sched.waiting_reason.flags & (thread_sched_waiting_io_read | thread_sched_waiting_io_write)) { + timer_thread_unregister_waiting(th, th->sched.waiting_reason.data.fd, th->sched.waiting_reason.flags); } + th->sched.waiting_reason.flags = thread_sched_waiting_none; } - rb_native_mutex_unlock(&timer_th.waiting_lock); } + rb_native_mutex_unlock(&timer_th.waiting_lock); return canceled; } @@ -41,10 +40,10 @@ ubf_event_waiting(void *ptr) th->unblock.func = NULL; th->unblock.arg = NULL; - bool canceled = timer_thread_cancel_waiting(th); - thread_sched_lock(sched, th); { + bool canceled = timer_thread_cancel_waiting(th); + if (sched->running == th) { RUBY_DEBUG_LOG("not waiting yet"); } @@ -68,16 +67,19 @@ thread_sched_wait_events(struct rb_thread_sched *sched, 
rb_thread_t *th, int fd, volatile bool timedout = false, need_cancel = false; - if (timer_thread_register_waiting(th, fd, events, rel)) { - RUBY_DEBUG_LOG("wait fd:%d", fd); + if (ubf_set(th, ubf_event_waiting, (void *)th)) { + return false; + } + + thread_sched_lock(sched, th); + { + if (timer_thread_register_waiting(th, fd, events, rel)) { + RUBY_DEBUG_LOG("wait fd:%d", fd); - RB_VM_SAVE_MACHINE_CONTEXT(th); - ubf_set(th, ubf_event_waiting, (void *)th); + RB_VM_SAVE_MACHINE_CONTEXT(th); - RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th); + RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th); - thread_sched_lock(sched, th); - { if (th->sched.waiting_reason.flags == thread_sched_waiting_none) { // already awaken } @@ -95,21 +97,21 @@ thread_sched_wait_events(struct rb_thread_sched *sched, rb_thread_t *th, int fd, } timedout = th->sched.waiting_reason.data.result == 0; - } - thread_sched_unlock(sched, th); - - if (need_cancel) { - timer_thread_cancel_waiting(th); - } - ubf_clear(th); // TODO: maybe it is already NULL? + if (need_cancel) { + timer_thread_cancel_waiting(th); + } - th->status = THREAD_RUNNABLE; - } - else { - RUBY_DEBUG_LOG("can not wait fd:%d", fd); - return false; + th->status = THREAD_RUNNABLE; + } + else { + RUBY_DEBUG_LOG("can not wait fd:%d", fd); + timedout = false; + } } + thread_sched_unlock(sched, th); + + ubf_clear(th); // TODO: maybe it is already NULL? VM_ASSERT(sched->running == th); @@ -972,6 +974,8 @@ timer_thread_polling(rb_vm_t *vm) (filter == EVFILT_READ) ? "read/" : "", (filter == EVFILT_WRITE) ? 
"write/" : ""); + struct rb_thread_sched *sched = TH_SCHED(th); + thread_sched_lock(sched, th); rb_native_mutex_lock(&timer_th.waiting_lock); { if (th->sched.waiting_reason.flags) { @@ -983,13 +987,14 @@ timer_thread_polling(rb_vm_t *vm) th->sched.waiting_reason.data.fd = -1; th->sched.waiting_reason.data.result = filter; - timer_thread_wakeup_thread(th); + timer_thread_wakeup_thread_locked(sched, th); } else { // already released } } rb_native_mutex_unlock(&timer_th.waiting_lock); + thread_sched_unlock(sched, th); } } #else @@ -1014,6 +1019,8 @@ timer_thread_polling(rb_vm_t *vm) (events & EPOLLERR) ? "err/" : "", (events & EPOLLHUP) ? "hup/" : ""); + struct rb_thread_sched *sched = TH_SCHED(th); + thread_sched_lock(sched, th); rb_native_mutex_lock(&timer_th.waiting_lock); { if (th->sched.waiting_reason.flags) { @@ -1025,13 +1032,14 @@ timer_thread_polling(rb_vm_t *vm) th->sched.waiting_reason.data.fd = -1; th->sched.waiting_reason.data.result = (int)events; - timer_thread_wakeup_thread(th); + timer_thread_wakeup_thread_locked(sched, th); } else { // already released } } rb_native_mutex_unlock(&timer_th.waiting_lock); + thread_sched_unlock(sched, th); } } #endif From 16c6f36039b14c983125db8144d791714035737b Mon Sep 17 00:00:00 2001 From: Luke Gruber Date: Mon, 10 Nov 2025 20:33:07 -0500 Subject: [PATCH 02/10] [DOC] Clarify `Thread#kill` documentation. (#15132) Mention that it is asynchronous and that the killed thread can still run a small amount of ruby code before exiting. --- thread.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/thread.c b/thread.c index 0419fdf0544288..5d75bf41228d3c 100644 --- a/thread.c +++ b/thread.c @@ -2935,7 +2935,10 @@ thread_raise_m(int argc, VALUE *argv, VALUE self) * * Terminates +thr+ and schedules another thread to be run, returning * the terminated Thread. If this is the main thread, or the last - * thread, exits the process. + * thread, exits the process. 
Note that the caller does not wait for + * the thread to terminate if the receiver is different from the currently + * running thread. The termination is asynchronous, and the thread can still + * run a small amount of ruby code before exiting. */ VALUE From 148fde27545ee35c8aab4ec7bca027184d79fbc4 Mon Sep 17 00:00:00 2001 From: Luke Gruber Date: Mon, 10 Nov 2025 21:52:43 -0500 Subject: [PATCH 03/10] Revert "ns_subclasses refcount accesses need to be atomic (#15083)" (#15138) This reverts commit 2998c8d6b99ec49925ebea42198b29c3e27b34a7. We need to find a better way to fix this bug. Even with this refcount change, errors were still being seen in CI. For now we need to remove this failing test. --- class.c | 2 +- internal/class.h | 12 ++++++------ test/ruby/test_class.rb | 15 --------------- 3 files changed, 7 insertions(+), 22 deletions(-) diff --git a/class.c b/class.c index 550e5422f5655b..b4b0f556cefbed 100644 --- a/class.c +++ b/class.c @@ -671,7 +671,7 @@ rb_class_classext_free_subclasses(rb_classext_t *ext, VALUE klass, bool replacin } VM_ASSERT( rb_box_subclasses_ref_count(anchor->box_subclasses) > 0, - "box_subclasses refcount (%p) %d", anchor->box_subclasses, rb_box_subclasses_ref_count(anchor->box_subclasses)); + "box_subclasses refcount (%lp) %d", anchor->box_subclasses, rb_box_subclasses_ref_count(anchor->box_subclasses)); st_delete(tbl, &box_id, NULL); rb_box_subclasses_ref_dec(anchor->box_subclasses); xfree(anchor); diff --git a/internal/class.h b/internal/class.h index d4306fc84d9f6a..f122d2f189c580 100644 --- a/internal/class.h +++ b/internal/class.h @@ -28,29 +28,29 @@ #endif struct rb_box_subclasses { - rb_atomic_t refcount; + long refcount; struct st_table *tbl; }; typedef struct rb_box_subclasses rb_box_subclasses_t; -static inline rb_atomic_t +static inline long rb_box_subclasses_ref_count(rb_box_subclasses_t *box_sub) { - return ATOMIC_LOAD_RELAXED(box_sub->refcount); + return box_sub->refcount; } static inline rb_box_subclasses_t * 
rb_box_subclasses_ref_inc(rb_box_subclasses_t *box_sub) { - RUBY_ATOMIC_FETCH_ADD(box_sub->refcount, 1); + box_sub->refcount++; return box_sub; } static inline void rb_box_subclasses_ref_dec(rb_box_subclasses_t *box_sub) { - rb_atomic_t was = RUBY_ATOMIC_FETCH_SUB(box_sub->refcount, 1); - if (was == 1) { + box_sub->refcount--; + if (box_sub->refcount == 0) { st_free_table(box_sub->tbl); xfree(box_sub); } diff --git a/test/ruby/test_class.rb b/test/ruby/test_class.rb index cb05751da16c16..f40817e7a1ef54 100644 --- a/test/ruby/test_class.rb +++ b/test/ruby/test_class.rb @@ -887,19 +887,4 @@ def test_method_table_assignment_just_after_class_init class C; end end; end - - def test_subclasses_refcount_in_ractors - assert_ractor "#{<<~"begin;"}\n#{<<~'end;'}" - begin; - rs = [] - 8.times do - rs << Ractor.new do - 5_000.times do - Class.new - end - end - end - rs.each(&:join) - end; - end end From fafecb43c527bca8a3493436c16aa7d63782bb0c Mon Sep 17 00:00:00 2001 From: Alan Wu Date: Mon, 10 Nov 2025 22:21:10 -0500 Subject: [PATCH 04/10] Fix printf specifier. %lp doesn't make sense. 
Triggered -Wformat --- class.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/class.c b/class.c index b4b0f556cefbed..34bc7d100538e7 100644 --- a/class.c +++ b/class.c @@ -671,7 +671,7 @@ rb_class_classext_free_subclasses(rb_classext_t *ext, VALUE klass, bool replacin } VM_ASSERT( rb_box_subclasses_ref_count(anchor->box_subclasses) > 0, - "box_subclasses refcount (%lp) %d", anchor->box_subclasses, rb_box_subclasses_ref_count(anchor->box_subclasses)); + "box_subclasses refcount (%p) %ld", anchor->box_subclasses, rb_box_subclasses_ref_count(anchor->box_subclasses)); st_delete(tbl, &box_id, NULL); rb_box_subclasses_ref_dec(anchor->box_subclasses); xfree(anchor); From 6e6f5d3c32a709c891ac6aa7833376907a6c81b5 Mon Sep 17 00:00:00 2001 From: Alan Wu Date: Thu, 1 May 2025 19:07:50 +0900 Subject: [PATCH 05/10] Add test for [Bug #21265] The crash was fixed by a4dff09be79b52288a47658964d25e5aa84fc960 ("Fix resolving refined module-defined method"). I had a patch for this around for a few months but never merged it. Oops! 
--- test/ruby/test_refinement.rb | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/test/ruby/test_refinement.rb b/test/ruby/test_refinement.rb index bd61ccfe1d579a..bdc6667b8e81fe 100644 --- a/test/ruby/test_refinement.rb +++ b/test/ruby/test_refinement.rb @@ -2724,6 +2724,18 @@ def test_refined_module_method assert_equal :qux, meth.name end + def test_symbol_proc_from_using_scope + # assert_separately to contain the side effects of refining Kernel + assert_separately([], <<~RUBY) + class RefinedScope + using(Module.new { refine(Kernel) { def itself = 0 } }) + ITSELF = :itself.to_proc + end + + assert_equal(1, RefinedScope::ITSELF[1], "[Bug #21265]") + RUBY + end + private def eval_using(mod, s) From d9f0b5a5f91b03ab5043029b4ae19847e965aa1a Mon Sep 17 00:00:00 2001 From: Takashi Kokubun Date: Mon, 10 Nov 2025 20:07:35 -0800 Subject: [PATCH 06/10] ZJIT: Set cfp->sp on leaf calls with GC (#15137) Co-authored-by: Randy Stauner --- test/ruby/test_zjit.rb | 2 -- zjit/src/codegen.rs | 26 +++++++++++++++++++++----- zjit/src/hir.rs | 7 +++++++ 3 files changed, 28 insertions(+), 7 deletions(-) diff --git a/test/ruby/test_zjit.rb b/test/ruby/test_zjit.rb index ae1af5c2c038b6..88580d690ee815 100644 --- a/test/ruby/test_zjit.rb +++ b/test/ruby/test_zjit.rb @@ -3165,8 +3165,6 @@ def test(define) end def test_regression_cfp_sp_set_correctly_before_leaf_gc_call - omit 'reproduction for known, unresolved ZJIT bug' - assert_compiles ':ok', %q{ def check(l, r) return 1 unless l diff --git a/zjit/src/codegen.rs b/zjit/src/codegen.rs index 5047de934da28b..f27e61831b9c1f 100644 --- a/zjit/src/codegen.rs +++ b/zjit/src/codegen.rs @@ -1851,6 +1851,8 @@ fn gen_incr_send_fallback_counter(asm: &mut Assembler, reason: SendFallbackReaso /// /// Unlike YJIT, we don't need to save the stack slots to protect them from GC /// because the backend spills all live registers onto the C stack on CCall. 
+/// However, to avoid marking uninitialized stack slots, this also updates SP, +/// which may have cfp->sp for a past frame or a past non-leaf call. fn gen_prepare_call_with_gc(asm: &mut Assembler, state: &FrameState, leaf: bool) { let opcode: usize = state.get_opcode().try_into().unwrap(); let next_pc: *const VALUE = unsafe { state.pc.offset(insn_len(opcode) as isize) }; @@ -1859,13 +1861,27 @@ fn gen_prepare_call_with_gc(asm: &mut Assembler, state: &FrameState, leaf: bool) asm_comment!(asm, "save PC to CFP"); asm.mov(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_PC), Opnd::const_ptr(next_pc)); + gen_save_sp(asm, state.stack_size()); if leaf { asm.expect_leaf_ccall(state.stack_size()); } } fn gen_prepare_leaf_call_with_gc(asm: &mut Assembler, state: &FrameState) { - gen_prepare_call_with_gc(asm, state, true); + // In gen_prepare_call_with_gc(), we update cfp->sp for leaf calls too. + // + // Here, cfp->sp may be pointing to either of the following: + // 1. cfp->sp for a past frame, which gen_push_frame() skips to initialize + // 2. cfp->sp set by gen_prepare_non_leaf_call() for the current frame + // + // When (1), to avoid marking dead objects, we need to set cfp->sp for the current frame. + // When (2), setting cfp->sp at gen_push_frame() and not updating cfp->sp here could lead to + // keeping objects longer than it should, so we set cfp->sp at every call of this function. + // + // We use state.without_stack() to pass stack_size=0 to gen_save_sp() because we don't write + // VM stack slots on leaf calls, which leaves those stack slots uninitialized. ZJIT keeps + // live objects on the C stack, so they are protected from GC properly. 
+ gen_prepare_call_with_gc(asm, &state.without_stack(), true); } /// Save the current SP on the CFP @@ -1907,11 +1923,11 @@ fn gen_spill_stack(jit: &JITState, asm: &mut Assembler, state: &FrameState) { fn gen_prepare_non_leaf_call(jit: &JITState, asm: &mut Assembler, state: &FrameState) { // TODO: Lazily materialize caller frames when needed // Save PC for backtraces and allocation tracing + // and SP to avoid marking uninitialized stack slots gen_prepare_call_with_gc(asm, state, false); - // Save SP and spill the virtual stack in case it raises an exception + // Spill the virtual stack in case it raises an exception // and the interpreter uses the stack for handling the exception - gen_save_sp(asm, state.stack().len()); gen_spill_stack(jit, asm, state); // Spill locals in case the method looks at caller Bindings @@ -1958,8 +1974,8 @@ fn gen_push_frame(asm: &mut Assembler, argc: usize, state: &FrameState, frame: C asm_comment!(asm, "push callee control frame"); if let Some(iseq) = frame.iseq { - // cfp_opnd(RUBY_OFFSET_CFP_PC): written by the callee frame on side-exits or non-leaf calls - // cfp_opnd(RUBY_OFFSET_CFP_SP): written by the callee frame on side-exits or non-leaf calls + // cfp_opnd(RUBY_OFFSET_CFP_PC): written by the callee frame on side-exits, non-leaf calls, or calls with GC + // cfp_opnd(RUBY_OFFSET_CFP_SP): written by the callee frame on side-exits, non-leaf calls, or calls with GC asm.mov(cfp_opnd(RUBY_OFFSET_CFP_ISEQ), VALUE::from(iseq).into()); } else { // C frames don't have a PC and ISEQ in normal operation. diff --git a/zjit/src/hir.rs b/zjit/src/hir.rs index 484063afd582c0..b27ccd13454938 100644 --- a/zjit/src/hir.rs +++ b/zjit/src/hir.rs @@ -4153,6 +4153,13 @@ impl FrameState { state.locals.clear(); state } + + /// Return itself without stack. Used by leaf calls with GC to reset SP to the base pointer. 
+ pub fn without_stack(&self) -> Self { + let mut state = self.clone(); + state.stack.clear(); + state + } } /// Print adaptor for [`FrameState`]. See [`PtrPrintMap`]. From 71fecfa205f5ee2423f78877cca3a60bd07cbfd8 Mon Sep 17 00:00:00 2001 From: Randy Stauner Date: Mon, 10 Nov 2025 21:10:10 -0700 Subject: [PATCH 07/10] ZJIT: Rename things so that they aren't named "not_optimized_optimized" (#15135) These refer to "OptimizedMethodType" which is a subcategory of "MethodType::Optimized" so name them after the latter to avoid "not_optimized_optimized". --- zjit/src/codegen.rs | 2 +- zjit/src/hir.rs | 4 ++-- zjit/src/stats.rs | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/zjit/src/codegen.rs b/zjit/src/codegen.rs index f27e61831b9c1f..0a19035dc1584f 100644 --- a/zjit/src/codegen.rs +++ b/zjit/src/codegen.rs @@ -1835,7 +1835,7 @@ fn gen_incr_send_fallback_counter(asm: &mut Assembler, reason: SendFallbackReaso SendWithoutBlockNotOptimizedMethodType(method_type) => { gen_incr_counter(asm, send_without_block_fallback_counter_for_method_type(method_type)); } - SendWithoutBlockNotOptimizedOptimizedMethodType(method_type) => { + SendWithoutBlockNotOptimizedMethodTypeOptimized(method_type) => { gen_incr_counter(asm, send_without_block_fallback_counter_for_optimized_method_type(method_type)); } SendNotOptimizedMethodType(method_type) => { diff --git a/zjit/src/hir.rs b/zjit/src/hir.rs index b27ccd13454938..9b4495c6e6aeec 100644 --- a/zjit/src/hir.rs +++ b/zjit/src/hir.rs @@ -592,7 +592,7 @@ pub enum SendFallbackReason { SendWithoutBlockCfuncNotVariadic, SendWithoutBlockCfuncArrayVariadic, SendWithoutBlockNotOptimizedMethodType(MethodType), - SendWithoutBlockNotOptimizedOptimizedMethodType(OptimizedMethodType), + SendWithoutBlockNotOptimizedMethodTypeOptimized(OptimizedMethodType), SendWithoutBlockDirectTooManyArgs, SendPolymorphic, SendMegamorphic, @@ -2534,7 +2534,7 @@ impl Function { }; self.make_equal_to(insn_id, replacement); } else { - 
self.set_dynamic_send_reason(insn_id, SendWithoutBlockNotOptimizedOptimizedMethodType(OptimizedMethodType::from(opt_type))); + self.set_dynamic_send_reason(insn_id, SendWithoutBlockNotOptimizedMethodTypeOptimized(OptimizedMethodType::from(opt_type))); self.push_insn_id(block, insn_id); continue; } } else { diff --git a/zjit/src/stats.rs b/zjit/src/stats.rs index ec52662086e642..6fd0ac7bb02232 100644 --- a/zjit/src/stats.rs +++ b/zjit/src/stats.rs @@ -178,7 +178,7 @@ make_counters! { send_fallback_send_without_block_cfunc_not_variadic, send_fallback_send_without_block_cfunc_array_variadic, send_fallback_send_without_block_not_optimized_method_type, - send_fallback_send_without_block_not_optimized_optimized_method_type, + send_fallback_send_without_block_not_optimized_method_type_optimized, send_fallback_send_without_block_direct_too_many_args, send_fallback_send_polymorphic, send_fallback_send_megamorphic, @@ -471,8 +471,8 @@ pub fn send_fallback_counter(reason: crate::hir::SendFallbackReason) -> Counter SendWithoutBlockCfuncNotVariadic => send_fallback_send_without_block_cfunc_not_variadic, SendWithoutBlockCfuncArrayVariadic => send_fallback_send_without_block_cfunc_array_variadic, SendWithoutBlockNotOptimizedMethodType(_) => send_fallback_send_without_block_not_optimized_method_type, - SendWithoutBlockNotOptimizedOptimizedMethodType(_) - => send_fallback_send_without_block_not_optimized_optimized_method_type, + SendWithoutBlockNotOptimizedMethodTypeOptimized(_) + => send_fallback_send_without_block_not_optimized_method_type_optimized, SendWithoutBlockDirectTooManyArgs => send_fallback_send_without_block_direct_too_many_args, SendPolymorphic => send_fallback_send_polymorphic, SendMegamorphic => send_fallback_send_megamorphic, From d3138912b8019591573c671662c1a2d9930aa034 Mon Sep 17 00:00:00 2001 From: Aaron Patterson Date: Mon, 3 Nov 2025 10:29:06 -0800 Subject: [PATCH 08/10] [ruby/rubygems] build gems directly instead of shelling out I'm trying to speed up the 
bundler tests. The tests shell out a lot in order to build gems. We can build gems without creating a sub-process. This change reduced the test suite time from ~24 minutes, to about ~21 minutes on my machine. Once we have more of these "asset generation" routines done in the same process, I think we can start caching the outputs for further improvements https://github.com/ruby/rubygems/commit/ebf27056c6 --- spec/bundler/quality_spec.rb | 4 ++-- spec/bundler/support/builders.rb | 32 ++++++++++++++++++++++---------- 2 files changed, 24 insertions(+), 12 deletions(-) diff --git a/spec/bundler/quality_spec.rb b/spec/bundler/quality_spec.rb index 34e6c26272346d..bbd6517f21dbea 100644 --- a/spec/bundler/quality_spec.rb +++ b/spec/bundler/quality_spec.rb @@ -184,8 +184,8 @@ def check_for_specific_pronouns(filename) end it "can still be built" do - with_built_bundler do |_gem_path| - expect(err).to be_empty, "bundler should build as a gem without warnings, but\n#{err}" + with_built_bundler do |gem_path| + expect(File.exist?(gem_path)).to be true end end diff --git a/spec/bundler/support/builders.rb b/spec/bundler/support/builders.rb index 3ebb7e386490bf..ef525931938bc7 100644 --- a/spec/bundler/support/builders.rb +++ b/spec/bundler/support/builders.rb @@ -2,6 +2,8 @@ require "bundler/shared_helpers" require "shellwords" +require "fileutils" +require "rubygems/package" require_relative "build_metadata" @@ -423,21 +425,30 @@ def required_ruby_version=(*reqs) end class BundlerBuilder - attr_writer :required_ruby_version + SPEC_FILE = File.join File.dirname(__FILE__), "..", "..", "bundler.gemspec" + SPEC = Gem::Specification.load(SPEC_FILE) def initialize(context, name, version) raise "can only build bundler" unless name == "bundler" @context = context - @version = version || Bundler::VERSION + @spec = SPEC.dup + @spec.version = version || Bundler::VERSION + end + + def required_ruby_version + @spec.required_ruby_version + end + + def required_ruby_version=(x) + 
@spec.required_ruby_version = x end def _build(options = {}) - full_name = "bundler-#{@version}" + full_name = "bundler-#{@spec.version}" build_path = (options[:build_path] || @context.tmp) + full_name bundler_path = build_path + "#{full_name}.gem" - require "fileutils" FileUtils.mkdir_p build_path @context.shipped_files.each do |shipped_file| @@ -449,13 +460,14 @@ def _build(options = {}) FileUtils.cp File.expand_path(shipped_file, @context.source_root), target_shipped_file, preserve: true end - @context.replace_version_file(@version, dir: build_path) - @context.replace_changelog(@version, dir: build_path) if options[:released] - @context.replace_required_ruby_version(@required_ruby_version, dir: build_path) if @required_ruby_version + @context.replace_version_file(@spec.version, dir: build_path) + @context.replace_changelog(@spec.version, dir: build_path) if options[:released] - Spec::BuildMetadata.write_build_metadata(dir: build_path, version: @version) + Spec::BuildMetadata.write_build_metadata(dir: build_path, version: @spec.version.to_s) - @context.gem_command "build #{@context.relative_gemspec}", dir: build_path + Dir.chdir build_path do + Gem::Package.build(@spec) + end if block_given? 
yield(bundler_path) @@ -659,7 +671,7 @@ def _build(opts) elsif opts[:skip_validation] @context.gem_command "build --force #{@spec.name}", dir: lib_path else - @context.gem_command "build #{@spec.name}", dir: lib_path, allowed_warning: opts[:allowed_warning] + Dir.chdir(lib_path) { Gem::Package.build(@spec) } end gem_path = File.expand_path("#{@spec.full_name}.gem", lib_path) From c477f59e3a56316f1e6112ae9417cb597df51e2c Mon Sep 17 00:00:00 2001 From: Hiroshi SHIBATA Date: Tue, 11 Nov 2025 12:03:36 +0900 Subject: [PATCH 09/10] [ruby/rubygems] Use Spec::Path.relative_gemspec https://github.com/ruby/rubygems/commit/2142e405b0 --- spec/bundler/support/builders.rb | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/spec/bundler/support/builders.rb b/spec/bundler/support/builders.rb index ef525931938bc7..4eaf40e1bf2888 100644 --- a/spec/bundler/support/builders.rb +++ b/spec/bundler/support/builders.rb @@ -425,8 +425,7 @@ def required_ruby_version=(*reqs) end class BundlerBuilder - SPEC_FILE = File.join File.dirname(__FILE__), "..", "..", "bundler.gemspec" - SPEC = Gem::Specification.load(SPEC_FILE) + SPEC = Gem::Specification.load(Spec::Path.relative_gemspec) def initialize(context, name, version) raise "can only build bundler" unless name == "bundler" From ddaa56d549768777c5ea42b0d6a11a0c4394718d Mon Sep 17 00:00:00 2001 From: Randy Stauner Date: Mon, 10 Nov 2025 22:41:35 -0700 Subject: [PATCH 10/10] Fix bootstraptest runner driver message (#15072) This was a mistake; the code tested for RUBY_PATCHLEVEL but then instead of using it used RUBY_PLATFORM twice. 
--- bootstraptest/runner.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bootstraptest/runner.rb b/bootstraptest/runner.rb index a8e67f34968b58..8988ac20ce41f6 100755 --- a/bootstraptest/runner.rb +++ b/bootstraptest/runner.rb @@ -298,7 +298,7 @@ def main if defined?(RUBY_DESCRIPTION) puts "Driver is #{RUBY_DESCRIPTION}" elsif defined?(RUBY_PATCHLEVEL) - puts "Driver is ruby #{RUBY_VERSION} (#{RUBY_RELEASE_DATE}#{RUBY_PLATFORM}) [#{RUBY_PLATFORM}]" + puts "Driver is ruby #{RUBY_VERSION} (#{RUBY_RELEASE_DATE}#{RUBY_PATCHLEVEL}) [#{RUBY_PLATFORM}]" else puts "Driver is ruby #{RUBY_VERSION} (#{RUBY_RELEASE_DATE}) [#{RUBY_PLATFORM}]" end