diff --git a/bootstraptest/test_ractor.rb b/bootstraptest/test_ractor.rb index 93c6686f0a8f4f..b1e9e3a79d02cb 100644 --- a/bootstraptest/test_ractor.rb +++ b/bootstraptest/test_ractor.rb @@ -2315,3 +2315,33 @@ def Warning.warn(msg) raise unless $msg.all?{/Ractor#take/ =~ it} $msg.size } + +# Cause lots of inline CC misses. +assert_equal 'ok', <<~'RUBY' + class A; def test; 1 + 1; end; end + class B; def test; 1 + 1; end; end + class C; def test; 1 + 1; end; end + class D; def test; 1 + 1; end; end + class E; def test; 1 + 1; end; end + class F; def test; 1 + 1; end; end + class G; def test; 1 + 1; end; end + + objs = [A.new, B.new, C.new, D.new, E.new, F.new, G.new].freeze + + def call_test(obj) + obj.test + end + + ractors = 7.times.map do + Ractor.new(objs) do |objs| + objs = objs.shuffle + 100_000.times do + objs.each do |o| + call_test(o) + end + end + end + end + ractors.each(&:join) + :ok +RUBY diff --git a/debug_counter.h b/debug_counter.h index 8ffce66f0f7b16..c8d8ed8f110988 100644 --- a/debug_counter.h +++ b/debug_counter.h @@ -49,7 +49,7 @@ RB_DEBUG_COUNTER(cc_temp) // dummy CC (stack-allocated) RB_DEBUG_COUNTER(cc_found_in_ccs) // count for CC lookup success in CCS RB_DEBUG_COUNTER(cc_not_found_in_ccs) // count for CC lookup success in CCS -RB_DEBUG_COUNTER(cc_ent_invalidate) // count for invalidating cc (cc->klass = 0) +RB_DEBUG_COUNTER(cc_ent_invalidate) // count for invalidating cc (cc->klass = Qundef) RB_DEBUG_COUNTER(cc_cme_invalidate) // count for invalidating CME RB_DEBUG_COUNTER(cc_invalidate_leaf) // count for invalidating klass if klass has no-subclasses diff --git a/ext/openssl/ossl_pkcs7.c b/ext/openssl/ossl_pkcs7.c index c53e512e884e1c..910ef9665c7919 100644 --- a/ext/openssl/ossl_pkcs7.c +++ b/ext/openssl/ossl_pkcs7.c @@ -143,11 +143,19 @@ ossl_PKCS7_SIGNER_INFO_dup(PKCS7_SIGNER_INFO *si) } static PKCS7_RECIP_INFO * -ossl_PKCS7_RECIP_INFO_dup(PKCS7_RECIP_INFO *si) +ossl_PKCS7_RECIP_INFO_dup(PKCS7_RECIP_INFO *ri) { - return ASN1_dup((i2d_of_void *)i2d_PKCS7_RECIP_INFO, - (d2i_of_void *)d2i_PKCS7_RECIP_INFO, - si); + PKCS7_RECIP_INFO *ri_new = ASN1_dup((i2d_of_void *)i2d_PKCS7_RECIP_INFO, + (d2i_of_void *)d2i_PKCS7_RECIP_INFO, + ri); + if (ri_new && ri->cert) { + if (!X509_up_ref(ri->cert)) { + PKCS7_RECIP_INFO_free(ri_new); + return NULL; + } + ri_new->cert = ri->cert; + } + return ri_new; } static VALUE @@ -510,6 +518,8 @@ ossl_pkcs7_get_detached(VALUE self) { PKCS7 *p7; GetPKCS7(self, p7); + if (!PKCS7_type_is_signed(p7)) + return Qfalse; return PKCS7_get_detached(p7) ? 
Qtrue : Qfalse; } @@ -838,30 +848,38 @@ ossl_pkcs7_add_data(VALUE self, VALUE data) PKCS7 *pkcs7; BIO *out, *in; char buf[4096]; - int len; + int len, ret; GetPKCS7(self, pkcs7); - if(PKCS7_type_is_signed(pkcs7)){ - if(!PKCS7_content_new(pkcs7, NID_pkcs7_data)) - ossl_raise(ePKCS7Error, NULL); + if (PKCS7_type_is_signed(pkcs7)) { + if (!PKCS7_content_new(pkcs7, NID_pkcs7_data)) + ossl_raise(ePKCS7Error, "PKCS7_content_new"); } in = ossl_obj2bio(&data); - if(!(out = PKCS7_dataInit(pkcs7, NULL))) goto err; - for(;;){ - if((len = BIO_read(in, buf, sizeof(buf))) <= 0) - break; - if(BIO_write(out, buf, len) != len) - goto err; + if (!(out = PKCS7_dataInit(pkcs7, NULL))) { + BIO_free(in); + ossl_raise(ePKCS7Error, "PKCS7_dataInit"); } - if(!PKCS7_dataFinal(pkcs7, out)) goto err; - ossl_pkcs7_set_data(self, Qnil); - - err: + for (;;) { + if ((len = BIO_read(in, buf, sizeof(buf))) <= 0) + break; + if (BIO_write(out, buf, len) != len) { + BIO_free_all(out); + BIO_free(in); + ossl_raise(ePKCS7Error, "BIO_write"); + } + } + if (BIO_flush(out) <= 0) { + BIO_free_all(out); + BIO_free(in); + ossl_raise(ePKCS7Error, "BIO_flush"); + } + ret = PKCS7_dataFinal(pkcs7, out); BIO_free_all(out); BIO_free(in); - if(ERR_peek_error()){ - ossl_raise(ePKCS7Error, NULL); - } + if (!ret) + ossl_raise(ePKCS7Error, "PKCS7_dataFinal"); + ossl_pkcs7_set_data(self, Qnil); return data; } diff --git a/gc.c b/gc.c index 499d0cda8d7f87..4af43edcc4cb4b 100644 --- a/gc.c +++ b/gc.c @@ -1208,7 +1208,7 @@ classext_free(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg) struct classext_foreach_args *args = (struct classext_foreach_args *)arg; rb_id_table_free(RCLASSEXT_M_TBL(ext)); - rb_cc_tbl_free(RCLASSEXT_CC_TBL(ext), args->klass); + if (!RCLASSEXT_SHARED_CONST_TBL(ext) && (tbl = RCLASSEXT_CONST_TBL(ext)) != NULL) { rb_free_const_table(tbl); } @@ -1238,7 +1238,6 @@ classext_iclass_free(rb_classext_t *ext, bool is_prime, VALUE namespace, void *a if (RCLASSEXT_CALLABLE_M_TBL(ext) != NULL) { rb_id_table_free(RCLASSEXT_CALLABLE_M_TBL(ext)); } - rb_cc_tbl_free(RCLASSEXT_CC_TBL(ext), args->klass); rb_class_classext_free_subclasses(ext, args->klass); @@ -1743,7 +1742,7 @@ rb_objspace_free_objects(void *objspace) int rb_objspace_garbage_object_p(VALUE obj) { - return rb_gc_impl_garbage_object_p(rb_gc_get_objspace(), obj); + return !SPECIAL_CONST_P(obj) && rb_gc_impl_garbage_object_p(rb_gc_get_objspace(), obj); } bool @@ -1898,8 +1897,6 @@ object_id0(VALUE obj) return object_id_get(obj, shape_id); } - // rb_shape_object_id_shape may lock if the current shape has - // multiple children. 
shape_id_t object_id_shape_id = rb_shape_transition_object_id(obj); id = generate_next_object_id(); @@ -2262,24 +2259,6 @@ rb_gc_after_updating_jit_code(void) #endif } -static enum rb_id_table_iterator_result -cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr) -{ - size_t *total_size = data_ptr; - struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr; - *total_size += sizeof(*ccs); - *total_size += sizeof(ccs->entries[0]) * ccs->capa; - return ID_TABLE_CONTINUE; -} - -static size_t -cc_table_memsize(struct rb_id_table *cc_table) -{ - size_t total = rb_id_table_memsize(cc_table); - rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total); - return total; -} - static void classext_memsize(rb_classext_t *ext, bool prime, VALUE namespace, void *arg) { @@ -2295,9 +2274,6 @@ classext_memsize(rb_classext_t *ext, bool prime, VALUE namespace, void *arg) if (RCLASSEXT_CONST_TBL(ext)) { s += rb_id_table_memsize(RCLASSEXT_CONST_TBL(ext)); } - if (RCLASSEXT_CC_TBL(ext)) { - s += cc_table_memsize(RCLASSEXT_CC_TBL(ext)); - } if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) { s += (RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1) * sizeof(VALUE); } @@ -2348,9 +2324,6 @@ rb_obj_memsize_of(VALUE obj) size += rb_id_table_memsize(RCLASS_M_TBL(obj)); } } - if (RCLASS_WRITABLE_CC_TBL(obj)) { - size += cc_table_memsize(RCLASS_WRITABLE_CC_TBL(obj)); - } break; case T_STRING: size += rb_str_memsize(obj); @@ -2835,47 +2808,6 @@ mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl) rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace); } -struct mark_cc_entry_args { - rb_objspace_t *objspace; - VALUE klass; -}; - -static enum rb_id_table_iterator_result -mark_cc_entry_i(VALUE ccs_ptr, void *data) -{ - struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr; - - VM_ASSERT(vm_ccs_p(ccs)); - - if (METHOD_ENTRY_INVALIDATED(ccs->cme)) { - rb_vm_ccs_free(ccs); - return ID_TABLE_DELETE; - } - else { - gc_mark_internal((VALUE)ccs->cme); - - for (int i=0; i<ccs->len; i++) { - VM_ASSERT(((struct mark_cc_entry_args *)data)->klass == ccs->entries[i].cc->klass); - VM_ASSERT(vm_cc_check_cme(ccs->entries[i].cc, ccs->cme)); - - gc_mark_internal((VALUE)ccs->entries[i].cc); - } - return ID_TABLE_CONTINUE; - } -} - -static void -mark_cc_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl, VALUE klass) -{ - struct mark_cc_entry_args args; - - if (!tbl) return; - - args.objspace = objspace; - args.klass = klass; - rb_id_table_foreach_values(tbl, mark_cc_entry_i, (void *)&args); -} - static enum rb_id_table_iterator_result mark_cvc_tbl_i(VALUE cvc_entry, void *objspace) { @@ -3113,7 +3045,6 @@ gc_mark_classext_module(rb_classext_t *ext, bool prime, VALUE namespace, void *a { struct gc_mark_classext_foreach_arg *foreach_arg = (struct gc_mark_classext_foreach_arg *)arg; rb_objspace_t *objspace = foreach_arg->objspace; - VALUE obj = foreach_arg->obj; if (RCLASSEXT_SUPER(ext)) { gc_mark_internal(RCLASSEXT_SUPER(ext)); @@ -3124,7 +3055,7 @@ gc_mark_classext_module(rb_classext_t *ext, bool prime, VALUE namespace, void *a mark_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext)); } mark_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext)); - mark_cc_tbl(objspace, RCLASSEXT_CC_TBL(ext), obj); + gc_mark_internal(RCLASSEXT_CC_TBL(ext)); mark_cvc_tbl(objspace, RCLASSEXT_CVC_TBL(ext)); gc_mark_internal(RCLASSEXT_CLASSPATH(ext)); } @@ -3134,7 +3065,6 @@ gc_mark_classext_iclass(rb_classext_t *ext, bool prime, VALUE namespace, void *a { struct gc_mark_classext_foreach_arg *foreach_arg = (struct gc_mark_classext_foreach_arg
*)arg; rb_objspace_t *objspace = foreach_arg->objspace; - VALUE iclass = foreach_arg->obj; if (RCLASSEXT_SUPER(ext)) { gc_mark_internal(RCLASSEXT_SUPER(ext)); @@ -3146,7 +3076,7 @@ gc_mark_classext_iclass(rb_classext_t *ext, bool prime, VALUE namespace, void *a gc_mark_internal(RCLASSEXT_INCLUDER(ext)); } mark_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext)); - mark_cc_tbl(objspace, RCLASSEXT_CC_TBL(ext), iclass); + gc_mark_internal(RCLASSEXT_CC_TBL(ext)); } #define TYPED_DATA_REFS_OFFSET_LIST(d) (size_t *)(uintptr_t)RTYPEDDATA_TYPE(d)->function.dmark @@ -3710,33 +3640,6 @@ update_m_tbl(void *objspace, struct rb_id_table *tbl) } } -static enum rb_id_table_iterator_result -update_cc_tbl_i(VALUE ccs_ptr, void *objspace) -{ - struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr; - VM_ASSERT(vm_ccs_p(ccs)); - - if (rb_gc_impl_object_moved_p(objspace, (VALUE)ccs->cme)) { - ccs->cme = (const rb_callable_method_entry_t *)gc_location_internal(objspace, (VALUE)ccs->cme); - } - - for (int i=0; i<ccs->len; i++) { - if (rb_gc_impl_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) { - ccs->entries[i].cc = (struct rb_callcache *)gc_location_internal(objspace, (VALUE)ccs->entries[i].cc); - } - } - - // do not replace - return ID_TABLE_CONTINUE; -} - -static void -update_cc_tbl(void *objspace, struct rb_id_table *tbl) -{ - if (!tbl) return; - rb_id_table_foreach_values(tbl, update_cc_tbl_i, objspace); -} - static enum rb_id_table_iterator_result update_cvc_tbl_i(VALUE cvc_entry, void *objspace) { @@ -3835,7 +3738,7 @@ update_classext(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg) if (!RCLASSEXT_SHARED_CONST_TBL(ext)) { update_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext)); } - update_cc_tbl(objspace, RCLASSEXT_CC_TBL(ext)); + UPDATE_IF_MOVED(objspace, RCLASSEXT_CC_TBL(ext)); update_cvc_tbl(objspace, RCLASSEXT_CVC_TBL(ext)); update_superclasses(objspace, ext); update_subclasses(objspace, ext); @@ -3854,7 +3757,7 @@ update_iclass_classext(rb_classext_t *ext, bool is_prime, VALUE namespace, void } update_m_tbl(objspace, RCLASSEXT_M_TBL(ext)); update_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext)); - update_cc_tbl(objspace, RCLASSEXT_CC_TBL(ext)); + UPDATE_IF_MOVED(objspace, RCLASSEXT_CC_TBL(ext)); update_subclasses(objspace, ext); update_classext_values(objspace, ext, true); @@ -4924,11 +4827,11 @@ rb_raw_obj_info_buitin_type(char *const buff, const size_t buff_size, const VALU case imemo_callcache: { const struct rb_callcache *cc = (const struct rb_callcache *)obj; - VALUE class_path = cc->klass ? rb_class_path_cached(cc->klass) : Qnil; + VALUE class_path = vm_cc_valid(cc) ? rb_class_path_cached(cc->klass) : Qnil; const rb_callable_method_entry_t *cme = vm_cc_cme(cc); APPEND_F("(klass:%s cme:%s%s (%p) call:%p", - NIL_P(class_path) ? (cc->klass ? "??" : "<NULL>") : RSTRING_PTR(class_path), + NIL_P(class_path) ? (vm_cc_valid(cc) ? "??" : "<NULL>") : RSTRING_PTR(class_path), cme ? rb_id2name(cme->called_id) : "<NULL>", cme ? (METHOD_ENTRY_INVALIDATED(cme) ?
" [inv]" : "") : "", (void *)cme, diff --git a/id_table.c b/id_table.c index c831524ff87996..b70587319182ce 100644 --- a/id_table.c +++ b/id_table.c @@ -47,7 +47,7 @@ struct rb_id_table { #if SIZEOF_VALUE == 8 #define ITEM_GET_KEY(tbl, i) ((tbl)->items[i].key) -#define ITEM_KEY_ISSET(tbl, i) ((tbl)->items[i].key) +#define ITEM_KEY_ISSET(tbl, i) ((tbl)->items && (tbl)->items[i].key) #define ITEM_COLLIDED(tbl, i) ((tbl)->items[i].collision) #define ITEM_SET_COLLIDED(tbl, i) ((tbl)->items[i].collision = 1) static inline void @@ -298,6 +302,10 @@ rb_id_table_foreach_values(struct rb_id_table *tbl, rb_id_table_foreach_values_f { int i, capa = tbl->capa; + if (!tbl->items) { + return; + } + for (i=0; i<capa; i++) { if (ITEM_KEY_ISSET(tbl, i)) { enum rb_id_table_iterator_result ret = (*func)(tbl->items[i].val, data); @@ -345,7 +349,7 @@ managed_id_table_memsize(const void *data) return rb_id_table_memsize(tbl) - sizeof(struct rb_id_table); } -static const rb_data_type_t managed_id_table_type = { +const rb_data_type_t rb_managed_id_table_type = { .wrap_struct_name = "VM/managed_id_table", .function = { .dmark = NULL, // Nothing to mark @@ -359,20 +363,26 @@ static inline struct rb_id_table * managed_id_table_ptr(VALUE obj) { RUBY_ASSERT(RB_TYPE_P(obj, T_DATA)); - RUBY_ASSERT(rb_typeddata_inherited_p(RTYPEDDATA_TYPE(obj), &managed_id_table_type)); + RUBY_ASSERT(rb_typeddata_inherited_p(RTYPEDDATA_TYPE(obj), &rb_managed_id_table_type)); return RTYPEDDATA_GET_DATA(obj); } VALUE -rb_managed_id_table_new(size_t capa) +rb_managed_id_table_create(const rb_data_type_t *type, size_t capa) { struct rb_id_table *tbl; - VALUE obj = TypedData_Make_Struct(0, struct rb_id_table, &managed_id_table_type, tbl); + VALUE obj = TypedData_Make_Struct(0, struct rb_id_table, type, tbl); rb_id_table_init(tbl, capa); return obj; } +VALUE +rb_managed_id_table_new(size_t capa) +{ + return rb_managed_id_table_create(&rb_managed_id_table_type, capa); +} + static enum rb_id_table_iterator_result managed_id_table_dup_i(ID id, VALUE val, void *data) { @@ -385,7 +395,7 @@ VALUE rb_managed_id_table_dup(VALUE old_table) { struct rb_id_table *new_tbl; - VALUE obj = TypedData_Make_Struct(0, struct rb_id_table, &managed_id_table_type, new_tbl); + VALUE obj = TypedData_Make_Struct(0, struct rb_id_table, RTYPEDDATA_TYPE(old_table), new_tbl); struct rb_id_table *old_tbl = managed_id_table_ptr(old_table); rb_id_table_init(new_tbl, old_tbl->num + 1); rb_id_table_foreach(old_tbl, managed_id_table_dup_i, new_tbl); @@ -415,3 +425,15 @@ rb_managed_id_table_foreach(VALUE table, rb_id_table_foreach_func_t *func, void { rb_id_table_foreach(managed_id_table_ptr(table), func, data); } + +void +rb_managed_id_table_foreach_values(VALUE table, rb_id_table_foreach_values_func_t *func, void *data) +{ + rb_id_table_foreach_values(managed_id_table_ptr(table), func, data); +} + +int +rb_managed_id_table_delete(VALUE table, ID id) +{ + return rb_id_table_delete(managed_id_table_ptr(table), id); +} diff --git a/id_table.h b/id_table.h index 3e8d82e64af98a..0c8cd343ee2587 100644 --- a/id_table.h +++ b/id_table.h @@ -35,12 +35,17 @@ void rb_id_table_foreach(struct rb_id_table *tbl, rb_id_table_foreach_func_t *fu void rb_id_table_foreach_values(struct rb_id_table *tbl, rb_id_table_foreach_values_func_t *func, void *data); void rb_id_table_foreach_values_with_replace(struct rb_id_table *tbl, rb_id_table_foreach_values_func_t *func, rb_id_table_update_value_callback_func_t *replace, void *data); +VALUE rb_managed_id_table_create(const rb_data_type_t *type, size_t capa); VALUE rb_managed_id_table_new(size_t capa); VALUE rb_managed_id_table_dup(VALUE table);
int rb_managed_id_table_insert(VALUE table, ID id, VALUE val); int rb_managed_id_table_lookup(VALUE table, ID id, VALUE *valp); size_t rb_managed_id_table_size(VALUE table); void rb_managed_id_table_foreach(VALUE table, rb_id_table_foreach_func_t *func, void *data); +void rb_managed_id_table_foreach_values(VALUE table, rb_id_table_foreach_values_func_t *func, void *data); +int rb_managed_id_table_delete(VALUE table, ID id); + +extern const rb_data_type_t rb_managed_id_table_type; RUBY_SYMBOL_EXPORT_BEGIN size_t rb_id_table_size(const struct rb_id_table *tbl); diff --git a/imemo.c b/imemo.c index 7153689030a7b6..7298d78d65cdb7 100644 --- a/imemo.c +++ b/imemo.c @@ -273,7 +273,7 @@ rb_imemo_memsize(VALUE obj) static bool moved_or_living_object_strictly_p(VALUE obj) { - return obj && (!rb_objspace_garbage_object_p(obj) || BUILTIN_TYPE(obj) == T_MOVED); + return !SPECIAL_CONST_P(obj) && (!rb_objspace_garbage_object_p(obj) || BUILTIN_TYPE(obj) == T_MOVED); } static void @@ -353,25 +353,19 @@ rb_imemo_mark_and_move(VALUE obj, bool reference_updating) */ struct rb_callcache *cc = (struct rb_callcache *)obj; if (reference_updating) { - if (!cc->klass) { - // already invalidated + if (moved_or_living_object_strictly_p((VALUE)cc->cme_)) { + *((VALUE *)&cc->klass) = rb_gc_location(cc->klass); + *((struct rb_callable_method_entry_struct **)&cc->cme_) = + (struct rb_callable_method_entry_struct *)rb_gc_location((VALUE)cc->cme_); } - else { - if (moved_or_living_object_strictly_p(cc->klass) && - moved_or_living_object_strictly_p((VALUE)cc->cme_)) { - *((VALUE *)&cc->klass) = rb_gc_location(cc->klass); - *((struct rb_callable_method_entry_struct **)&cc->cme_) = - (struct rb_callable_method_entry_struct *)rb_gc_location((VALUE)cc->cme_); - } - else { - vm_cc_invalidate(cc); - } + else if (vm_cc_valid(cc)) { + vm_cc_invalidate(cc); } } else { - if (cc->klass && (vm_cc_super_p(cc) || vm_cc_refinement_p(cc))) { + rb_gc_mark_weak((VALUE *)&cc->klass); + if ((vm_cc_super_p(cc) || vm_cc_refinement_p(cc))) { rb_gc_mark_movable((VALUE)cc->cme_); - rb_gc_mark_movable((VALUE)cc->klass); } } @@ -521,61 +515,6 @@ rb_free_const_table(struct rb_id_table *tbl) rb_id_table_free(tbl); } -// alive: if false, target pointers can be freed already. -static void -vm_ccs_free(struct rb_class_cc_entries *ccs, int alive, VALUE klass) -{ - if (ccs->entries) { - for (int i=0; i<ccs->len; i++) { - const struct rb_callcache *cc = ccs->entries[i].cc; - if (!alive) { - // ccs can be free'ed. - if (rb_gc_pointer_to_heap_p((VALUE)cc) && - !rb_objspace_garbage_object_p((VALUE)cc) && - IMEMO_TYPE_P(cc, imemo_callcache) && - cc->klass == klass) { - // OK. maybe target cc.
- } - else { - continue; - } - } - - VM_ASSERT(!vm_cc_super_p(cc) && !vm_cc_refinement_p(cc)); - vm_cc_invalidate(cc); - } - ruby_xfree(ccs->entries); - } - ruby_xfree(ccs); -} - -void -rb_vm_ccs_free(struct rb_class_cc_entries *ccs) -{ - RB_DEBUG_COUNTER_INC(ccs_free); - vm_ccs_free(ccs, true, Qundef); -} - -static enum rb_id_table_iterator_result -cc_tbl_free_i(VALUE ccs_ptr, void *data) -{ - struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr; - VALUE klass = (VALUE)data; - VM_ASSERT(vm_ccs_p(ccs)); - - vm_ccs_free(ccs, false, klass); - - return ID_TABLE_CONTINUE; -} - -void -rb_cc_tbl_free(struct rb_id_table *cc_tbl, VALUE klass) -{ - if (!cc_tbl) return; - rb_id_table_foreach_values(cc_tbl, cc_tbl_free_i, (void *)klass); - rb_id_table_free(cc_tbl); -} - static inline void imemo_fields_free(struct rb_fields *fields) { diff --git a/internal/class.h b/internal/class.h index 520994170faa59..328d650e8bb31a 100644 --- a/internal/class.h +++ b/internal/class.h @@ -83,7 +83,7 @@ struct rb_classext_struct { struct rb_id_table *m_tbl; struct rb_id_table *const_tbl; struct rb_id_table *callable_m_tbl; - struct rb_id_table *cc_tbl; /* ID -> [[ci1, cc1], [ci2, cc2] ...] */ + VALUE cc_tbl; /* { ID => { cme, [cc1, cc2, ...] }, ... } */ struct rb_id_table *cvc_tbl; VALUE *superclasses; /** @@ -262,7 +262,7 @@ static inline void RCLASS_WRITE_SUPER(VALUE klass, VALUE super); static inline void RCLASS_SET_CONST_TBL(VALUE klass, struct rb_id_table *table, bool shared); static inline void RCLASS_WRITE_CONST_TBL(VALUE klass, struct rb_id_table *table, bool shared); static inline void RCLASS_WRITE_CALLABLE_M_TBL(VALUE klass, struct rb_id_table *table); -static inline void RCLASS_WRITE_CC_TBL(VALUE klass, struct rb_id_table *table); +static inline void RCLASS_WRITE_CC_TBL(VALUE klass, VALUE table); static inline void RCLASS_SET_CVC_TBL(VALUE klass, struct rb_id_table *table); static inline void RCLASS_WRITE_CVC_TBL(VALUE klass, struct rb_id_table *table); @@ -628,9 +628,9 @@ RCLASS_WRITE_CALLABLE_M_TBL(VALUE klass, struct rb_id_table *table) } static inline void -RCLASS_WRITE_CC_TBL(VALUE klass, struct rb_id_table *table) +RCLASS_WRITE_CC_TBL(VALUE klass, VALUE table) { - RCLASSEXT_CC_TBL(RCLASS_EXT_WRITABLE(klass)) = table; + RB_OBJ_WRITE(klass, &RCLASSEXT_CC_TBL(RCLASS_EXT_WRITABLE(klass)), table); } static inline void diff --git a/internal/imemo.h b/internal/imemo.h index 0ad00fe6b79d99..5dd2d04fa4d0ab 100644 --- a/internal/imemo.h +++ b/internal/imemo.h @@ -148,7 +148,6 @@ static inline void MEMO_V2_SET(struct MEMO *m, VALUE v); size_t rb_imemo_memsize(VALUE obj); void rb_imemo_mark_and_move(VALUE obj, bool reference_updating); -void rb_cc_tbl_free(struct rb_id_table *cc_tbl, VALUE klass); void rb_imemo_free(VALUE obj); RUBY_SYMBOL_EXPORT_BEGIN diff --git a/iseq.c b/iseq.c index c0523f61d70de7..4334bdd7953970 100644 --- a/iseq.c +++ b/iseq.c @@ -325,15 +325,13 @@ cc_is_active(const struct rb_callcache *cc, bool reference_updating) cc = (const struct rb_callcache *)rb_gc_location((VALUE)cc); } - if (vm_cc_markable(cc)) { - if (cc->klass) { // cc is not invalidated - const struct rb_callable_method_entry_struct *cme = vm_cc_cme(cc); - if (reference_updating) { - cme = (const struct rb_callable_method_entry_struct *)rb_gc_location((VALUE)cme); - } - if (!METHOD_ENTRY_INVALIDATED(cme)) { - return true; - } + if (vm_cc_markable(cc) && vm_cc_valid(cc)) { + const struct rb_callable_method_entry_struct *cme = vm_cc_cme(cc); + if (reference_updating) { + cme = (const struct 
rb_callable_method_entry_struct *)rb_gc_location((VALUE)cme); + } + if (!METHOD_ENTRY_INVALIDATED(cme)) { + return true; } } } diff --git a/method.h b/method.h index 6abf2495b0df38..8328b86ee96102 100644 --- a/method.h +++ b/method.h @@ -259,6 +259,6 @@ void rb_vm_delete_cc_refinement(const struct rb_callcache *cc); void rb_clear_method_cache(VALUE klass_or_module, ID mid); void rb_clear_all_refinement_method_cache(void); -void rb_invalidate_method_caches(struct rb_id_table *cm_tbl, struct rb_id_table *cc_tbl); +void rb_invalidate_method_caches(struct rb_id_table *cm_tbl, VALUE cc_tbl); #endif /* RUBY_METHOD_H */ diff --git a/shape.c b/shape.c index e296ab2d8fa741..6e1b49352f922b 100644 --- a/shape.c +++ b/shape.c @@ -716,6 +716,7 @@ shape_transition_object_id(shape_id_t original_shape_id) rb_shape_t *shape = get_next_shape_internal(RSHAPE(original_shape_id), id_object_id, SHAPE_OBJ_ID, &dont_care, true); if (!shape) { shape = RSHAPE(ROOT_SHAPE_WITH_OBJ_ID); + return transition_complex(shape_id(shape, original_shape_id) | SHAPE_ID_FL_HAS_OBJECT_ID); } RUBY_ASSERT(shape); diff --git a/test/openssl/test_pkcs7.rb b/test/openssl/test_pkcs7.rb index 5245ec1a6c7657..85ee68c6d18fde 100644 --- a/test/openssl/test_pkcs7.rb +++ b/test/openssl/test_pkcs7.rb @@ -250,6 +250,28 @@ def test_enveloped } end + def test_enveloped_add_recipient + omit_on_fips # PKCS #1 v1.5 padding + + data = "aaaaa\nbbbbb\nccccc\n" + ktri_ee1 = OpenSSL::PKCS7::RecipientInfo.new(@ee1_cert) + ktri_ee2 = OpenSSL::PKCS7::RecipientInfo.new(@ee2_cert) + + tmp = OpenSSL::PKCS7.new + tmp.type = :enveloped + tmp.cipher = "AES-128-CBC" + tmp.add_recipient(ktri_ee1) + tmp.add_recipient(ktri_ee2) + tmp.add_data(data) + + p7 = OpenSSL::PKCS7.new(tmp.to_der) + assert_equal(:enveloped, p7.type) + assert_equal(data, p7.decrypt(@ee1_key, @ee1_cert)) + assert_equal(data, p7.decrypt(@ee2_key, @ee2_cert)) + assert_equal([@ee1_cert.serial, @ee2_cert.serial].sort, + p7.recipients.map(&:serial).sort) + end + def test_data asn1 = OpenSSL::ASN1::Sequence([ OpenSSL::ASN1::ObjectId("pkcs7-data"), @@ -265,6 +287,7 @@ def test_data p7 = OpenSSL::PKCS7.new(asn1) assert_equal(:data, p7.type) + assert_equal(false, p7.detached) assert_equal(false, p7.detached?) 
# Not applicable assert_nil(p7.certificates) @@ -317,12 +340,6 @@ def test_set_type_signed_and_enveloped assert_equal(:signedAndEnveloped, p7.type) end - def test_set_type_enveloped - p7 = OpenSSL::PKCS7.new - p7.type = "enveloped" - assert_equal(:enveloped, p7.type) - end - def test_set_type_encrypted p7 = OpenSSL::PKCS7.new p7.type = "encrypted" diff --git a/test/ruby/test_shapes.rb b/test/ruby/test_shapes.rb index a4cf23c6d5ae89..ed55b95c3edfde 100644 --- a/test/ruby/test_shapes.rb +++ b/test/ruby/test_shapes.rb @@ -655,6 +655,14 @@ class TooComplex end def test_object_id_transition_too_complex + assert_separately([], "#{<<~"begin;"}\n#{<<~'end;'}") + begin; + obj = Object.new + obj.instance_variable_set(:@a, 1) + RubyVM::Shape.exhaust_shapes + assert_equal obj.object_id, obj.object_id + end; + assert_separately([], "#{<<~"begin;"}\n#{<<~'end;'}") begin; class Hi; end diff --git a/vm.c b/vm.c index c46efafb0db0c1..9284a2ce69ffe3 100644 --- a/vm.c +++ b/vm.c @@ -607,7 +607,7 @@ rb_serial_t ruby_vm_global_cvar_state = 1; static const struct rb_callcache vm_empty_cc = { .flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE, - .klass = Qfalse, + .klass = Qundef, .cme_ = NULL, .call_ = vm_call_general, .aux_ = { @@ -617,7 +617,7 @@ static const struct rb_callcache vm_empty_cc = { static const struct rb_callcache vm_empty_cc_for_super = { .flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE, - .klass = Qfalse, + .klass = Qundef, .cme_ = NULL, .call_ = vm_call_super_method, .aux_ = { diff --git a/vm_callinfo.h b/vm_callinfo.h index 0ce25c2c0f89dc..b95d9a99db7772 100644 --- a/vm_callinfo.h +++ b/vm_callinfo.h @@ -279,9 +279,7 @@ struct rb_callcache { const VALUE flags; /* inline cache: key */ - const VALUE klass; // should not mark it because klass can not be free'd - // because of this marking. When klass is collected, - // cc will be cleared (cc->klass = 0) at vm_ccs_free(). + const VALUE klass; // Weak reference. When klass is collected, `cc->klass = Qundef`. 
/* inline cache: values */ const struct rb_callable_method_entry_struct * const cme_; @@ -324,12 +322,24 @@ vm_cc_attr_index_initialize(const struct rb_callcache *cc, shape_id_t shape_id) vm_cc_attr_index_set(cc, (attr_index_t)-1, shape_id); } +static inline VALUE +cc_check_class(VALUE klass) +{ + VM_ASSERT(klass == Qundef || RB_TYPE_P(klass, T_CLASS) || RB_TYPE_P(klass, T_ICLASS)); + return klass; +} + +VALUE rb_vm_cc_table_create(size_t capa); +VALUE rb_vm_cc_table_dup(VALUE old_table); +void rb_vm_cc_table_delete(VALUE table, ID mid); + static inline const struct rb_callcache * vm_cc_new(VALUE klass, const struct rb_callable_method_entry_struct *cme, vm_call_handler call, enum vm_cc_type type) { + cc_check_class(klass); struct rb_callcache *cc = IMEMO_NEW(struct rb_callcache, imemo_callcache, klass); *((struct rb_callable_method_entry_struct **)&cc->cme_) = (struct rb_callable_method_entry_struct *)cme; *((vm_call_handler *)&cc->call_) = call; @@ -374,7 +384,7 @@ vm_cc_refinement_p(const struct rb_callcache *cc) (imemo_callcache << FL_USHIFT) | \ VM_CALLCACHE_UNMARKABLE | \ VM_CALLCACHE_ON_STACK, \ - .klass = clazz, \ + .klass = cc_check_class(clazz), \ .cme_ = cme, \ .call_ = call, \ .aux_ = aux, \ @@ -384,8 +394,7 @@ static inline bool vm_cc_class_check(const struct rb_callcache *cc, VALUE klass) { VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache)); - VM_ASSERT(cc->klass == 0 || - RB_TYPE_P(cc->klass, T_CLASS) || RB_TYPE_P(cc->klass, T_ICLASS)); + VM_ASSERT(cc_check_class(cc->klass)); return cc->klass == klass; } @@ -396,6 +405,15 @@ vm_cc_markable(const struct rb_callcache *cc) return FL_TEST_RAW((VALUE)cc, VM_CALLCACHE_UNMARKABLE) == 0; } +static inline bool +vm_cc_valid(const struct rb_callcache *cc) +{ + VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache)); + VM_ASSERT(cc_check_class(cc->klass)); + + return !UNDEF_P(cc->klass); +} + static inline const struct rb_callable_method_entry_struct * vm_cc_cme(const struct rb_callcache *cc) { @@ -447,7 +465,7 @@ vm_cc_cmethod_missing_reason(const struct rb_callcache *cc) static inline bool vm_cc_invalidated_p(const struct rb_callcache *cc) { - if (cc->klass && !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc))) { + if (vm_cc_valid(cc) && !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc))) { return false; } else { @@ -543,9 +561,9 @@ vm_cc_invalidate(const struct rb_callcache *cc) { VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache)); VM_ASSERT(cc != vm_cc_empty()); - VM_ASSERT(cc->klass != 0); // should be enable + VM_ASSERT(cc->klass != Qundef); // should be enable - *(VALUE *)&cc->klass = 0; + *(VALUE *)&cc->klass = Qundef; RB_DEBUG_COUNTER_INC(cc_ent_invalidate); } @@ -584,11 +602,14 @@ vm_ccs_p(const struct rb_class_cc_entries *ccs) static inline bool vm_cc_check_cme(const struct rb_callcache *cc, const rb_callable_method_entry_t *cme) { - if (vm_cc_cme(cc) == cme || - (cme->def->iseq_overload && vm_cc_cme(cc) == rb_vm_lookup_overloaded_cme(cme))) { + bool valid; + RB_VM_LOCKING() { + valid = vm_cc_cme(cc) == cme || + (cme->def->iseq_overload && vm_cc_cme(cc) == rb_vm_lookup_overloaded_cme(cme)); + } + if (valid) { return true; } - else { #if 1 // debug print @@ -600,13 +621,9 @@ vm_cc_check_cme(const struct rb_callcache *cc, const rb_callable_method_entry_t rp(vm_cc_cme(cc)); rp(rb_vm_lookup_overloaded_cme(cme)); #endif - return false; - } + return false; } #endif -// gc.c -void rb_vm_ccs_free(struct rb_class_cc_entries *ccs); - #endif /* RUBY_VM_CALLINFO_H */ diff --git a/vm_eval.c b/vm_eval.c index dff0e47c7e1290..68b692ac9c4112 100644 --- a/vm_eval.c +++ b/vm_eval.c @@ 
-57,7 +57,7 @@ static inline VALUE vm_call0_cc(rb_execution_context_t *ec, VALUE recv, ID id, i VALUE rb_vm_call0(rb_execution_context_t *ec, VALUE recv, ID id, int argc, const VALUE *argv, const rb_callable_method_entry_t *cme, int kw_splat) { - const struct rb_callcache cc = VM_CC_ON_STACK(Qfalse, vm_call_general, {{ 0 }}, cme); + const struct rb_callcache cc = VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme); return vm_call0_cc(ec, recv, id, argc, argv, &cc, kw_splat); } @@ -104,7 +104,7 @@ vm_call0_cc(rb_execution_context_t *ec, VALUE recv, ID id, int argc, const VALUE static VALUE vm_call0_cme(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv, const rb_callable_method_entry_t *cme) { - calling->cc = &VM_CC_ON_STACK(Qfalse, vm_call_general, {{ 0 }}, cme); + calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme); return vm_call0_body(ec, calling, argv); } diff --git a/vm_insnhelper.c b/vm_insnhelper.c index 0fef0e0976981e..13d4cd5a3fc2e9 100644 --- a/vm_insnhelper.c +++ b/vm_insnhelper.c @@ -1979,7 +1979,7 @@ static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg static VALUE vm_mtbl_dump(VALUE klass, ID target_mid); static struct rb_class_cc_entries * -vm_ccs_create(VALUE klass, struct rb_id_table *cc_tbl, ID mid, const rb_callable_method_entry_t *cme) +vm_ccs_create(VALUE klass, VALUE cc_tbl, ID mid, const rb_callable_method_entry_t *cme) { struct rb_class_cc_entries *ccs = ALLOC(struct rb_class_cc_entries); #if VM_CHECK_MODE > 0 @@ -1991,13 +1991,13 @@ vm_ccs_create(VALUE klass, struct rb_id_table *cc_tbl, ID mid, const rb_callable METHOD_ENTRY_CACHED_SET((rb_callable_method_entry_t *)cme); ccs->entries = NULL; - rb_id_table_insert(cc_tbl, mid, (VALUE)ccs); - RB_OBJ_WRITTEN(klass, Qundef, cme); + rb_managed_id_table_insert(cc_tbl, mid, (VALUE)ccs); + RB_OBJ_WRITTEN(cc_tbl, Qundef, cme); return ccs; } static void -vm_ccs_push(VALUE klass, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc) +vm_ccs_push(VALUE cc_tbl, struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci, const struct rb_callcache *cc) { if (! vm_cc_markable(cc)) { return; @@ -2018,7 +2018,7 @@ vm_ccs_push(VALUE klass, struct rb_class_cc_entries *ccs, const struct rb_callin const int pos = ccs->len++; ccs->entries[pos].argc = vm_ci_argc(ci); ccs->entries[pos].flag = vm_ci_flag(ci); - RB_OBJ_WRITE(klass, &ccs->entries[pos].cc, cc); + RB_OBJ_WRITE(cc_tbl, &ccs->entries[pos].cc, cc); if (RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, ccs->len)) { // for tuning @@ -2060,58 +2060,49 @@ vm_ccs_verify(struct rb_class_cc_entries *ccs, ID mid, VALUE klass) const rb_callable_method_entry_t *rb_check_overloaded_cme(const rb_callable_method_entry_t *cme, const struct rb_callinfo * const ci); -static const struct rb_callcache * -vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci) +static void +vm_evict_cc(VALUE klass, VALUE cc_tbl, ID mid) { - const ID mid = vm_ci_mid(ci); - struct rb_id_table *cc_tbl = RCLASS_WRITABLE_CC_TBL(klass); - struct rb_class_cc_entries *ccs = NULL; - VALUE ccs_data; + ASSERT_vm_locking(); - if (cc_tbl) { - // CCS data is keyed on method id, so we don't need the method id - // for doing comparisons in the `for` loop below. 
- if (rb_id_table_lookup(cc_tbl, mid, &ccs_data)) { - ccs = (struct rb_class_cc_entries *)ccs_data; - const int ccs_len = ccs->len; - - if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) { - rb_vm_ccs_free(ccs); - rb_id_table_delete(cc_tbl, mid); - ccs = NULL; - } - else { - VM_ASSERT(vm_ccs_verify(ccs, mid, klass)); + if (rb_multi_ractor_p()) { + if (RCLASS_WRITABLE_CC_TBL(klass) != cc_tbl) { + // Another ractor updated the CC table while we were waiting on the VM lock. + // We have to retry. + return; + } - // We already know the method id is correct because we had - // to look up the ccs_data by method id. All we need to - // compare is argc and flag - unsigned int argc = vm_ci_argc(ci); - unsigned int flag = vm_ci_flag(ci); + struct rb_class_cc_entries *ccs = NULL; + rb_managed_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs); - for (int i=0; i<ccs_len; i++) { - unsigned int ccs_ci_argc = ccs->entries[i].argc; - unsigned int ccs_ci_flag = ccs->entries[i].flag; - const struct rb_callcache *ccs_cc = ccs->entries[i].cc; + if (!ccs || !METHOD_ENTRY_INVALIDATED(ccs->cme)) { + // Another ractor replaced that entry while we were waiting on the VM lock. + return; + } - VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache)); + VALUE new_table = rb_vm_cc_table_dup(cc_tbl); + rb_vm_cc_table_delete(new_table, mid); + RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), new_table); + } + else { + rb_vm_cc_table_delete(cc_tbl, mid); + } +} - if (ccs_ci_argc == argc && ccs_ci_flag == flag) { - RB_DEBUG_COUNTER_INC(cc_found_in_ccs); +static const struct rb_callcache * +vm_populate_cc(VALUE klass, const struct rb_callinfo * const ci, ID mid) +{ + ASSERT_vm_locking(); - VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid); - VM_ASSERT(ccs_cc->klass == klass); - VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc))); + VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass); + const VALUE original_cc_table = cc_tbl; + struct rb_class_cc_entries *ccs = NULL; - return ccs_cc; - } - } - } - } + if (!cc_tbl) { + cc_tbl = rb_vm_cc_table_create(1); } - else { - cc_tbl = rb_id_table_create(2); - RCLASS_WRITE_CC_TBL(klass, cc_tbl); + else if (rb_multi_ractor_p()) { + cc_tbl = rb_vm_cc_table_dup(cc_tbl); } RB_DEBUG_COUNTER_INC(cc_not_found_in_ccs); @@ -2141,13 +2132,9 @@ vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci) METHOD_ENTRY_CACHED_SET((struct rb_callable_method_entry_struct *)cme); if (ccs == NULL) { - VM_ASSERT(cc_tbl != NULL); + VM_ASSERT(cc_tbl); - if (LIKELY(rb_id_table_lookup(cc_tbl, mid, &ccs_data))) { - // rb_callable_method_entry() prepares ccs. - ccs = (struct rb_class_cc_entries *)ccs_data; - } - else { + if (!LIKELY(rb_managed_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs))) { // TODO: required?
ccs = vm_ccs_create(klass, cc_tbl, mid, cme); } @@ -2156,12 +2143,97 @@ vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci) cme = rb_check_overloaded_cme(cme, ci); const struct rb_callcache *cc = vm_cc_new(klass, cme, vm_call_general, cc_type_normal); - vm_ccs_push(klass, ccs, ci, cc); + vm_ccs_push(cc_tbl, ccs, ci, cc); VM_ASSERT(vm_cc_cme(cc) != NULL); VM_ASSERT(cme->called_id == mid); VM_ASSERT(vm_cc_cme(cc)->called_id == mid); + if (original_cc_table != cc_tbl) { + RB_OBJ_ATOMIC_WRITE(klass, &RCLASS_WRITABLE_CC_TBL(klass), cc_tbl); + } + + return cc; +} + +static const struct rb_callcache * +vm_lookup_cc(const VALUE klass, const struct rb_callinfo * const ci, ID mid) +{ + VALUE cc_tbl; + struct rb_class_cc_entries *ccs; +retry: + cc_tbl = RUBY_ATOMIC_VALUE_LOAD(RCLASS_WRITABLE_CC_TBL(klass)); + ccs = NULL; + + if (cc_tbl) { + // CCS data is keyed on method id, so we don't need the method id + // for doing comparisons in the `for` loop below. + + if (rb_managed_id_table_lookup(cc_tbl, mid, (VALUE *)&ccs)) { + const int ccs_len = ccs->len; + + if (UNLIKELY(METHOD_ENTRY_INVALIDATED(ccs->cme))) { + RB_VM_LOCKING() { + vm_evict_cc(klass, cc_tbl, mid); + } + goto retry; + } + else { + VM_ASSERT(vm_ccs_verify(ccs, mid, klass)); + + // We already know the method id is correct because we had + // to look up the ccs_data by method id. All we need to + // compare is argc and flag + unsigned int argc = vm_ci_argc(ci); + unsigned int flag = vm_ci_flag(ci); + + for (int i=0; i<ccs_len; i++) { + unsigned int ccs_ci_argc = ccs->entries[i].argc; + unsigned int ccs_ci_flag = ccs->entries[i].flag; + const struct rb_callcache *ccs_cc = ccs->entries[i].cc; + + VM_ASSERT(IMEMO_TYPE_P(ccs_cc, imemo_callcache)); + + if (ccs_ci_argc == argc && ccs_ci_flag == flag) { + RB_DEBUG_COUNTER_INC(cc_found_in_ccs); + + VM_ASSERT(vm_cc_cme(ccs_cc)->called_id == mid); + VM_ASSERT(ccs_cc->klass == klass); + VM_ASSERT(!METHOD_ENTRY_INVALIDATED(vm_cc_cme(ccs_cc))); + + return ccs_cc; + } + } + } + } + } + + RB_GC_GUARD(cc_tbl); + return NULL; +} + +static const struct rb_callcache * +vm_search_cc(const VALUE klass, const struct rb_callinfo * const ci) +{ + const ID mid = vm_ci_mid(ci); + + const struct rb_callcache *cc = vm_lookup_cc(klass, ci, mid); + if (cc) { + return cc; + } + + RB_VM_LOCKING() { + if (rb_multi_ractor_p()) { + // The CC may have been populated by another ractor while we were waiting on the lock, + // so we must lookup a second time.
+ cc = vm_lookup_cc(klass, ci, mid); + } + + if (!cc) { + cc = vm_populate_cc(klass, ci, mid); + } + } + return cc; } @@ -2172,16 +2244,14 @@ rb_vm_search_method_slowpath(const struct rb_callinfo *ci, VALUE klass) VM_ASSERT_TYPE2(klass, T_CLASS, T_ICLASS); - RB_VM_LOCKING() { - cc = vm_search_cc(klass, ci); + cc = vm_search_cc(klass, ci); - VM_ASSERT(cc); - VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache)); - VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass); - VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc))); - VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc))); - VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci)); - } + VM_ASSERT(cc); + VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache)); + VM_ASSERT(cc == vm_cc_empty() || cc->klass == klass); + VM_ASSERT(cc == vm_cc_empty() || callable_method_entry_p(vm_cc_cme(cc))); + VM_ASSERT(cc == vm_cc_empty() || !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc))); + VM_ASSERT(cc == vm_cc_empty() || vm_cc_cme(cc)->called_id == vm_ci_mid(ci)); return cc; } diff --git a/vm_method.c b/vm_method.c index faf327b36c7283..4788d54d2db343 100644 --- a/vm_method.c +++ b/vm_method.c @@ -22,6 +22,184 @@ static inline rb_method_entry_t *lookup_method_table(VALUE klass, ID id); #define ruby_running (GET_VM()->running) /* int ruby_running = 0; */ +static void +vm_ccs_free(struct rb_class_cc_entries *ccs) +{ + if (ccs->entries) { + ruby_xfree(ccs->entries); + } + ruby_xfree(ccs); +} + +static enum rb_id_table_iterator_result +mark_cc_entry_i(VALUE ccs_ptr, void *data) +{ + struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr; + + VM_ASSERT(vm_ccs_p(ccs)); + + if (METHOD_ENTRY_INVALIDATED(ccs->cme)) { + vm_ccs_free(ccs); + return ID_TABLE_DELETE; + } + else { + rb_gc_mark_movable((VALUE)ccs->cme); + + for (int i=0; i<ccs->len; i++) { + VM_ASSERT(vm_cc_check_cme(ccs->entries[i].cc, ccs->cme)); + + rb_gc_mark_movable((VALUE)ccs->entries[i].cc); + } + return ID_TABLE_CONTINUE; + } +} + +static void +vm_cc_table_mark(void *data) +{ + struct rb_id_table *tbl = (struct rb_id_table *)data; + if (tbl) { + rb_id_table_foreach_values(tbl, mark_cc_entry_i, NULL); + } +} + +static enum rb_id_table_iterator_result +cc_table_free_i(VALUE ccs_ptr, void *data) +{ + struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr; + VM_ASSERT(vm_ccs_p(ccs)); + + vm_ccs_free(ccs); + + return ID_TABLE_CONTINUE; +} + +static void +vm_cc_table_free(void *data) +{ + struct rb_id_table *tbl = (struct rb_id_table *)data; + + rb_id_table_foreach_values(tbl, cc_table_free_i, NULL); + rb_managed_id_table_type.function.dfree(data); +} + +static enum rb_id_table_iterator_result +cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr) +{ + size_t *total_size = data_ptr; + struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr; + *total_size += sizeof(*ccs); + *total_size += sizeof(ccs->entries[0]) * ccs->capa; + return ID_TABLE_CONTINUE; +} + +static size_t +vm_cc_table_memsize(const void *data) +{ + size_t memsize = rb_managed_id_table_type.function.dsize(data); + struct rb_id_table *tbl = (struct rb_id_table *)data; + rb_id_table_foreach_values(tbl, cc_table_memsize_i, &memsize); + return memsize; +} + +static enum rb_id_table_iterator_result +compact_cc_entry_i(VALUE ccs_ptr, void *data) +{ + struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr; + + ccs->cme = (const struct rb_callable_method_entry_struct *)rb_gc_location((VALUE)ccs->cme); + VM_ASSERT(vm_ccs_p(ccs)); + + for (int i=0; 
i<ccs->len; i++) { + ccs->entries[i].cc = (const struct rb_callcache *)rb_gc_location((VALUE)ccs->entries[i].cc); + } + + return ID_TABLE_CONTINUE; +} + +static void +vm_cc_table_compact(void *data) +{ + struct rb_id_table *tbl = (struct rb_id_table *)data; + rb_id_table_foreach_values(tbl, compact_cc_entry_i, NULL); +} + +static const rb_data_type_t cc_table_type = { + .wrap_struct_name = "VM/cc_table", + .function = { + .dmark = vm_cc_table_mark, + .dfree = vm_cc_table_free, + .dsize = vm_cc_table_memsize, + .dcompact = vm_cc_table_compact, + }, + .parent = &rb_managed_id_table_type, + .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE, +}; + +VALUE +rb_vm_cc_table_create(size_t capa) +{ + return rb_managed_id_table_create(&cc_table_type, capa); +} + +static enum rb_id_table_iterator_result +vm_cc_table_dup_i(ID key, VALUE old_ccs_ptr, void *data) +{ + struct rb_class_cc_entries *old_ccs = (struct rb_class_cc_entries *)old_ccs_ptr; + struct rb_class_cc_entries *new_ccs = ALLOC(struct rb_class_cc_entries); + MEMCPY(new_ccs, old_ccs, struct rb_class_cc_entries, 1); +#if VM_CHECK_MODE > 0 + new_ccs->debug_sig = ~(VALUE)new_ccs; +#endif + new_ccs->entries = ALLOC_N(struct rb_class_cc_entries_entry, new_ccs->capa); + MEMCPY(new_ccs->entries, old_ccs->entries, struct rb_class_cc_entries_entry, new_ccs->capa); + + VALUE new_table = (VALUE)data; + rb_managed_id_table_insert(new_table, key, (VALUE)new_ccs); + for (int index = 0; index < new_ccs->len; index++) { + RB_OBJ_WRITTEN(new_table, Qundef, new_ccs->entries[index].cc); + } + return ID_TABLE_CONTINUE; +} + +VALUE +rb_vm_cc_table_dup(VALUE old_table) +{ + VALUE new_table = rb_vm_cc_table_create(rb_managed_id_table_size(old_table)); + rb_managed_id_table_foreach(old_table, vm_cc_table_dup_i, (void *)new_table); + return new_table; +} + +static void +vm_ccs_invalidate(struct rb_class_cc_entries *ccs) +{ + if (ccs->entries) { + for (int i=0; i<ccs->len; i++) { + const struct rb_callcache *cc = ccs->entries[i].cc; + VM_ASSERT(!vm_cc_super_p(cc) && !vm_cc_refinement_p(cc)); + vm_cc_invalidate(cc); + } + } +} + +void +rb_vm_ccs_invalidate_and_free(struct rb_class_cc_entries *ccs) +{ + RB_DEBUG_COUNTER_INC(ccs_free); + vm_ccs_invalidate(ccs); + vm_ccs_free(ccs); +} + +void +rb_vm_cc_table_delete(VALUE table, ID mid) +{ + struct rb_class_cc_entries *ccs; + if (rb_managed_id_table_lookup(table, mid, (VALUE *)&ccs)) { + rb_managed_id_table_delete(table, mid); + rb_vm_ccs_invalidate_and_free(ccs); + } +} + static enum rb_id_table_iterator_result vm_ccs_dump_i(ID mid, VALUE val, void *data) { @@ -39,18 +217,18 @@ vm_ccs_dump_i(ID mid, VALUE val, void *data) static void vm_ccs_dump(VALUE klass, ID target_mid) { - struct rb_id_table *cc_tbl = RCLASS_WRITABLE_CC_TBL(klass); + VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass); if (cc_tbl) { VALUE ccs; if (target_mid) { - if (rb_id_table_lookup(cc_tbl, target_mid, &ccs)) { + if (rb_managed_id_table_lookup(cc_tbl, target_mid, &ccs)) { fprintf(stderr, " [CCTB] %p\n", (void *)cc_tbl); vm_ccs_dump_i(target_mid, ccs, NULL); } } else { fprintf(stderr, " [CCTB] %p\n", (void *)cc_tbl); - rb_id_table_foreach(cc_tbl, vm_ccs_dump_i, (void *)target_mid); + rb_managed_id_table_foreach(cc_tbl, vm_ccs_dump_i, (void *)target_mid); } } } @@ -169,15 +347,15 @@ static const rb_callable_method_entry_t *complemented_callable_method_entry(VALU static const rb_callable_method_entry_t *lookup_overloaded_cme(const rb_callable_method_entry_t *cme); static void -invalidate_method_cache_in_cc_table(struct rb_id_table
*tbl, ID mid) +invalidate_method_cache_in_cc_table(VALUE tbl, ID mid) { VALUE ccs_data; - if (tbl && rb_id_table_lookup(tbl, mid, &ccs_data)) { + if (tbl && rb_managed_id_table_lookup(tbl, mid, &ccs_data)) { struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_data; rb_yjit_cme_invalidate((rb_callable_method_entry_t *)ccs->cme); if (NIL_P(ccs->cme->owner)) invalidate_negative_cache(mid); - rb_vm_ccs_free(ccs); - rb_id_table_delete(tbl, mid); + rb_vm_ccs_invalidate_and_free(ccs); + rb_managed_id_table_delete(tbl, mid); RB_DEBUG_COUNTER_INC(cc_invalidate_leaf_ccs); } } @@ -253,7 +431,7 @@ clear_method_cache_by_id_in_class(VALUE klass, ID mid) // check only current class // invalidate CCs - struct rb_id_table *cc_tbl = RCLASS_WRITABLE_CC_TBL(klass); + VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass); invalidate_method_cache_in_cc_table(cc_tbl, mid); if (RCLASS_CC_TBL_NOT_PRIME_P(klass, cc_tbl)) { invalidate_method_cache_in_cc_table(RCLASS_PRIME_CC_TBL(klass), mid); @@ -385,13 +563,13 @@ invalidate_ccs_in_iclass_cc_tbl(VALUE value, void *data) } void -rb_invalidate_method_caches(struct rb_id_table *cm_tbl, struct rb_id_table *cc_tbl) +rb_invalidate_method_caches(struct rb_id_table *cm_tbl, VALUE cc_tbl) { if (cm_tbl) { rb_id_table_foreach_values(cm_tbl, invalidate_method_entry_in_iclass_callable_m_tbl, NULL); } if (cc_tbl) { - rb_id_table_foreach_values(cc_tbl, invalidate_ccs_in_iclass_cc_tbl, NULL); + rb_managed_id_table_foreach_values(cc_tbl, invalidate_ccs_in_iclass_cc_tbl, NULL); } } @@ -409,7 +587,7 @@ invalidate_cc_refinement(st_data_t key, st_data_t data) VM_ASSERT(vm_cc_refinement_p(cc)); - if (cc->klass) { + if (vm_cc_valid(cc)) { vm_cc_invalidate(cc); } } @@ -1559,10 +1737,10 @@ cached_callable_method_entry(VALUE klass, ID mid) { ASSERT_vm_locking(); - struct rb_id_table *cc_tbl = RCLASS_WRITABLE_CC_TBL(klass); + VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass); VALUE ccs_data; - if (cc_tbl && rb_id_table_lookup(cc_tbl, mid, &ccs_data)) { + if (cc_tbl && rb_managed_id_table_lookup(cc_tbl, mid, &ccs_data)) { struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_data; VM_ASSERT(vm_ccs_p(ccs)); @@ -1572,8 +1750,8 @@ cached_callable_method_entry(VALUE klass, ID mid) return ccs->cme; } else { - rb_vm_ccs_free(ccs); - rb_id_table_delete(cc_tbl, mid); + rb_managed_id_table_delete(cc_tbl, mid); + rb_vm_ccs_invalidate_and_free(ccs); } } @@ -1587,15 +1765,15 @@ cache_callable_method_entry(VALUE klass, ID mid, const rb_callable_method_entry_ ASSERT_vm_locking(); VM_ASSERT(cme != NULL); - struct rb_id_table *cc_tbl = RCLASS_WRITABLE_CC_TBL(klass); + VALUE cc_tbl = RCLASS_WRITABLE_CC_TBL(klass); VALUE ccs_data; if (!cc_tbl) { - cc_tbl = rb_id_table_create(2); + cc_tbl = rb_vm_cc_table_create(2); RCLASS_WRITE_CC_TBL(klass, cc_tbl); } - if (rb_id_table_lookup(cc_tbl, mid, &ccs_data)) { + if (rb_managed_id_table_lookup(cc_tbl, mid, &ccs_data)) { #if VM_CHECK_MODE > 0 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_data; VM_ASSERT(ccs->cme == cme);