diff --git a/array.c b/array.c
index 378da150ee3e3e..12f45e2cbb421a 100644
--- a/array.c
+++ b/array.c
@@ -29,6 +29,7 @@
#include "ruby/st.h"
#include "ruby/thread.h"
#include "ruby/util.h"
+#include "ruby/ractor.h"
#include "vm_core.h"
#include "builtin.h"
@@ -107,10 +108,12 @@ should_be_T_ARRAY(VALUE ary)
} while (0)
#define FL_UNSET_SHARED(ary) FL_UNSET((ary), RARRAY_SHARED_FLAG)
+#define ARY_SET_PTR_FORCE(ary, p) \
+ RARRAY(ary)->as.heap.ptr = (p);
#define ARY_SET_PTR(ary, p) do { \
RUBY_ASSERT(!ARY_EMBED_P(ary)); \
RUBY_ASSERT(!OBJ_FROZEN(ary)); \
- RARRAY(ary)->as.heap.ptr = (p); \
+ ARY_SET_PTR_FORCE(ary, p); \
} while (0)
#define ARY_SET_EMBED_LEN(ary, n) do { \
long tmp_n = (n); \
@@ -148,11 +151,13 @@ should_be_T_ARRAY(VALUE ary)
#define ARY_CAPA(ary) (ARY_EMBED_P(ary) ? ary_embed_capa(ary) : \
ARY_SHARED_ROOT_P(ary) ? RARRAY_LEN(ary) : ARY_HEAP_CAPA(ary))
+#define ARY_SET_CAPA_FORCE(ary, n) \
+ RARRAY(ary)->as.heap.aux.capa = (n);
#define ARY_SET_CAPA(ary, n) do { \
RUBY_ASSERT(!ARY_EMBED_P(ary)); \
RUBY_ASSERT(!ARY_SHARED_P(ary)); \
RUBY_ASSERT(!OBJ_FROZEN(ary)); \
- RARRAY(ary)->as.heap.aux.capa = (n); \
+ ARY_SET_CAPA_FORCE(ary, n); \
} while (0)
#define ARY_SHARED_ROOT_OCCUPIED(ary) (!OBJ_FROZEN(ary) && ARY_SHARED_ROOT_REFCNT(ary) == 1)
@@ -560,8 +565,8 @@ rb_ary_cancel_sharing(VALUE ary)
VALUE *ptr = ary_heap_alloc_buffer(len);
MEMCPY(ptr, ARY_HEAP_PTR(ary), VALUE, len);
rb_ary_unshare(ary);
- ARY_SET_CAPA(ary, len);
- ARY_SET_PTR(ary, ptr);
+ ARY_SET_CAPA_FORCE(ary, len);
+ ARY_SET_PTR_FORCE(ary, ptr);
}
rb_gc_writebarrier_remember(ary);
@@ -4729,6 +4734,8 @@ rb_ary_replace(VALUE copy, VALUE orig)
ARY_SET_PTR(copy, ARY_HEAP_PTR(orig));
ARY_SET_LEN(copy, ARY_HEAP_LEN(orig));
rb_ary_set_shared(copy, shared_root);
+
+ RUBY_ASSERT(RB_OBJ_SHAREABLE_P(copy) ? RB_OBJ_SHAREABLE_P(shared_root) : 1);
}
ary_verify(copy);
return copy;
@@ -8883,7 +8890,7 @@ Init_Array(void)
rb_define_method(rb_cArray, "deconstruct", rb_ary_deconstruct, 0);
- rb_cArray_empty_frozen = rb_ary_freeze(rb_ary_new());
+ rb_cArray_empty_frozen = RB_OBJ_SET_SHAREABLE(rb_ary_freeze(rb_ary_new()));
rb_vm_register_global_object(rb_cArray_empty_frozen);
}
diff --git a/bootstraptest/test_ractor.rb b/bootstraptest/test_ractor.rb
index 634a3e3e6102b9..65ef07fb73e291 100644
--- a/bootstraptest/test_ractor.rb
+++ b/bootstraptest/test_ractor.rb
@@ -1381,18 +1381,17 @@ class C
}.map{|r| r.value}.join
}
-# NameError
-assert_equal "ok", %q{
+# Now NoMethodError is copyable
+assert_equal "NoMethodError", %q{
obj = "".freeze # NameError refers the receiver indirectly
begin
obj.bar
rescue => err
end
- begin
- Ractor.new{} << err
- rescue TypeError
- 'ok'
- end
+
+ r = Ractor.new{ Ractor.receive }
+ r << err
+ r.value.class
}
assert_equal "ok", %q{
diff --git a/class.c b/class.c
index 74dcbe5fa7b99a..4dddf08c67af38 100644
--- a/class.c
+++ b/class.c
@@ -775,7 +775,7 @@ class_alloc0(enum ruby_value_type type, VALUE klass, bool namespaceable)
RUBY_ASSERT(type == T_CLASS || type == T_ICLASS || type == T_MODULE);
- VALUE flags = type;
+ VALUE flags = type | FL_SHAREABLE;
if (RGENGC_WB_PROTECTED_CLASS) flags |= FL_WB_PROTECTED;
if (namespaceable) flags |= RCLASS_NAMESPACEABLE;
diff --git a/compile.c b/compile.c
index 27ed42f1f574d4..87c4d15ccfb925 100644
--- a/compile.c
+++ b/compile.c
@@ -838,9 +838,9 @@ get_string_value(const NODE *node)
{
switch (nd_type(node)) {
case NODE_STR:
- return rb_node_str_string_val(node);
+ return RB_OBJ_SET_SHAREABLE(rb_node_str_string_val(node));
case NODE_FILE:
- return rb_node_file_path_val(node);
+ return RB_OBJ_SET_SHAREABLE(rb_node_file_path_val(node));
default:
rb_bug("unexpected node: %s", ruby_node_name(nd_type(node)));
}
@@ -1400,6 +1400,9 @@ static void
iseq_insn_each_object_write_barrier(VALUE * obj, VALUE iseq)
{
RB_OBJ_WRITTEN(iseq, Qundef, *obj);
+ RUBY_ASSERT(SPECIAL_CONST_P(*obj) ||
+ RBASIC_CLASS(*obj) == 0 || // hidden
+ RB_OBJ_SHAREABLE_P(*obj));
}
static INSN *
@@ -2063,6 +2066,7 @@ iseq_set_arguments_keywords(rb_iseq_t *iseq, LINK_ANCHOR *const optargs,
for (i = 0; i < RARRAY_LEN(default_values); i++) {
VALUE dv = RARRAY_AREF(default_values, i);
if (dv == complex_mark) dv = Qundef;
+ if (!SPECIAL_CONST_P(dv)) rb_ractor_make_shareable(dv);
RB_OBJ_WRITE(iseq, &dvs[i], dv);
}
@@ -2749,6 +2753,7 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
rb_hash_rehash(map);
freeze_hide_obj(map);
+ rb_ractor_make_shareable(map);
generated_iseq[code_index + 1 + j] = map;
ISEQ_MBITS_SET(mark_offset_bits, code_index + 1 + j);
RB_OBJ_WRITTEN(iseq, Qundef, map);
@@ -3489,7 +3494,7 @@ iseq_peephole_optimize(rb_iseq_t *iseq, LINK_ELEMENT *list, const int do_tailcal
is_frozen_putstring(beg, &str_beg) &&
!(insn_has_label_before(&beg->link) || insn_has_label_before(&end->link))) {
int excl = FIX2INT(OPERAND_AT(range, 0));
- VALUE lit_range = rb_range_new(str_beg, str_end, excl);
+ VALUE lit_range = RB_OBJ_SET_SHAREABLE(rb_range_new(str_beg, str_end, excl));
ELEM_REMOVE(&beg->link);
ELEM_REMOVE(&end->link);
@@ -3556,6 +3561,7 @@ iseq_peephole_optimize(rb_iseq_t *iseq, LINK_ELEMENT *list, const int do_tailcal
if (vm_ci_simple(ci) && vm_ci_argc(ci) == 0 && blockiseq == NULL && vm_ci_mid(ci) == idFreeze) {
VALUE hash = iobj->operands[0];
rb_obj_reveal(hash, rb_cHash);
+ RB_OBJ_SET_SHAREABLE(hash);
insn_replace_with_operands(iseq, iobj, BIN(opt_hash_freeze), 2, hash, (VALUE)ci);
ELEM_REMOVE(next);
@@ -3929,6 +3935,9 @@ iseq_peephole_optimize(rb_iseq_t *iseq, LINK_ELEMENT *list, const int do_tailcal
rb_set_errinfo(errinfo);
COMPILE_ERROR(iseq, line, "%" PRIsVALUE, message);
}
+ else {
+ RB_OBJ_SET_SHAREABLE(re);
+ }
RB_OBJ_WRITE(iseq, &OPERAND_AT(iobj, 0), re);
ELEM_REMOVE(iobj->link.next);
}
@@ -4170,7 +4179,7 @@ iseq_peephole_optimize(rb_iseq_t *iseq, LINK_ELEMENT *list, const int do_tailcal
unsigned int flags = vm_ci_flag(ci);
if ((flags & set_flags) == set_flags && !(flags & unset_flags)) {
((INSN*)niobj)->insn_id = BIN(putobject);
- RB_OBJ_WRITE(iseq, &OPERAND_AT(niobj, 0), rb_hash_freeze(rb_hash_resurrect(OPERAND_AT(niobj, 0))));
+ RB_OBJ_WRITE(iseq, &OPERAND_AT(niobj, 0), RB_OBJ_SET_SHAREABLE(rb_hash_freeze(rb_hash_resurrect(OPERAND_AT(niobj, 0)))));
const struct rb_callinfo *nci = vm_ci_new(vm_ci_mid(ci),
flags & ~VM_CALL_KW_SPLAT_MUT, vm_ci_argc(ci), vm_ci_kwarg(ci));
@@ -4725,6 +4734,7 @@ compile_dstr(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node)
if (!RNODE_DSTR(node)->nd_next) {
VALUE lit = rb_node_dstr_string_val(node);
ADD_INSN1(ret, node, putstring, lit);
+ RB_OBJ_SET_SHAREABLE(lit);
RB_OBJ_WRITTEN(iseq, Qundef, lit);
}
else {
@@ -4744,6 +4754,7 @@ compile_dregx(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node, i
if (!popped) {
VALUE src = rb_node_dregx_string_val(node);
VALUE match = rb_reg_compile(src, cflag, NULL, 0);
+ RB_OBJ_SET_SHAREABLE(match);
ADD_INSN1(ret, node, putobject, match);
RB_OBJ_WRITTEN(iseq, Qundef, match);
}
@@ -5088,13 +5099,21 @@ static_literal_value(const NODE *node, rb_iseq_t *iseq)
{
switch (nd_type(node)) {
case NODE_INTEGER:
- return rb_node_integer_literal_val(node);
+ {
+ VALUE lit = rb_node_integer_literal_val(node);
+ if (!SPECIAL_CONST_P(lit)) RB_OBJ_SET_SHAREABLE(lit);
+ return lit;
+ }
case NODE_FLOAT:
- return rb_node_float_literal_val(node);
+ {
+ VALUE lit = rb_node_float_literal_val(node);
+ if (!SPECIAL_CONST_P(lit)) RB_OBJ_SET_SHAREABLE(lit);
+ return lit;
+ }
case NODE_RATIONAL:
- return rb_node_rational_literal_val(node);
+ return rb_ractor_make_shareable(rb_node_rational_literal_val(node));
case NODE_IMAGINARY:
- return rb_node_imaginary_literal_val(node);
+ return rb_ractor_make_shareable(rb_node_imaginary_literal_val(node));
case NODE_NIL:
return Qnil;
case NODE_TRUE:
@@ -5104,7 +5123,7 @@ static_literal_value(const NODE *node, rb_iseq_t *iseq)
case NODE_SYM:
return rb_node_sym_string_val(node);
case NODE_REGX:
- return rb_node_regx_string_val(node);
+ return RB_OBJ_SET_SHAREABLE(rb_node_regx_string_val(node));
case NODE_LINE:
return rb_node_line_lineno_val(node);
case NODE_ENCODING:
@@ -5113,7 +5132,9 @@ static_literal_value(const NODE *node, rb_iseq_t *iseq)
case NODE_STR:
if (ISEQ_COMPILE_DATA(iseq)->option->debug_frozen_string_literal || RTEST(ruby_debug)) {
VALUE lit = get_string_value(node);
- return rb_str_with_debug_created_info(lit, rb_iseq_path(iseq), (int)nd_line(node));
+ VALUE str = rb_str_with_debug_created_info(lit, rb_iseq_path(iseq), (int)nd_line(node));
+ RB_OBJ_SET_SHAREABLE(str);
+ return str;
}
else {
return get_string_value(node);
@@ -5211,7 +5232,7 @@ compile_array(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *node, int pop
/* Create a hidden array */
for (; count; count--, node = RNODE_LIST(node)->nd_next)
rb_ary_push(ary, static_literal_value(RNODE_LIST(node)->nd_head, iseq));
- OBJ_FREEZE(ary);
+ RB_OBJ_SET_FROZEN_SHAREABLE(ary);
/* Emit optimized code */
FLUSH_CHUNK;
@@ -5223,6 +5244,7 @@ compile_array(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *node, int pop
ADD_INSN1(ret, line_node, putobject, ary);
ADD_INSN(ret, line_node, concattoarray);
}
+ RB_OBJ_SET_SHAREABLE(ary);
RB_OBJ_WRITTEN(iseq, Qundef, ary);
}
}
@@ -5349,13 +5371,14 @@ compile_hash(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *node, int meth
for (; count; count--, node = RNODE_LIST(RNODE_LIST(node)->nd_next)->nd_next) {
VALUE elem[2];
elem[0] = static_literal_value(RNODE_LIST(node)->nd_head, iseq);
+ if (!RB_SPECIAL_CONST_P(elem[0])) RB_OBJ_SET_FROZEN_SHAREABLE(elem[0]);
elem[1] = static_literal_value(RNODE_LIST(RNODE_LIST(node)->nd_next)->nd_head, iseq);
+ if (!RB_SPECIAL_CONST_P(elem[1])) RB_OBJ_SET_FROZEN_SHAREABLE(elem[1]);
rb_ary_cat(ary, elem, 2);
}
VALUE hash = rb_hash_new_with_size(RARRAY_LEN(ary) / 2);
rb_hash_bulk_insert(RARRAY_LEN(ary), RARRAY_CONST_PTR(ary), hash);
- hash = rb_obj_hide(hash);
- OBJ_FREEZE(hash);
+ hash = RB_OBJ_SET_FROZEN_SHAREABLE(rb_obj_hide(hash));
/* Emit optimized code */
FLUSH_CHUNK();
@@ -6022,10 +6045,12 @@ collect_const_segments(rb_iseq_t *iseq, const NODE *node)
switch (nd_type(node)) {
case NODE_CONST:
rb_ary_unshift(arr, ID2SYM(RNODE_CONST(node)->nd_vid));
+ RB_OBJ_SET_SHAREABLE(arr);
return arr;
case NODE_COLON3:
rb_ary_unshift(arr, ID2SYM(RNODE_COLON3(node)->nd_mid));
rb_ary_unshift(arr, ID2SYM(idNULL));
+ RB_OBJ_SET_SHAREABLE(arr);
return arr;
case NODE_COLON2:
rb_ary_unshift(arr, ID2SYM(RNODE_COLON2(node)->nd_mid));
@@ -7122,6 +7147,7 @@ compile_case(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const orig_nod
if (only_special_literals && ISEQ_COMPILE_DATA(iseq)->option->specialized_instruction) {
ADD_INSN(ret, orig_node, dup);
+ rb_obj_hide(literals);
ADD_INSN2(ret, orig_node, opt_case_dispatch, literals, elselabel);
RB_OBJ_WRITTEN(iseq, Qundef, literals);
LABEL_REF(elselabel);
@@ -7657,6 +7683,7 @@ iseq_compile_pattern_each(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *c
ADD_INSN(ret, line_node, putnil);
}
else {
+ RB_OBJ_SET_FROZEN_SHAREABLE(keys);
ADD_INSN1(ret, line_node, duparray, keys);
RB_OBJ_WRITTEN(iseq, Qundef, rb_obj_hide(keys));
}
@@ -7694,7 +7721,8 @@ iseq_compile_pattern_each(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *c
ADD_INSN(ret, line_node, dup);
ADD_INSNL(ret, line_node, branchif, match_succeeded);
- ADD_INSN1(ret, line_node, putobject, rb_str_freeze(rb_sprintf("key not found: %+"PRIsVALUE, key))); // (4)
+ VALUE str = rb_str_freeze(rb_sprintf("key not found: %+"PRIsVALUE, key));
+ ADD_INSN1(ret, line_node, putobject, RB_OBJ_SET_SHAREABLE(str)); // (4)
ADD_INSN1(ret, line_node, setn, INT2FIX(base_index + CASE3_BI_OFFSET_ERROR_STRING + 2 /* (3), (4) */));
ADD_INSN1(ret, line_node, putobject, Qtrue); // (5)
ADD_INSN1(ret, line_node, setn, INT2FIX(base_index + CASE3_BI_OFFSET_KEY_ERROR_P + 3 /* (3), (4), (5) */));
@@ -10163,9 +10191,13 @@ compile_match(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node, i
INIT_ANCHOR(val);
switch ((int)type) {
case NODE_MATCH:
- ADD_INSN1(recv, node, putobject, rb_node_regx_string_val(node));
- ADD_INSN2(val, node, getspecial, INT2FIX(0),
- INT2FIX(0));
+ {
+ VALUE re = rb_node_regx_string_val(node);
+ RB_OBJ_SET_FROZEN_SHAREABLE(re);
+ ADD_INSN1(recv, node, putobject, re);
+ ADD_INSN2(val, node, getspecial, INT2FIX(0),
+ INT2FIX(0));
+ }
break;
case NODE_MATCH2:
CHECK(COMPILE(recv, "receiver", RNODE_MATCH2(node)->nd_recv));
@@ -10242,6 +10274,7 @@ compile_colon3(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node,
if (ISEQ_COMPILE_DATA(iseq)->option->inline_const_cache) {
ISEQ_BODY(iseq)->ic_size++;
VALUE segments = rb_ary_new_from_args(2, ID2SYM(idNULL), ID2SYM(RNODE_COLON3(node)->nd_mid));
+ RB_OBJ_SET_FROZEN_SHAREABLE(segments);
ADD_INSN1(ret, node, opt_getconstant_path, segments);
RB_OBJ_WRITTEN(iseq, Qundef, segments);
}
@@ -10269,6 +10302,7 @@ compile_dots(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node, in
VALUE bv = optimized_range_item(b);
VALUE ev = optimized_range_item(e);
VALUE val = rb_range_new(bv, ev, excl);
+ rb_ractor_make_shareable(rb_obj_freeze(val));
ADD_INSN1(ret, node, putobject, val);
RB_OBJ_WRITTEN(iseq, Qundef, val);
}
@@ -11080,6 +11114,7 @@ iseq_compile_each0(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const no
if (ISEQ_COMPILE_DATA(iseq)->option->inline_const_cache) {
body->ic_size++;
VALUE segments = rb_ary_new_from_args(1, ID2SYM(RNODE_CONST(node)->nd_vid));
+ RB_OBJ_SET_FROZEN_SHAREABLE(segments);
ADD_INSN1(ret, node, opt_getconstant_path, segments);
RB_OBJ_WRITTEN(iseq, Qundef, segments);
}
@@ -11145,6 +11180,7 @@ iseq_compile_each0(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const no
}
case NODE_INTEGER:{
VALUE lit = rb_node_integer_literal_val(node);
+ if (!SPECIAL_CONST_P(lit)) RB_OBJ_SET_SHAREABLE(lit);
debugp_param("integer", lit);
if (!popped) {
ADD_INSN1(ret, node, putobject, lit);
@@ -11154,6 +11190,7 @@ iseq_compile_each0(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const no
}
case NODE_FLOAT:{
VALUE lit = rb_node_float_literal_val(node);
+ if (!SPECIAL_CONST_P(lit)) RB_OBJ_SET_SHAREABLE(lit);
debugp_param("float", lit);
if (!popped) {
ADD_INSN1(ret, node, putobject, lit);
@@ -11163,6 +11200,7 @@ iseq_compile_each0(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const no
}
case NODE_RATIONAL:{
VALUE lit = rb_node_rational_literal_val(node);
+ rb_ractor_make_shareable(lit);
debugp_param("rational", lit);
if (!popped) {
ADD_INSN1(ret, node, putobject, lit);
@@ -11172,6 +11210,7 @@ iseq_compile_each0(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const no
}
case NODE_IMAGINARY:{
VALUE lit = rb_node_imaginary_literal_val(node);
+ rb_ractor_make_shareable(lit);
debugp_param("imaginary", lit);
if (!popped) {
ADD_INSN1(ret, node, putobject, lit);
@@ -11188,6 +11227,7 @@ iseq_compile_each0(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const no
if ((option->debug_frozen_string_literal || RTEST(ruby_debug)) &&
option->frozen_string_literal != ISEQ_FROZEN_STRING_LITERAL_DISABLED) {
lit = rb_str_with_debug_created_info(lit, rb_iseq_path(iseq), line);
+ RB_OBJ_SET_SHAREABLE(lit);
}
switch (option->frozen_string_literal) {
case ISEQ_FROZEN_STRING_LITERAL_UNSET:
@@ -11242,6 +11282,7 @@ iseq_compile_each0(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const no
case NODE_REGX:{
if (!popped) {
VALUE lit = rb_node_regx_string_val(node);
+ RB_OBJ_SET_SHAREABLE(lit);
ADD_INSN1(ret, node, putobject, lit);
RB_OBJ_WRITTEN(iseq, Qundef, lit);
}
@@ -12105,6 +12146,7 @@ iseq_build_from_ary_body(rb_iseq_t *iseq, LINK_ANCHOR *const anchor,
rb_hash_aset(map, key, (VALUE)label | 1);
}
RB_GC_GUARD(op);
+ RB_OBJ_SET_SHAREABLE(rb_obj_hide(map)); // allow mutation while compiling
argv[j] = map;
RB_OBJ_WRITTEN(iseq, Qundef, map);
}
@@ -12992,7 +13034,7 @@ ibf_load_code(const struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t bytecod
v = rb_hash_dup(v); // hash dumped as frozen
RHASH_TBL_RAW(v)->type = &cdhash_type;
rb_hash_rehash(v); // hash function changed
- freeze_hide_obj(v);
+ RB_OBJ_SET_SHAREABLE(freeze_hide_obj(v));
// Overwrite the existing hash in the object list. This
// is to keep the object alive during load time.
@@ -14126,7 +14168,9 @@ ibf_load_object_float(const struct ibf_load *load, const struct ibf_object_heade
double d;
/* Avoid unaligned VFP load on ARMv7; IBF payload may be unaligned (C99 6.3.2.3 p7). */
memcpy(&d, IBF_OBJBODY(double, offset), sizeof(d));
- return DBL2NUM(d);
+ VALUE f = DBL2NUM(d);
+ if (!FLONUM_P(f)) RB_OBJ_SET_SHAREABLE(f);
+ return f;
}
static void
@@ -14197,7 +14241,7 @@ ibf_load_object_regexp(const struct ibf_load *load, const struct ibf_object_head
VALUE reg = rb_reg_compile(srcstr, (int)regexp.option, NULL, 0);
if (header->internal) rb_obj_hide(reg);
- if (header->frozen) rb_obj_freeze(reg);
+ if (header->frozen) RB_OBJ_SET_SHAREABLE(rb_obj_freeze(reg));
return reg;
}
@@ -14228,7 +14272,10 @@ ibf_load_object_array(const struct ibf_load *load, const struct ibf_object_heade
rb_ary_push(ary, ibf_load_object(load, index));
}
- if (header->frozen) rb_ary_freeze(ary);
+ if (header->frozen) {
+ rb_ary_freeze(ary);
+ rb_ractor_make_shareable(ary); // TODO: check elements
+ }
return ary;
}
@@ -14273,7 +14320,9 @@ ibf_load_object_hash(const struct ibf_load *load, const struct ibf_object_header
rb_hash_rehash(obj);
if (header->internal) rb_obj_hide(obj);
- if (header->frozen) rb_obj_freeze(obj);
+ if (header->frozen) {
+ RB_OBJ_SET_FROZEN_SHAREABLE(obj);
+ }
return obj;
}
@@ -14309,7 +14358,7 @@ ibf_load_object_struct(const struct ibf_load *load, const struct ibf_object_head
VALUE end = ibf_load_object(load, range->end);
VALUE obj = rb_range_new(beg, end, range->excl);
if (header->internal) rb_obj_hide(obj);
- if (header->frozen) rb_obj_freeze(obj);
+ if (header->frozen) RB_OBJ_SET_FROZEN_SHAREABLE(obj);
return obj;
}
@@ -14337,7 +14386,7 @@ ibf_load_object_bignum(const struct ibf_load *load, const struct ibf_object_head
big_unpack_flags |
(sign == 0 ? INTEGER_PACK_NEGATIVE : 0));
if (header->internal) rb_obj_hide(obj);
- if (header->frozen) rb_obj_freeze(obj);
+ if (header->frozen) RB_OBJ_SET_FROZEN_SHAREABLE(obj);
return obj;
}
@@ -14398,7 +14447,7 @@ ibf_load_object_complex_rational(const struct ibf_load *load, const struct ibf_o
rb_complex_new(a, b) : rb_rational_new(a, b);
if (header->internal) rb_obj_hide(obj);
- if (header->frozen) rb_obj_freeze(obj);
+ if (header->frozen) rb_ractor_make_shareable(rb_obj_freeze(obj));
return obj;
}
diff --git a/depend b/depend
index 5ed27d04e0c38d..b56378ebce14c8 100644
--- a/depend
+++ b/depend
@@ -263,6 +263,7 @@ array.$(OBJEXT): {$(VPATH)}onigmo.h
array.$(OBJEXT): {$(VPATH)}oniguruma.h
array.$(OBJEXT): {$(VPATH)}probes.dmyh
array.$(OBJEXT): {$(VPATH)}probes.h
+array.$(OBJEXT): {$(VPATH)}ractor.h
array.$(OBJEXT): {$(VPATH)}ruby_assert.h
array.$(OBJEXT): {$(VPATH)}ruby_atomic.h
array.$(OBJEXT): {$(VPATH)}rubyparser.h
@@ -4385,6 +4386,7 @@ encoding.$(OBJEXT): {$(VPATH)}missing.h
encoding.$(OBJEXT): {$(VPATH)}node.h
encoding.$(OBJEXT): {$(VPATH)}onigmo.h
encoding.$(OBJEXT): {$(VPATH)}oniguruma.h
+encoding.$(OBJEXT): {$(VPATH)}ractor.h
encoding.$(OBJEXT): {$(VPATH)}regenc.h
encoding.$(OBJEXT): {$(VPATH)}ruby_assert.h
encoding.$(OBJEXT): {$(VPATH)}ruby_atomic.h
@@ -16794,6 +16796,7 @@ string.$(OBJEXT): {$(VPATH)}onigmo.h
string.$(OBJEXT): {$(VPATH)}oniguruma.h
string.$(OBJEXT): {$(VPATH)}probes.dmyh
string.$(OBJEXT): {$(VPATH)}probes.h
+string.$(OBJEXT): {$(VPATH)}ractor.h
string.$(OBJEXT): {$(VPATH)}re.h
string.$(OBJEXT): {$(VPATH)}regex.h
string.$(OBJEXT): {$(VPATH)}ruby_assert.h
@@ -17265,6 +17268,7 @@ symbol.$(OBJEXT): {$(VPATH)}onigmo.h
symbol.$(OBJEXT): {$(VPATH)}oniguruma.h
symbol.$(OBJEXT): {$(VPATH)}probes.dmyh
symbol.$(OBJEXT): {$(VPATH)}probes.h
+symbol.$(OBJEXT): {$(VPATH)}ractor.h
symbol.$(OBJEXT): {$(VPATH)}ruby_assert.h
symbol.$(OBJEXT): {$(VPATH)}ruby_atomic.h
symbol.$(OBJEXT): {$(VPATH)}rubyparser.h
diff --git a/encoding.c b/encoding.c
index da434cda1a4ff0..3d5c1d777283fe 100644
--- a/encoding.c
+++ b/encoding.c
@@ -27,6 +27,7 @@
#include "ruby/atomic.h"
#include "ruby/encoding.h"
#include "ruby/util.h"
+#include "ruby/ractor.h"
#include "ruby_assert.h"
#include "vm_sync.h"
#include "ruby_atomic.h"
@@ -135,8 +136,7 @@ static VALUE
enc_new(rb_encoding *encoding)
{
VALUE enc = TypedData_Wrap_Struct(rb_cEncoding, &encoding_data_type, (void *)encoding);
- rb_obj_freeze(enc);
- FL_SET_RAW(enc, RUBY_FL_SHAREABLE);
+ RB_OBJ_SET_FROZEN_SHAREABLE(enc);
return enc;
}
diff --git a/ext/stringio/stringio.c b/ext/stringio/stringio.c
index 0493c8cd50856b..d37dee59ff963c 100644
--- a/ext/stringio/stringio.c
+++ b/ext/stringio/stringio.c
@@ -370,23 +370,20 @@ strio_finalize(VALUE self)
/*
* call-seq:
- * StringIO.open(string = '', mode = 'r+') {|strio| ... }
+ * StringIO.open(string = '', mode = 'r+') -> new_stringio
+ * StringIO.open(string = '', mode = 'r+') {|strio| ... } -> object
*
- * Note that +mode+ defaults to 'r' if +string+ is frozen.
- *
- * Creates a new \StringIO instance formed from +string+ and +mode+;
- * see {Access Modes}[rdoc-ref:File@Access+Modes].
+ * Creates a new \StringIO instance by calling StringIO.new(string, mode).
*
- * With no block, returns the new instance:
+ * With no block given, returns the new instance:
*
* strio = StringIO.open # => #<StringIO>
*
- * With a block, calls the block with the new instance
+ * With a block given, calls the block with the new instance
* and returns the block's value;
- * closes the instance on block exit.
+ * closes the instance on block exit:
*
- * StringIO.open {|strio| p strio }
- * # => #<StringIO>
+ * StringIO.open('foo') {|strio| strio.string.upcase } # => "FOO"
*
* Related: StringIO.new.
*/
@@ -715,7 +712,7 @@ strio_set_lineno(VALUE self, VALUE lineno)
* binmode -> self
*
* Sets the data mode in +self+ to binary mode;
- * see {Data Mode}[rdoc-ref:File@Data+Mode].
+ * see {Data Mode}[https://docs.ruby-lang.org/en/master/File.html#class-File-label-Data+Mode].
*
*/
static VALUE
diff --git a/gc.c b/gc.c
index 897447c808e3d1..53cfe839d5b912 100644
--- a/gc.c
+++ b/gc.c
@@ -2804,13 +2804,24 @@ mark_m_tbl(void *objspace, struct rb_id_table *tbl)
}
}
+bool rb_gc_impl_checking_shareable(void *objspace_ptr); // in gc/default/default.c
+
+bool
+rb_gc_checking_shareable(void)
+{
+ return rb_gc_impl_checking_shareable(rb_gc_get_objspace());
+}
+
+
static enum rb_id_table_iterator_result
mark_const_entry_i(VALUE value, void *objspace)
{
const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
- gc_mark_internal(ce->value);
- gc_mark_internal(ce->file);
+ if (!rb_gc_impl_checking_shareable(objspace)) {
+ gc_mark_internal(ce->value);
+ gc_mark_internal(ce->file); // TODO: ce->file should be shareable?
+ }
return ID_TABLE_CONTINUE;
}
@@ -3071,7 +3082,12 @@ gc_mark_classext_module(rb_classext_t *ext, bool prime, VALUE namespace, void *a
gc_mark_internal(RCLASSEXT_SUPER(ext));
}
mark_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
- gc_mark_internal(RCLASSEXT_FIELDS_OBJ(ext));
+
+ if (!rb_gc_impl_checking_shareable(objspace)) {
+ // the fields object may refer to unshareable objects
+ gc_mark_internal(RCLASSEXT_FIELDS_OBJ(ext));
+ }
+
if (!RCLASSEXT_SHARED_CONST_TBL(ext) && RCLASSEXT_CONST_TBL(ext)) {
mark_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext));
}
@@ -3137,7 +3153,8 @@ rb_gc_mark_children(void *objspace, VALUE obj)
switch (BUILTIN_TYPE(obj)) {
case T_CLASS:
- if (FL_TEST_RAW(obj, FL_SINGLETON)) {
+ if (FL_TEST_RAW(obj, FL_SINGLETON) &&
+ !rb_gc_impl_checking_shareable(objspace)) {
gc_mark_internal(RCLASS_ATTACHED_OBJECT(obj));
}
// Continue to the shared T_CLASS/T_MODULE
@@ -5410,6 +5427,18 @@ rb_gc_after_fork(rb_pid_t pid)
rb_gc_impl_after_fork(rb_gc_get_objspace(), pid);
}
+bool
+rb_gc_obj_shareable_p(VALUE obj)
+{
+ return RB_OBJ_SHAREABLE_P(obj);
+}
+
+void
+rb_gc_rp(VALUE obj)
+{
+ rp(obj);
+}
+
/*
* Document-module: ObjectSpace
*
diff --git a/gc/default/default.c b/gc/default/default.c
index 7c10cc33063b0c..0a9945cdac98b2 100644
--- a/gc/default/default.c
+++ b/gc/default/default.c
@@ -491,6 +491,7 @@ typedef struct rb_objspace {
unsigned int during_minor_gc : 1;
unsigned int during_incremental_marking : 1;
unsigned int measure_gc : 1;
+ unsigned int check_shareable : 1;
} flags;
rb_event_flag_t hook_events;
@@ -1455,6 +1456,13 @@ RVALUE_WHITE_P(rb_objspace_t *objspace, VALUE obj)
return !RVALUE_MARKED(objspace, obj);
}
+bool
+rb_gc_impl_checking_shareable(void *objspace_ptr)
+{
+ rb_objspace_t *objspace = objspace_ptr;
+ return objspace->flags.check_shareable;
+}
+
bool
rb_gc_impl_gc_enabled_p(void *objspace_ptr)
{
@@ -4962,6 +4970,55 @@ check_children_i(const VALUE child, void *ptr)
}
}
+static void
+check_shareable_i(const VALUE child, void *ptr)
+{
+ struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
+
+ if (!rb_gc_obj_shareable_p(child)) {
+ fprintf(stderr, "(a) ");
+ rb_gc_rp(data->parent);
+ fprintf(stderr, "(b) ");
+ rb_gc_rp(child);
+ fprintf(stderr, "check_shareable_i: shareable (a) -> unshareable (b)\n");
+
+ data->err_count++;
+ rb_bug("!! violate shareable constraint !!");
+ }
+}
+
+static void
+gc_verify_shareable(rb_objspace_t *objspace, VALUE obj, void *data)
+{
+ // while objspace->flags.check_shareable is true,
+ // other Ractors must not run the GC, because the flag is not Ractor-local.
+ // TODO: remove the VM locking once the flag becomes Ractor-local
+
+ unsigned int lev = RB_GC_VM_LOCK();
+ {
+ objspace->flags.check_shareable = true;
+ rb_objspace_reachable_objects_from(obj, check_shareable_i, (void *)data);
+ objspace->flags.check_shareable = false;
+ }
+ RB_GC_VM_UNLOCK(lev);
+}
+
+// TODO: only checks one level of references (non-recursive)
+void
+rb_gc_verify_shareable(VALUE obj)
+{
+ rb_objspace_t *objspace = rb_gc_get_objspace();
+ struct verify_internal_consistency_struct data = {
+ .parent = obj,
+ .err_count = 0,
+ };
+ gc_verify_shareable(objspace, obj, &data);
+
+ if (data.err_count > 0) {
+ rb_bug("rb_gc_verify_shareable");
+ }
+}
+
static int
verify_internal_consistency_i(void *page_start, void *page_end, size_t stride,
struct verify_internal_consistency_struct *data)
@@ -4993,6 +5050,10 @@ verify_internal_consistency_i(void *page_start, void *page_end, size_t stride,
rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
}
+ if (!is_marking(objspace) && rb_gc_obj_shareable_p(obj)) {
+ gc_verify_shareable(objspace, obj, data);
+ }
+
if (is_incremental_marking(objspace)) {
if (RVALUE_BLACK_P(objspace, obj)) {
/* reachable objects from black objects should be black or grey objects */
@@ -6645,6 +6706,7 @@ gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_
gc_enter_count(event);
if (RB_UNLIKELY(during_gc != 0)) rb_bug("during_gc != 0");
if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
+ GC_ASSERT(!objspace->flags.check_shareable);
during_gc = TRUE;
RUBY_DEBUG_LOG("%s (%s)",gc_enter_event_cstr(event), gc_current_status(objspace));
@@ -6658,6 +6720,7 @@ static inline void
gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
{
GC_ASSERT(during_gc != 0);
+ GC_ASSERT(!objspace->flags.check_shareable);
rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_EXIT);
@@ -8913,6 +8976,12 @@ gc_profile_disable(VALUE _)
return Qnil;
}
+void
+rb_gc_verify_internal_consistency(void)
+{
+ gc_verify_internal_consistency(rb_gc_get_objspace());
+}
+
/*
* call-seq:
* GC.verify_internal_consistency -> nil
@@ -8926,7 +8995,7 @@ gc_profile_disable(VALUE _)
static VALUE
gc_verify_internal_consistency_m(VALUE dummy)
{
- gc_verify_internal_consistency(rb_gc_get_objspace());
+ rb_gc_verify_internal_consistency();
return Qnil;
}
diff --git a/gc/gc.h b/gc/gc.h
index 8ca9987477488c..89219eb7934692 100644
--- a/gc/gc.h
+++ b/gc/gc.h
@@ -96,6 +96,8 @@ MODULAR_GC_FN bool rb_memerror_reentered(void);
MODULAR_GC_FN bool rb_obj_id_p(VALUE);
MODULAR_GC_FN void rb_gc_before_updating_jit_code(void);
MODULAR_GC_FN void rb_gc_after_updating_jit_code(void);
+MODULAR_GC_FN bool rb_gc_obj_shareable_p(VALUE);
+MODULAR_GC_FN void rb_gc_rp(VALUE);
#if USE_MODULAR_GC
MODULAR_GC_FN bool rb_gc_event_hook_required_p(rb_event_flag_t event);
diff --git a/gc/mmtk/mmtk.c b/gc/mmtk/mmtk.c
index 9dd3129e01664e..5861f5e70fdb66 100644
--- a/gc/mmtk/mmtk.c
+++ b/gc/mmtk/mmtk.c
@@ -1260,6 +1260,12 @@ rb_gc_impl_copy_attributes(void *objspace_ptr, VALUE dest, VALUE obj)
rb_gc_impl_copy_finalizer(objspace_ptr, dest, obj);
}
+bool
+rb_gc_impl_checking_shareable(void *ptr)
+{
+ return false;
+}
+
// GC Identification
const char *
diff --git a/hash.c b/hash.c
index 7b523ba23561ef..603cab76db33d9 100644
--- a/hash.c
+++ b/hash.c
@@ -7474,6 +7474,7 @@ Init_Hash(void)
rb_define_singleton_method(rb_cHash, "ruby2_keywords_hash", rb_hash_s_ruby2_keywords_hash, 1);
rb_cHash_empty_frozen = rb_hash_freeze(rb_hash_new());
+ RB_OBJ_SET_SHAREABLE(rb_cHash_empty_frozen);
rb_vm_register_global_object(rb_cHash_empty_frozen);
/* Document-class: ENV
@@ -7643,8 +7644,7 @@ Init_Hash(void)
origenviron = environ;
envtbl = TypedData_Wrap_Struct(rb_cObject, &env_data_type, NULL);
rb_extend_object(envtbl, rb_mEnumerable);
- FL_SET_RAW(envtbl, RUBY_FL_SHAREABLE);
-
+ RB_OBJ_SET_SHAREABLE(envtbl);
rb_define_singleton_method(envtbl, "[]", rb_f_getenv, 1);
rb_define_singleton_method(envtbl, "fetch", env_fetch, -1);
diff --git a/id_table.c b/id_table.c
index b70587319182ce..eb8477237ea622 100644
--- a/id_table.c
+++ b/id_table.c
@@ -373,6 +373,7 @@ rb_managed_id_table_create(const rb_data_type_t *type, size_t capa)
{
struct rb_id_table *tbl;
VALUE obj = TypedData_Make_Struct(0, struct rb_id_table, type, tbl);
+ RB_OBJ_SET_SHAREABLE(obj);
rb_id_table_init(tbl, capa);
return obj;
}
diff --git a/imemo.c b/imemo.c
index 2dd05b29f9df3b..d83c690ba5ae81 100644
--- a/imemo.c
+++ b/imemo.c
@@ -40,9 +40,9 @@ rb_imemo_name(enum imemo_type type)
* ========================================================================= */
VALUE
-rb_imemo_new(enum imemo_type type, VALUE v0, size_t size)
+rb_imemo_new(enum imemo_type type, VALUE v0, size_t size, bool is_shareable)
{
- VALUE flags = T_IMEMO | FL_WB_PROTECTED | (type << FL_USHIFT);
+ VALUE flags = T_IMEMO | FL_WB_PROTECTED | (type << FL_USHIFT) | (is_shareable ? FL_SHAREABLE : 0);
NEWOBJ_OF(obj, void, v0, flags, size, 0);
return (VALUE)obj;
@@ -98,16 +98,16 @@ rb_free_tmp_buffer(volatile VALUE *store)
}
static VALUE
-imemo_fields_new(VALUE owner, size_t capa)
+imemo_fields_new(VALUE owner, size_t capa, bool shareable)
{
size_t embedded_size = offsetof(struct rb_fields, as.embed) + capa * sizeof(VALUE);
if (rb_gc_size_allocatable_p(embedded_size)) {
- VALUE fields = rb_imemo_new(imemo_fields, owner, embedded_size);
+ VALUE fields = rb_imemo_new(imemo_fields, owner, embedded_size, shareable);
RUBY_ASSERT(IMEMO_TYPE_P(fields, imemo_fields));
return fields;
}
else {
- VALUE fields = rb_imemo_new(imemo_fields, owner, sizeof(struct rb_fields));
+ VALUE fields = rb_imemo_new(imemo_fields, owner, sizeof(struct rb_fields), shareable);
IMEMO_OBJ_FIELDS(fields)->as.external.ptr = ALLOC_N(VALUE, capa);
FL_SET_RAW(fields, OBJ_FIELD_HEAP);
return fields;
@@ -115,24 +115,24 @@ imemo_fields_new(VALUE owner, size_t capa)
}
VALUE
-rb_imemo_fields_new(VALUE owner, size_t capa)
+rb_imemo_fields_new(VALUE owner, size_t capa, bool shareable)
{
- return imemo_fields_new(owner, capa);
+ return imemo_fields_new(owner, capa, shareable);
}
static VALUE
-imemo_fields_new_complex(VALUE owner, size_t capa)
+imemo_fields_new_complex(VALUE owner, size_t capa, bool shareable)
{
- VALUE fields = rb_imemo_new(imemo_fields, owner, sizeof(struct rb_fields));
+ VALUE fields = rb_imemo_new(imemo_fields, owner, sizeof(struct rb_fields), shareable);
IMEMO_OBJ_FIELDS(fields)->as.complex.table = st_init_numtable_with_size(capa);
FL_SET_RAW(fields, OBJ_FIELD_HEAP);
return fields;
}
VALUE
-rb_imemo_fields_new_complex(VALUE owner, size_t capa)
+rb_imemo_fields_new_complex(VALUE owner, size_t capa, bool shareable)
{
- return imemo_fields_new_complex(owner, capa);
+ return imemo_fields_new_complex(owner, capa, shareable);
}
static int
@@ -151,9 +151,9 @@ imemo_fields_complex_wb_i(st_data_t key, st_data_t value, st_data_t arg)
}
VALUE
-rb_imemo_fields_new_complex_tbl(VALUE owner, st_table *tbl)
+rb_imemo_fields_new_complex_tbl(VALUE owner, st_table *tbl, bool shareable)
{
- VALUE fields = rb_imemo_new(imemo_fields, owner, sizeof(struct rb_fields));
+ VALUE fields = rb_imemo_new(imemo_fields, owner, sizeof(struct rb_fields), shareable);
IMEMO_OBJ_FIELDS(fields)->as.complex.table = tbl;
FL_SET_RAW(fields, OBJ_FIELD_HEAP);
st_foreach(tbl, imemo_fields_trigger_wb_i, (st_data_t)fields);
@@ -170,7 +170,7 @@ rb_imemo_fields_clone(VALUE fields_obj)
st_table *src_table = rb_imemo_fields_complex_tbl(fields_obj);
st_table *dest_table = xcalloc(1, sizeof(st_table));
- clone = rb_imemo_fields_new_complex_tbl(rb_imemo_fields_owner(fields_obj), dest_table);
+ clone = rb_imemo_fields_new_complex_tbl(rb_imemo_fields_owner(fields_obj), dest_table, false /* TODO: check */);
st_replace(dest_table, src_table);
RBASIC_SET_SHAPE_ID(clone, shape_id);
@@ -178,7 +178,7 @@ rb_imemo_fields_clone(VALUE fields_obj)
st_foreach(dest_table, imemo_fields_complex_wb_i, (st_data_t)clone);
}
else {
- clone = imemo_fields_new(rb_imemo_fields_owner(fields_obj), RSHAPE_CAPACITY(shape_id));
+ clone = imemo_fields_new(rb_imemo_fields_owner(fields_obj), RSHAPE_CAPACITY(shape_id), false /* TODO: check */);
RBASIC_SET_SHAPE_ID(clone, shape_id);
VALUE *fields = rb_imemo_fields_ptr(clone);
attr_index_t fields_count = RSHAPE_LEN(shape_id);
@@ -303,7 +303,9 @@ mark_and_move_method_entry(rb_method_entry_t *ment, bool reference_updating)
rb_gc_mark_and_move(&def->body.attr.location);
break;
case VM_METHOD_TYPE_BMETHOD:
- rb_gc_mark_and_move(&def->body.bmethod.proc);
+ if (!rb_gc_checking_shareable()) {
+ rb_gc_mark_and_move(&def->body.bmethod.proc);
+ }
if (def->body.bmethod.hooks) {
rb_hook_list_mark_and_move(def->body.bmethod.hooks);
}
@@ -386,16 +388,27 @@ rb_imemo_mark_and_move(VALUE obj, bool reference_updating)
case imemo_constcache: {
struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
- rb_gc_mark_and_move(&ice->value);
+ if ((ice->flags & IMEMO_CONST_CACHE_SHAREABLE) ||
+ !rb_gc_checking_shareable()) {
+ rb_gc_mark_and_move(&ice->value);
+ }
break;
}
case imemo_cref: {
rb_cref_t *cref = (rb_cref_t *)obj;
- rb_gc_mark_and_move(&cref->klass_or_self);
+ if (!rb_gc_checking_shareable()) {
+ // cref->klass_or_self can be unshareable, but there is no way to access it from other Ractors
+ rb_gc_mark_and_move(&cref->klass_or_self);
+ }
+
rb_gc_mark_and_move_ptr(&cref->next);
- rb_gc_mark_and_move(&cref->refinements);
+
+ // TODO: interaction between Ractor and refinements is not resolved yet
+ if (!rb_gc_checking_shareable()) {
+ rb_gc_mark_and_move(&cref->refinements);
+ }
break;
}
@@ -481,20 +494,25 @@ rb_imemo_mark_and_move(VALUE obj, bool reference_updating)
case imemo_fields: {
rb_gc_mark_and_move((VALUE *)&RBASIC(obj)->klass);
- if (rb_shape_obj_too_complex_p(obj)) {
- st_table *tbl = rb_imemo_fields_complex_tbl(obj);
- if (reference_updating) {
- rb_gc_ref_update_table_values_only(tbl);
+ if (!rb_gc_checking_shareable()) {
+ // imemo_fields can refer to unshareable objects
+ // even if the imemo_fields itself is shareable.
+
+ if (rb_shape_obj_too_complex_p(obj)) {
+ st_table *tbl = rb_imemo_fields_complex_tbl(obj);
+ if (reference_updating) {
+ rb_gc_ref_update_table_values_only(tbl);
+ }
+ else {
+ rb_mark_tbl_no_pin(tbl);
+ }
}
else {
- rb_mark_tbl_no_pin(tbl);
- }
- }
- else {
- VALUE *fields = rb_imemo_fields_ptr(obj);
- attr_index_t len = RSHAPE_LEN(RBASIC_SHAPE_ID(obj));
- for (attr_index_t i = 0; i < len; i++) {
- rb_gc_mark_and_move(&fields[i]);
+ VALUE *fields = rb_imemo_fields_ptr(obj);
+ attr_index_t len = RSHAPE_LEN(RBASIC_SHAPE_ID(obj));
+ for (attr_index_t i = 0; i < len; i++) {
+ rb_gc_mark_and_move(&fields[i]);
+ }
}
}
break;
diff --git a/include/ruby/ractor.h b/include/ruby/ractor.h
index 7811616f6d8a84..85222bbe115860 100644
--- a/include/ruby/ractor.h
+++ b/include/ruby/ractor.h
@@ -261,4 +261,18 @@ rb_ractor_shareable_p(VALUE obj)
}
}
+// TODO: optimize on interpreter core
+#ifndef RB_OBJ_SET_SHAREABLE
+VALUE rb_obj_set_shareable(VALUE obj); // ractor.c
+#define RB_OBJ_SET_SHAREABLE(obj) rb_obj_set_shareable(obj)
+#endif
+
+static inline VALUE
+RB_OBJ_SET_FROZEN_SHAREABLE(VALUE obj)
+{
+ RB_OBJ_FREEZE(obj);
+ RB_OBJ_SET_SHAREABLE(obj);
+ return obj;
+}
+
#endif /* RUBY_RACTOR_H */
diff --git a/internal/class.h b/internal/class.h
index a791672cadcacf..5d843e58da3923 100644
--- a/internal/class.h
+++ b/internal/class.h
@@ -555,7 +555,7 @@ RCLASS_WRITABLE_ENSURE_FIELDS_OBJ(VALUE obj)
RUBY_ASSERT(RB_TYPE_P(obj, RUBY_T_CLASS) || RB_TYPE_P(obj, RUBY_T_MODULE));
rb_classext_t *ext = RCLASS_EXT_WRITABLE(obj);
if (!ext->fields_obj) {
- RB_OBJ_WRITE(obj, &ext->fields_obj, rb_imemo_fields_new(obj, 1));
+ RB_OBJ_WRITE(obj, &ext->fields_obj, rb_imemo_fields_new(obj, 1, true));
}
return ext->fields_obj;
}
@@ -762,6 +762,7 @@ RCLASS_SET_CLASSPATH(VALUE klass, VALUE classpath, bool permanent)
rb_classext_t *ext = RCLASS_EXT_READABLE(klass);
assert(BUILTIN_TYPE(klass) == T_CLASS || BUILTIN_TYPE(klass) == T_MODULE);
assert(classpath == 0 || BUILTIN_TYPE(classpath) == T_STRING);
+ assert(FL_TEST_RAW(classpath, RUBY_FL_SHAREABLE));
RB_OBJ_WRITE(klass, &(RCLASSEXT_CLASSPATH(ext)), classpath);
RCLASSEXT_PERMANENT_CLASSPATH(ext) = permanent;
@@ -773,6 +774,7 @@ RCLASS_WRITE_CLASSPATH(VALUE klass, VALUE classpath, bool permanent)
rb_classext_t *ext = RCLASS_EXT_WRITABLE(klass);
assert(BUILTIN_TYPE(klass) == T_CLASS || BUILTIN_TYPE(klass) == T_MODULE);
assert(classpath == 0 || BUILTIN_TYPE(classpath) == T_STRING);
+ assert(!RB_FL_ABLE(classpath) || FL_TEST_RAW(classpath, RUBY_FL_SHAREABLE));
RB_OBJ_WRITE(klass, &(RCLASSEXT_CLASSPATH(ext)), classpath);
RCLASSEXT_PERMANENT_CLASSPATH(ext) = permanent;
diff --git a/internal/gc.h b/internal/gc.h
index f0dc04fc58a954..ec408d7fac53b9 100644
--- a/internal/gc.h
+++ b/internal/gc.h
@@ -351,4 +351,8 @@ ruby_sized_realloc_n(void *ptr, size_t new_count, size_t element_size, size_t ol
#define ruby_sized_xrealloc ruby_sized_xrealloc_inlined
#define ruby_sized_xrealloc2 ruby_sized_xrealloc2_inlined
#define ruby_sized_xfree ruby_sized_xfree_inlined
+
+void rb_gc_verify_shareable(VALUE);
+bool rb_gc_checking_shareable(void);
+
#endif /* INTERNAL_GC_H */
diff --git a/internal/imemo.h b/internal/imemo.h
index 3b91ef4b818f93..f8bda26f0b50f9 100644
--- a/internal/imemo.h
+++ b/internal/imemo.h
@@ -114,7 +114,8 @@ struct MEMO {
} u3;
};
-#define IMEMO_NEW(T, type, v0) ((T *)rb_imemo_new((type), (v0), sizeof(T)))
+#define IMEMO_NEW(T, type, v0) ((T *)rb_imemo_new((type), (v0), sizeof(T), false))
+#define SHAREABLE_IMEMO_NEW(T, type, v0) ((T *)rb_imemo_new((type), (v0), sizeof(T), true))
/* ment is in method.h */
@@ -131,7 +132,7 @@ struct MEMO {
#ifndef RUBY_RUBYPARSER_H
typedef struct rb_imemo_tmpbuf_struct rb_imemo_tmpbuf_t;
#endif
-VALUE rb_imemo_new(enum imemo_type type, VALUE v0, size_t size);
+VALUE rb_imemo_new(enum imemo_type type, VALUE v0, size_t size, bool is_shareable);
VALUE rb_imemo_tmpbuf_new(void);
struct vm_ifunc *rb_vm_ifunc_new(rb_block_call_func_t func, const void *data, int min_argc, int max_argc);
static inline enum imemo_type imemo_type(VALUE imemo);
@@ -270,9 +271,9 @@ STATIC_ASSERT(imemo_fields_embed_offset, offsetof(struct RObject, as.heap.fields
#define IMEMO_OBJ_FIELDS(fields) ((struct rb_fields *)fields)
-VALUE rb_imemo_fields_new(VALUE owner, size_t capa);
-VALUE rb_imemo_fields_new_complex(VALUE owner, size_t capa);
-VALUE rb_imemo_fields_new_complex_tbl(VALUE owner, st_table *tbl);
+VALUE rb_imemo_fields_new(VALUE owner, size_t capa, bool shareable);
+VALUE rb_imemo_fields_new_complex(VALUE owner, size_t capa, bool shareable);
+VALUE rb_imemo_fields_new_complex_tbl(VALUE owner, st_table *tbl, bool shareable);
VALUE rb_imemo_fields_clone(VALUE fields_obj);
void rb_imemo_fields_clear(VALUE fields_obj);
diff --git a/iseq.c b/iseq.c
index ae30d60ced9e3f..aabeb83b3c349e 100644
--- a/iseq.c
+++ b/iseq.c
@@ -412,12 +412,15 @@ rb_iseq_mark_and_move(rb_iseq_t *iseq, bool reference_updating)
#endif
}
else {
+ // TODO: check jit payload
+ if (!rb_gc_checking_shareable()) {
#if USE_YJIT
- rb_yjit_iseq_mark(body->yjit_payload);
+ rb_yjit_iseq_mark(body->yjit_payload);
#endif
#if USE_ZJIT
- rb_zjit_iseq_mark(body->zjit_payload);
+ rb_zjit_iseq_mark(body->zjit_payload);
#endif
+ }
}
}
@@ -425,13 +428,15 @@ rb_iseq_mark_and_move(rb_iseq_t *iseq, bool reference_updating)
rb_gc_mark_and_move(&iseq->aux.loader.obj);
}
else if (FL_TEST_RAW((VALUE)iseq, ISEQ_USE_COMPILE_DATA)) {
- const struct iseq_compile_data *const compile_data = ISEQ_COMPILE_DATA(iseq);
+ if (!rb_gc_checking_shareable()) {
+ const struct iseq_compile_data *const compile_data = ISEQ_COMPILE_DATA(iseq);
- rb_iseq_mark_and_move_insn_storage(compile_data->insn.storage_head);
- rb_iseq_mark_and_move_each_compile_data_value(iseq, reference_updating ? ISEQ_ORIGINAL_ISEQ(iseq) : NULL);
+ rb_iseq_mark_and_move_insn_storage(compile_data->insn.storage_head);
+ rb_iseq_mark_and_move_each_compile_data_value(iseq, reference_updating ? ISEQ_ORIGINAL_ISEQ(iseq) : NULL);
- rb_gc_mark_and_move((VALUE *)&compile_data->err_info);
- rb_gc_mark_and_move((VALUE *)&compile_data->catch_table_ary);
+ rb_gc_mark_and_move((VALUE *)&compile_data->err_info);
+ rb_gc_mark_and_move((VALUE *)&compile_data->catch_table_ary);
+ }
}
else {
/* executable */
@@ -544,9 +549,14 @@ rb_iseq_pathobj_new(VALUE path, VALUE realpath)
pathobj = rb_fstring(path);
}
else {
- if (!NIL_P(realpath)) realpath = rb_fstring(realpath);
- pathobj = rb_ary_new_from_args(2, rb_fstring(path), realpath);
+ if (!NIL_P(realpath)) {
+ realpath = rb_fstring(realpath);
+ }
+ VALUE fpath = rb_fstring(path);
+
+ pathobj = rb_ary_new_from_args(2, fpath, realpath);
rb_ary_freeze(pathobj);
+ RB_OBJ_SET_SHAREABLE(pathobj);
}
return pathobj;
}
@@ -565,6 +575,11 @@ rb_iseq_alloc_with_dummy_path(VALUE fname)
rb_iseq_t *dummy_iseq = iseq_alloc();
ISEQ_BODY(dummy_iseq)->type = ISEQ_TYPE_TOP;
+
+ if (!RB_OBJ_SHAREABLE_P(fname)) {
+ RB_OBJ_SET_FROZEN_SHAREABLE(fname);
+ }
+
RB_OBJ_WRITE(dummy_iseq, &ISEQ_BODY(dummy_iseq)->location.pathobj, fname);
RB_OBJ_WRITE(dummy_iseq, &ISEQ_BODY(dummy_iseq)->location.label, fname);
@@ -1568,6 +1583,7 @@ iseqw_new(const rb_iseq_t *iseq)
RB_OBJ_WRITE(obj, ptr, iseq);
/* cache a wrapper object */
+ RB_OBJ_SET_FROZEN_SHAREABLE((VALUE)obj);
RB_OBJ_WRITE((VALUE)iseq, &iseq->wrapper, obj);
return obj;
diff --git a/iseq.h b/iseq.h
index a8ad8ef9b064b8..86063d8be28136 100644
--- a/iseq.h
+++ b/iseq.h
@@ -175,7 +175,7 @@ ISEQ_COMPILE_DATA_CLEAR(rb_iseq_t *iseq)
static inline rb_iseq_t *
iseq_imemo_alloc(void)
{
- rb_iseq_t *iseq = IMEMO_NEW(rb_iseq_t, imemo_iseq, 0);
+ rb_iseq_t *iseq = SHAREABLE_IMEMO_NEW(rb_iseq_t, imemo_iseq, 0);
// Clear out the whole iseq except for the flags.
memset((char *)iseq + sizeof(VALUE), 0, sizeof(rb_iseq_t) - sizeof(VALUE));
diff --git a/lib/bundler/vendor/uri/lib/uri/generic.rb b/lib/bundler/vendor/uri/lib/uri/generic.rb
index 6abb171d14254b..a27874748e5312 100644
--- a/lib/bundler/vendor/uri/lib/uri/generic.rb
+++ b/lib/bundler/vendor/uri/lib/uri/generic.rb
@@ -186,18 +186,18 @@ def initialize(scheme,
if arg_check
self.scheme = scheme
- self.userinfo = userinfo
self.hostname = host
self.port = port
+ self.userinfo = userinfo
self.path = path
self.query = query
self.opaque = opaque
self.fragment = fragment
else
self.set_scheme(scheme)
- self.set_userinfo(userinfo)
self.set_host(host)
self.set_port(port)
+ self.set_userinfo(userinfo)
self.set_path(path)
self.query = query
self.set_opaque(opaque)
@@ -511,7 +511,7 @@ def set_userinfo(user, password = nil)
user, password = split_userinfo(user)
end
@user = user
- @password = password if password
+ @password = password
[@user, @password]
end
@@ -522,7 +522,7 @@ def set_userinfo(user, password = nil)
# See also Bundler::URI::Generic.user=.
#
def set_user(v)
- set_userinfo(v, @password)
+ set_userinfo(v, nil)
v
end
protected :set_user
@@ -574,6 +574,12 @@ def password
@password
end
+ # Returns the authority info (array of user, password, host and
+ # port), if any is set. Or returns +nil+.
+ def authority
+ return @user, @password, @host, @port if @user || @password || @host || @port
+ end
+
# Returns the user component after Bundler::URI decoding.
def decoded_user
Bundler::URI.decode_uri_component(@user) if @user
@@ -615,6 +621,13 @@ def set_host(v)
end
protected :set_host
+ # Protected setter for the authority info (+user+, +password+, +host+
+ # and +port+). If +port+ is +nil+, +default_port+ will be set.
+ #
+ protected def set_authority(user, password, host, port = nil)
+ @user, @password, @host, @port = user, password, host, port || self.default_port
+ end
+
#
# == Args
#
@@ -639,6 +652,7 @@ def set_host(v)
def host=(v)
check_host(v)
set_host(v)
+ set_userinfo(nil)
v
end
@@ -729,6 +743,7 @@ def set_port(v)
def port=(v)
check_port(v)
set_port(v)
+ set_userinfo(nil)
port
end
@@ -1121,7 +1136,7 @@ def merge(oth)
base = self.dup
- authority = rel.userinfo || rel.host || rel.port
+ authority = rel.authority
# RFC2396, Section 5.2, 2)
if (rel.path.nil? || rel.path.empty?) && !authority && !rel.query
@@ -1134,9 +1149,7 @@ def merge(oth)
# RFC2396, Section 5.2, 4)
if authority
- base.set_userinfo(rel.userinfo)
- base.set_host(rel.host)
- base.set_port(rel.port || base.default_port)
+ base.set_authority(*authority)
base.set_path(rel.path)
elsif base.path && rel.path
base.set_path(merge_path(base.path, rel.path))
diff --git a/lib/bundler/vendor/uri/lib/uri/version.rb b/lib/bundler/vendor/uri/lib/uri/version.rb
index d4996a12e2929b..a3ec8b9b0b478b 100644
--- a/lib/bundler/vendor/uri/lib/uri/version.rb
+++ b/lib/bundler/vendor/uri/lib/uri/version.rb
@@ -1,6 +1,6 @@
module Bundler::URI
# :stopdoc:
- VERSION_CODE = '010003'.freeze
+ VERSION_CODE = '010004'.freeze
VERSION = VERSION_CODE.scan(/../).collect{|n| n.to_i}.join('.').freeze
# :startdoc:
end
diff --git a/lib/rubygems/vendor/uri/lib/uri/generic.rb b/lib/rubygems/vendor/uri/lib/uri/generic.rb
index 2eabe2b4e338db..99b33b3d4f1136 100644
--- a/lib/rubygems/vendor/uri/lib/uri/generic.rb
+++ b/lib/rubygems/vendor/uri/lib/uri/generic.rb
@@ -186,18 +186,18 @@ def initialize(scheme,
if arg_check
self.scheme = scheme
- self.userinfo = userinfo
self.hostname = host
self.port = port
+ self.userinfo = userinfo
self.path = path
self.query = query
self.opaque = opaque
self.fragment = fragment
else
self.set_scheme(scheme)
- self.set_userinfo(userinfo)
self.set_host(host)
self.set_port(port)
+ self.set_userinfo(userinfo)
self.set_path(path)
self.query = query
self.set_opaque(opaque)
@@ -511,7 +511,7 @@ def set_userinfo(user, password = nil)
user, password = split_userinfo(user)
end
@user = user
- @password = password if password
+ @password = password
[@user, @password]
end
@@ -522,7 +522,7 @@ def set_userinfo(user, password = nil)
# See also Gem::URI::Generic.user=.
#
def set_user(v)
- set_userinfo(v, @password)
+ set_userinfo(v, nil)
v
end
protected :set_user
@@ -574,6 +574,12 @@ def password
@password
end
+ # Returns the authority info (array of user, password, host and
+ # port), if any is set. Or returns +nil+.
+ def authority
+ return @user, @password, @host, @port if @user || @password || @host || @port
+ end
+
# Returns the user component after Gem::URI decoding.
def decoded_user
Gem::URI.decode_uri_component(@user) if @user
@@ -615,6 +621,13 @@ def set_host(v)
end
protected :set_host
+ # Protected setter for the authority info (+user+, +password+, +host+
+ # and +port+). If +port+ is +nil+, +default_port+ will be set.
+ #
+ protected def set_authority(user, password, host, port = nil)
+ @user, @password, @host, @port = user, password, host, port || self.default_port
+ end
+
#
# == Args
#
@@ -639,6 +652,7 @@ def set_host(v)
def host=(v)
check_host(v)
set_host(v)
+ set_userinfo(nil)
v
end
@@ -729,6 +743,7 @@ def set_port(v)
def port=(v)
check_port(v)
set_port(v)
+ set_userinfo(nil)
port
end
@@ -1121,7 +1136,7 @@ def merge(oth)
base = self.dup
- authority = rel.userinfo || rel.host || rel.port
+ authority = rel.authority
# RFC2396, Section 5.2, 2)
if (rel.path.nil? || rel.path.empty?) && !authority && !rel.query
@@ -1134,9 +1149,7 @@ def merge(oth)
# RFC2396, Section 5.2, 4)
if authority
- base.set_userinfo(rel.userinfo)
- base.set_host(rel.host)
- base.set_port(rel.port || base.default_port)
+ base.set_authority(*authority)
base.set_path(rel.path)
elsif base.path && rel.path
base.set_path(merge_path(base.path, rel.path))
diff --git a/lib/rubygems/vendor/uri/lib/uri/version.rb b/lib/rubygems/vendor/uri/lib/uri/version.rb
index c2f617ce25008e..d3dd421aaa3923 100644
--- a/lib/rubygems/vendor/uri/lib/uri/version.rb
+++ b/lib/rubygems/vendor/uri/lib/uri/version.rb
@@ -1,6 +1,6 @@
module Gem::URI
# :stopdoc:
- VERSION_CODE = '010003'.freeze
+ VERSION_CODE = '010004'.freeze
VERSION = VERSION_CODE.scan(/../).collect{|n| n.to_i}.join('.').freeze
# :startdoc:
end
diff --git a/prism_compile.c b/prism_compile.c
index 86753c90cc17a9..6b4e32f629a6a3 100644
--- a/prism_compile.c
+++ b/prism_compile.c
@@ -201,6 +201,10 @@ parse_integer_value(const pm_integer_t *integer)
result = rb_funcall(result, rb_intern("-@"), 0);
}
+ if (!SPECIAL_CONST_P(result)) {
+ RB_OBJ_SET_SHAREABLE(result); // bignum
+ }
+
return result;
}
@@ -219,7 +223,11 @@ parse_integer(const pm_integer_node_t *node)
static VALUE
parse_float(const pm_float_node_t *node)
{
- return DBL2NUM(node->value);
+ VALUE val = DBL2NUM(node->value);
+ if (!FLONUM_P(val)) {
+ RB_OBJ_SET_SHAREABLE(val);
+ }
+ return val;
}
/**
@@ -233,7 +241,8 @@ parse_rational(const pm_rational_node_t *node)
{
VALUE numerator = parse_integer_value(&node->numerator);
VALUE denominator = parse_integer_value(&node->denominator);
- return rb_rational_new(numerator, denominator);
+
+ return rb_ractor_make_shareable(rb_rational_new(numerator, denominator));
}
/**
@@ -263,7 +272,7 @@ parse_imaginary(const pm_imaginary_node_t *node)
rb_bug("Unexpected numeric type on imaginary number %s\n", pm_node_type_to_str(PM_NODE_TYPE(node->numeric)));
}
- return rb_complex_raw(INT2FIX(0), imaginary_part);
+ return RB_OBJ_SET_SHAREABLE(rb_complex_raw(INT2FIX(0), imaginary_part));
}
static inline VALUE
@@ -315,7 +324,7 @@ parse_static_literal_string(rb_iseq_t *iseq, const pm_scope_node_t *scope_node,
if (ISEQ_COMPILE_DATA(iseq)->option->debug_frozen_string_literal || RTEST(ruby_debug)) {
int line_number = pm_node_line_number(scope_node->parser, node);
- value = rb_str_with_debug_created_info(value, rb_iseq_path(iseq), line_number);
+ value = rb_ractor_make_shareable(rb_str_with_debug_created_info(value, rb_iseq_path(iseq), line_number));
}
return value;
@@ -531,8 +540,7 @@ parse_regexp(rb_iseq_t *iseq, const pm_scope_node_t *scope_node, const pm_node_t
return Qnil;
}
- rb_obj_freeze(regexp);
- return regexp;
+ return RB_OBJ_SET_SHAREABLE(rb_obj_freeze(regexp));
}
static inline VALUE
@@ -542,6 +550,7 @@ parse_regexp_literal(rb_iseq_t *iseq, const pm_scope_node_t *scope_node, const p
if (regexp_encoding == NULL) regexp_encoding = scope_node->encoding;
VALUE string = rb_enc_str_new((const char *) pm_string_source(unescaped), pm_string_length(unescaped), regexp_encoding);
+ RB_OBJ_SET_SHAREABLE(string);
return parse_regexp(iseq, scope_node, node, string);
}
@@ -724,7 +733,9 @@ static VALUE
pm_static_literal_string(rb_iseq_t *iseq, VALUE string, int line_number)
{
if (ISEQ_COMPILE_DATA(iseq)->option->debug_frozen_string_literal || RTEST(ruby_debug)) {
- return rb_str_with_debug_created_info(string, rb_iseq_path(iseq), line_number);
+ VALUE str = rb_str_with_debug_created_info(string, rb_iseq_path(iseq), line_number);
+ RB_OBJ_SET_SHAREABLE(str);
+ return str;
}
else {
return rb_fstring(string);
@@ -753,7 +764,7 @@ pm_static_literal_value(rb_iseq_t *iseq, const pm_node_t *node, const pm_scope_n
rb_ary_push(value, pm_static_literal_value(iseq, elements->nodes[index], scope_node));
}
- OBJ_FREEZE(value);
+ RB_OBJ_SET_FROZEN_SHAREABLE(value);
return value;
}
case PM_FALSE_NODE:
@@ -776,7 +787,7 @@ pm_static_literal_value(rb_iseq_t *iseq, const pm_node_t *node, const pm_scope_n
rb_hash_bulk_insert(RARRAY_LEN(array), RARRAY_CONST_PTR(array), value);
value = rb_obj_hide(value);
- OBJ_FREEZE(value);
+ RB_OBJ_SET_FROZEN_SHAREABLE(value);
return value;
}
case PM_IMAGINARY_NODE:
@@ -1445,7 +1456,7 @@ pm_compile_hash_elements(rb_iseq_t *iseq, const pm_node_t *node, const pm_node_l
VALUE hash = rb_hash_new_with_size(RARRAY_LEN(ary) / 2);
rb_hash_bulk_insert(RARRAY_LEN(ary), RARRAY_CONST_PTR(ary), hash);
hash = rb_obj_hide(hash);
- OBJ_FREEZE(hash);
+ RB_OBJ_SET_FROZEN_SHAREABLE(hash);
// Emit optimized code.
FLUSH_CHUNK;
@@ -2860,8 +2871,10 @@ pm_compile_pattern(rb_iseq_t *iseq, pm_scope_node_t *scope_node, const pm_node_t
PUSH_INSN(ret, location, putnil);
}
else {
+ rb_obj_hide(keys);
+ RB_OBJ_SET_FROZEN_SHAREABLE(keys);
PUSH_INSN1(ret, location, duparray, keys);
- RB_OBJ_WRITTEN(iseq, Qundef, rb_obj_hide(keys));
+ RB_OBJ_WRITTEN(iseq, Qundef, keys);
}
PUSH_SEND(ret, location, rb_intern("deconstruct_keys"), INT2FIX(1));
@@ -2897,6 +2910,7 @@ pm_compile_pattern(rb_iseq_t *iseq, pm_scope_node_t *scope_node, const pm_node_t
{
VALUE operand = rb_str_freeze(rb_sprintf("key not found: %+"PRIsVALUE, symbol));
+ RB_OBJ_SET_SHAREABLE(operand);
PUSH_INSN1(ret, location, putobject, operand);
}
@@ -5545,6 +5559,7 @@ pm_compile_constant_read(rb_iseq_t *iseq, VALUE name, const pm_location_t *name_
if (ISEQ_COMPILE_DATA(iseq)->option->inline_const_cache) {
ISEQ_BODY(iseq)->ic_size++;
VALUE segments = rb_ary_new_from_args(1, name);
+ RB_OBJ_SET_SHAREABLE(segments);
PUSH_INSN1(ret, location, opt_getconstant_path, segments);
}
else {
@@ -5758,6 +5773,9 @@ pm_compile_shareable_constant_value(rb_iseq_t *iseq, const pm_node_t *node, cons
if (shareability & PM_SHAREABLE_CONSTANT_NODE_FLAGS_LITERAL) {
PUSH_INSN1(ret, location, putspecialobject, INT2FIX(VM_SPECIAL_OBJECT_VMCORE));
PUSH_SEQ(ret, value_seq);
+ if (!RB_OBJ_SHAREABLE_P(path)) {
+ RB_OBJ_SET_SHAREABLE(path);
+ }
PUSH_INSN1(ret, location, putobject, path);
PUSH_SEND_WITH_FLAG(ret, location, rb_intern("ensure_shareable"), INT2FIX(2), INT2FIX(VM_CALL_ARGS_SIMPLE));
}
@@ -7108,6 +7126,7 @@ pm_compile_array_node(rb_iseq_t *iseq, const pm_node_t *node, const pm_node_list
if (!popped) {
if (elements->size) {
VALUE value = pm_static_literal_value(iseq, node, scope_node);
+ RB_OBJ_SET_FROZEN_SHAREABLE(value);
PUSH_INSN1(ret, *location, duparray, value);
}
else {
@@ -7238,7 +7257,7 @@ pm_compile_array_node(rb_iseq_t *iseq, const pm_node_t *node, const pm_node_list
rb_ary_push(tmp_array, pm_static_literal_value(iseq, elements->nodes[index++], scope_node));
index--; // about to be incremented by for loop
- OBJ_FREEZE(tmp_array);
+ RB_OBJ_SET_FROZEN_SHAREABLE(tmp_array);
// Emit the optimized code.
FLUSH_CHUNK;
@@ -7465,7 +7484,6 @@ static VALUE
pm_compile_case_node_dispatch(rb_iseq_t *iseq, VALUE dispatch, const pm_node_t *node, LABEL *label, const pm_scope_node_t *scope_node)
{
VALUE key = Qundef;
-
switch (PM_NODE_TYPE(node)) {
case PM_FLOAT_NODE: {
key = pm_static_literal_value(iseq, node, scope_node);
@@ -7498,7 +7516,6 @@ pm_compile_case_node_dispatch(rb_iseq_t *iseq, VALUE dispatch, const pm_node_t *
if (NIL_P(rb_hash_lookup(dispatch, key))) {
rb_hash_aset(dispatch, key, ((VALUE) label) | 1);
}
-
return dispatch;
}
@@ -7726,6 +7743,7 @@ pm_compile_case_node(rb_iseq_t *iseq, const pm_case_node_t *cast, const pm_node_
// optimization.
if (dispatch != Qundef) {
PUSH_INSN(ret, location, dup);
+ RB_OBJ_SET_SHAREABLE(dispatch); // Unusually, this hash is shareable but not frozen, because compile.c still modifies it. The Hash instance is not accessible from Ruby code, so it is safe to leave it mutable.
PUSH_INSN2(ret, location, opt_case_dispatch, dispatch, else_label);
LABEL_REF(else_label);
}
@@ -8953,6 +8971,7 @@ pm_compile_node(rb_iseq_t *iseq, const pm_node_t *node, LINK_ANCHOR *const ret,
if (ISEQ_COMPILE_DATA(iseq)->option->inline_const_cache && ((parts = pm_constant_path_parts(node, scope_node)) != Qnil)) {
ISEQ_BODY(iseq)->ic_size++;
+ RB_OBJ_SET_SHAREABLE(parts);
PUSH_INSN1(ret, location, opt_getconstant_path, parts);
}
else {
@@ -10068,6 +10087,7 @@ pm_compile_node(rb_iseq_t *iseq, const pm_node_t *node, LINK_ANCHOR *const ret,
exclude_end
);
+ RB_OBJ_SET_SHAREABLE(val);
PUSH_INSN1(ret, location, putobject, val);
}
}
diff --git a/ractor.c b/ractor.c
index 8e7f7d6497fb44..68ef0a87ac4cc7 100644
--- a/ractor.c
+++ b/ractor.c
@@ -207,28 +207,34 @@ static void
ractor_mark(void *ptr)
{
rb_ractor_t *r = (rb_ractor_t *)ptr;
+ bool checking_shareable = rb_gc_checking_shareable();
// mark received messages
ractor_sync_mark(r);
rb_gc_mark(r->loc);
rb_gc_mark(r->name);
- rb_gc_mark(r->r_stdin);
- rb_gc_mark(r->r_stdout);
- rb_gc_mark(r->r_stderr);
- rb_gc_mark(r->verbose);
- rb_gc_mark(r->debug);
- rb_hook_list_mark(&r->pub.hooks);
-
- if (r->threads.cnt > 0) {
- rb_thread_t *th = 0;
- ccan_list_for_each(&r->threads.set, th, lt_node) {
- VM_ASSERT(th != NULL);
- rb_gc_mark(th->self);
+
+ if (!checking_shareable) {
+ // these references may point at unshareable objects; skip them while the GC is verifying shareability
+ rb_gc_mark(r->r_stdin);
+ rb_gc_mark(r->r_stdout);
+ rb_gc_mark(r->r_stderr);
+ rb_gc_mark(r->verbose);
+ rb_gc_mark(r->debug);
+
+ rb_hook_list_mark(&r->pub.hooks);
+
+ if (r->threads.cnt > 0) {
+ rb_thread_t *th = 0;
+ ccan_list_for_each(&r->threads.set, th, lt_node) {
+ VM_ASSERT(th != NULL);
+ rb_gc_mark(th->self);
+ }
}
- }
- ractor_local_storage_mark(r);
+ ractor_local_storage_mark(r);
+ }
}
static void
@@ -493,8 +499,9 @@ ractor_init(rb_ractor_t *r, VALUE name, VALUE loc)
}
name = rb_str_new_frozen(name);
}
- r->name = name;
+ if (!SPECIAL_CONST_P(loc)) RB_OBJ_SET_SHAREABLE(loc);
r->loc = loc;
+ r->name = name;
}
void
@@ -1113,6 +1120,44 @@ rb_ractor_hooks(rb_ractor_t *cr)
return &cr->pub.hooks;
}
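+// Set FL_SHAREABLE on obj and, when its generic fields live in an
+// imemo_fields object, on that fields object as well. Unlike
+// rb_obj_set_shareable(), no shareability verification is asserted here.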
+static void
+rb_obj_set_shareable_no_assert(VALUE obj)
+{
+ FL_SET_RAW(obj, FL_SHAREABLE);
+
+ if (rb_obj_exivar_p(obj)) {
+ VALUE fields = rb_obj_fields_no_ractor_check(obj);
+ if (imemo_type_p(fields, imemo_fields)) {
+ // no recursive mark
+ FL_SET_RAW(fields, FL_SHAREABLE);
+ }
+ }
+}
+
+#ifndef STRICT_VERIFY_SHAREABLE
+#define STRICT_VERIFY_SHAREABLE 0
+#endif
+
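+// Always returns true so it can be used inside RUBY_ASSERT(); with
+// STRICT_VERIFY_SHAREABLE enabled it also runs rb_gc_verify_shareable(obj).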
+bool
+rb_ractor_verify_shareable(VALUE obj)
+{
+#if STRICT_VERIFY_SHAREABLE
+ rb_gc_verify_shareable(obj);
+#endif
+ return true;
+}
+
+VALUE
+rb_obj_set_shareable(VALUE obj)
+{
+ RUBY_ASSERT(!RB_SPECIAL_CONST_P(obj));
+
+ rb_obj_set_shareable_no_assert(obj);
+ RUBY_ASSERT(rb_ractor_verify_shareable(obj));
+
+ return obj;
+}
+
/// traverse function
// 2: stop search
@@ -1239,6 +1284,8 @@ obj_traverse_i(VALUE obj, struct obj_traverse_data *data)
case T_ARRAY:
{
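+ // detach from any shared buffer so this array owns its elements before
+ // traversal (presumably to keep the shareable flag off borrowed buffers)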
+ rb_ary_cancel_sharing(obj);
+
for (int i = 0; i < RARRAY_LENINT(obj); i++) {
VALUE e = rb_ary_entry(obj, i);
if (obj_traverse_i(e, data)) return 1;
@@ -1422,7 +1469,11 @@ make_shareable_check_shareable(VALUE obj)
static enum obj_traverse_iterator_result
mark_shareable(VALUE obj)
{
- FL_SET_RAW(obj, RUBY_FL_SHAREABLE);
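+ // give the string its own buffer first, so the shareable flag never
+ // covers memory borrowed from another string (assumed rationale)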
+ if (RB_TYPE_P(obj, T_STRING)) {
+ rb_str_make_independent(obj);
+ }
+
+ rb_obj_set_shareable_no_assert(obj);
return traverse_cont;
}
diff --git a/re.c b/re.c
index 13d7f0ef9e5fc7..e4d30d2939596e 100644
--- a/re.c
+++ b/re.c
@@ -3369,10 +3369,13 @@ static void
reg_set_source(VALUE reg, VALUE str, rb_encoding *enc)
{
rb_encoding *regenc = rb_enc_get(reg);
+
if (regenc != enc) {
- str = rb_enc_associate(rb_str_dup(str), enc = regenc);
+ VALUE dup = rb_str_dup(str);
+ str = rb_enc_associate(dup, enc = regenc);
}
- RB_OBJ_WRITE(reg, &RREGEXP(reg)->src, rb_fstring(str));
+ str = rb_fstring(str);
+ RB_OBJ_WRITE(reg, &RREGEXP(reg)->src, str);
}
static int
diff --git a/string.c b/string.c
index 1236057ad177ed..1de87071272938 100644
--- a/string.c
+++ b/string.c
@@ -45,6 +45,7 @@
#include "ruby/re.h"
#include "ruby/thread.h"
#include "ruby/util.h"
+#include "ruby/ractor.h"
#include "ruby_assert.h"
#include "shape.h"
#include "vm_sync.h"
@@ -537,7 +538,10 @@ fstring_concurrent_set_create(VALUE str, void *data)
ENC_CODERANGE_SET(str, coderange);
RBASIC(str)->flags |= RSTRING_FSTR;
-
+ if (!RB_OBJ_SHAREABLE_P(str)) {
+ RB_OBJ_SET_SHAREABLE(str);
+ }
+ RUBY_ASSERT((rb_gc_verify_shareable(str), 1));
RUBY_ASSERT(RB_TYPE_P(str, T_STRING));
RUBY_ASSERT(OBJ_FROZEN(str));
RUBY_ASSERT(!FL_TEST_RAW(str, STR_FAKESTR));
@@ -583,6 +587,8 @@ register_fstring(VALUE str, bool copy, bool force_precompute_hash)
RUBY_ASSERT(!rb_objspace_garbage_object_p(result));
RUBY_ASSERT(RB_TYPE_P(result, T_STRING));
RUBY_ASSERT(OBJ_FROZEN(result));
+ RUBY_ASSERT(RB_OBJ_SHAREABLE_P(result));
+ RUBY_ASSERT((rb_gc_verify_shareable(result), 1));
RUBY_ASSERT(!FL_TEST_RAW(result, STR_FAKESTR));
RUBY_ASSERT(RBASIC_CLASS(result) == rb_cString);
@@ -1555,6 +1561,10 @@ rb_str_tmp_frozen_no_embed_acquire(VALUE orig)
RBASIC(str)->flags |= RBASIC(orig)->flags & STR_NOFREE;
RBASIC(orig)->flags &= ~STR_NOFREE;
STR_SET_SHARED(orig, str);
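+ // orig now shares str's buffer, so str must be shareable whenever orig is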
+ if (RB_OBJ_SHAREABLE_P(orig)) {
+ RB_OBJ_SET_SHAREABLE(str);
+ RUBY_ASSERT((rb_gc_verify_shareable(str), 1));
+ }
}
RSTRING(str)->len = RSTRING(orig)->len;
@@ -1604,6 +1614,7 @@ heap_str_make_shared(VALUE klass, VALUE orig)
{
RUBY_ASSERT(!STR_EMBED_P(orig));
RUBY_ASSERT(!STR_SHARED_P(orig));
+ RUBY_ASSERT(!RB_OBJ_SHAREABLE_P(orig));
VALUE str = str_alloc_heap(klass);
STR_SET_LEN(str, RSTRING_LEN(orig));
@@ -1613,7 +1624,7 @@ heap_str_make_shared(VALUE klass, VALUE orig)
RBASIC(orig)->flags &= ~STR_NOFREE;
STR_SET_SHARED(orig, str);
if (klass == 0)
- FL_UNSET_RAW(str, STR_BORROWED);
+ FL_UNSET_RAW(str, STR_BORROWED);
return str;
}
@@ -1663,7 +1674,12 @@ str_new_frozen_buffer(VALUE klass, VALUE orig, int copy_encoding)
TERM_FILL(RSTRING_END(str), TERM_LEN(orig));
}
else {
- str = heap_str_make_shared(klass, orig);
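+ // heap_str_make_shared() asserts that orig is not shareable and mutates
+ // orig, so copy the contents of a shareable orig instead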
+ if (RB_OBJ_SHAREABLE_P(orig)) {
+ str = str_new(klass, RSTRING_PTR(orig), RSTRING_LEN(orig));
+ }
+ else {
+ str = heap_str_make_shared(klass, orig);
+ }
}
}
@@ -12676,7 +12692,9 @@ rb_enc_literal_str(const char *ptr, long len, rb_encoding *enc)
}
struct RString fake_str = {RBASIC_INIT};
- return register_fstring(rb_setup_fake_str(&fake_str, ptr, len, enc), true, true);
+ VALUE str = register_fstring(rb_setup_fake_str(&fake_str, ptr, len, enc), true, true);
+ RUBY_ASSERT(RB_OBJ_SHAREABLE_P(str) && (rb_gc_verify_shareable(str), 1));
+ return str;
}
VALUE
diff --git a/symbol.c b/symbol.c
index a498f742aa1600..11602ee33b7c5d 100644
--- a/symbol.c
+++ b/symbol.c
@@ -19,6 +19,7 @@
#include "internal/vm.h"
#include "probes.h"
#include "ruby/encoding.h"
+#include "ruby/ractor.h"
#include "ruby/st.h"
#include "symbol.h"
#include "vm_sync.h"
@@ -200,7 +201,6 @@ dup_string_for_create(VALUE str)
OBJ_FREEZE(str);
str = rb_fstring(str);
-
return str;
}
@@ -255,8 +255,8 @@ sym_set_create(VALUE sym, void *data)
rb_encoding *enc = rb_enc_get(str);
rb_enc_set_index((VALUE)obj, rb_enc_to_index(enc));
- OBJ_FREEZE((VALUE)obj);
RB_OBJ_WRITE((VALUE)obj, &obj->fstr, str);
+ RB_OBJ_SET_FROZEN_SHAREABLE((VALUE)obj);
int id = rb_str_symname_type(str, IDSET_ATTRSET_FOR_INTERN);
if (id < 0) id = ID_INTERNAL;
diff --git a/test/ruby/test_zjit.rb b/test/ruby/test_zjit.rb
index 44f010d0561002..13c78170177a20 100644
--- a/test/ruby/test_zjit.rb
+++ b/test/ruby/test_zjit.rb
@@ -1664,6 +1664,28 @@ def test() = @foo = 1
}
end
+ def test_getclassvariable
+ assert_compiles '42', %q{
+ class Foo
+ def self.test = @@x
+ end
+
+ Foo.class_variable_set(:@@x, 42)
+ Foo.test()
+ }
+ end
+
+ def test_setclassvariable
+ assert_compiles '42', %q{
+ class Foo
+ def self.test = @@x = 42
+ end
+
+ Foo.test()
+ Foo.class_variable_get(:@@x)
+ }
+ end
+
def test_attr_reader
assert_compiles '[4, 4]', %q{
class C
diff --git a/test/rubygems/test_gem_request.rb b/test/rubygems/test_gem_request.rb
index 244f9d90feec46..cd0a416e79c02d 100644
--- a/test/rubygems/test_gem_request.rb
+++ b/test/rubygems/test_gem_request.rb
@@ -248,7 +248,7 @@ def test_fetch_basic_oauth_encoded
auth_header = conn.payload["Authorization"]
assert_equal "Basic #{base64_encode64("{DEScede}pass:x-oauth-basic")}".strip, auth_header
- assert_includes @ui.output, "GET https://REDACTED:x-oauth-basic@example.rubygems/specs.#{Gem.marshal_version}"
+ assert_includes @ui.output, "GET https://REDACTED@example.rubygems/specs.#{Gem.marshal_version}"
end
def test_fetch_head
diff --git a/test/rubygems/test_gem_uri.rb b/test/rubygems/test_gem_uri.rb
index 1253ebc6de4a45..ce633c99b63b0a 100644
--- a/test/rubygems/test_gem_uri.rb
+++ b/test/rubygems/test_gem_uri.rb
@@ -21,7 +21,7 @@ def test_redacted_with_token
end
def test_redacted_with_user_x_oauth_basic
- assert_equal "https://REDACTED:x-oauth-basic@example.com", Gem::Uri.new("https://token:x-oauth-basic@example.com").redacted.to_s
+ assert_equal "https://REDACTED@example.com", Gem::Uri.new("https://token:x-oauth-basic@example.com").redacted.to_s
end
def test_redacted_without_credential
diff --git a/tool/bundler/vendor_gems.rb b/tool/bundler/vendor_gems.rb
index b3e06d3f096047..72546faf31632e 100644
--- a/tool/bundler/vendor_gems.rb
+++ b/tool/bundler/vendor_gems.rb
@@ -14,4 +14,4 @@
gem "timeout", "0.4.3"
gem "thor", "1.4.0"
gem "tsort", "0.2.0"
-gem "uri", "1.0.3"
+gem "uri", "1.0.4"
diff --git a/tool/bundler/vendor_gems.rb.lock b/tool/bundler/vendor_gems.rb.lock
index d911764a472649..b1a2351e949e4d 100644
--- a/tool/bundler/vendor_gems.rb.lock
+++ b/tool/bundler/vendor_gems.rb.lock
@@ -40,7 +40,7 @@ GEM
thor (1.4.0)
timeout (0.4.3)
tsort (0.2.0)
- uri (1.0.3)
+ uri (1.0.4)
PLATFORMS
java
@@ -64,7 +64,7 @@ DEPENDENCIES
thor (= 1.4.0)
timeout (= 0.4.3)
tsort (= 0.2.0)
- uri (= 1.0.3)
+ uri (= 1.0.4)
CHECKSUMS
connection_pool (2.4.1) sha256=0f40cf997091f1f04ff66da67eabd61a9fe0d4928b9a3645228532512fab62f4
@@ -80,7 +80,7 @@ CHECKSUMS
thor (1.4.0) sha256=8763e822ccb0f1d7bee88cde131b19a65606657b847cc7b7b4b82e772bcd8a3d
timeout (0.4.3) sha256=9509f079b2b55fe4236d79633bd75e34c1c1e7e3fb4b56cb5fda61f80a0fe30e
tsort (0.2.0) sha256=9650a793f6859a43b6641671278f79cfead60ac714148aabe4e3f0060480089f
- uri (1.0.3) sha256=e9f2244608eea2f7bc357d954c65c910ce0399ca5e18a7a29207ac22d8767011
+ uri (1.0.4) sha256=34485d137c079f8753a0ca1d883841a7ba2e5fae556e3c30c2aab0dde616344b
BUNDLED WITH
4.0.0.dev
diff --git a/variable.c b/variable.c
index 5fc98fb0879024..bab423e95029f7 100644
--- a/variable.c
+++ b/variable.c
@@ -321,6 +321,7 @@ rb_mod_set_temporary_name(VALUE mod, VALUE name)
}
name = rb_str_new_frozen(name);
+ RB_OBJ_SET_SHAREABLE(name);
// Set the temporary classpath to the given name:
RB_VM_LOCKING() {
@@ -432,6 +433,7 @@ rb_set_class_path_string(VALUE klass, VALUE under, VALUE name)
str = build_const_pathname(str, name);
}
+ RB_OBJ_SET_SHAREABLE(str);
RCLASS_SET_CLASSPATH(klass, str, permanent);
}
@@ -1552,7 +1554,7 @@ obj_transition_too_complex(VALUE obj, st_table *table)
break;
default:
{
- VALUE fields_obj = rb_imemo_fields_new_complex_tbl(obj, table);
+ VALUE fields_obj = rb_imemo_fields_new_complex_tbl(obj, table, RB_OBJ_SHAREABLE_P(obj));
RBASIC_SET_SHAPE_ID(fields_obj, shape_id);
rb_obj_replace_fields(obj, fields_obj);
}
@@ -1731,7 +1733,7 @@ static VALUE
imemo_fields_complex_from_obj(VALUE owner, VALUE source_fields_obj, shape_id_t shape_id)
{
attr_index_t len = source_fields_obj ? RSHAPE_LEN(RBASIC_SHAPE_ID(source_fields_obj)) : 0;
- VALUE fields_obj = rb_imemo_fields_new_complex(owner, len + 1);
+ VALUE fields_obj = rb_imemo_fields_new_complex(owner, len + 1, RB_OBJ_SHAREABLE_P(owner));
rb_field_foreach(source_fields_obj, imemo_fields_complex_from_obj_i, (st_data_t)fields_obj, false);
RBASIC_SET_SHAPE_ID(fields_obj, shape_id);
@@ -1742,7 +1744,7 @@ imemo_fields_complex_from_obj(VALUE owner, VALUE source_fields_obj, shape_id_t s
static VALUE
imemo_fields_copy_capa(VALUE owner, VALUE source_fields_obj, attr_index_t new_size)
{
- VALUE fields_obj = rb_imemo_fields_new(owner, new_size);
+ VALUE fields_obj = rb_imemo_fields_new(owner, new_size, RB_OBJ_SHAREABLE_P(owner));
if (source_fields_obj) {
attr_index_t fields_count = RSHAPE_LEN(RBASIC_SHAPE_ID(source_fields_obj));
VALUE *fields = rb_imemo_fields_ptr(fields_obj);
@@ -2227,7 +2229,7 @@ rb_copy_generic_ivar(VALUE dest, VALUE obj)
return;
}
- new_fields_obj = rb_imemo_fields_new(dest, RSHAPE_CAPACITY(dest_shape_id));
+ new_fields_obj = rb_imemo_fields_new(dest, RSHAPE_CAPACITY(dest_shape_id), RB_OBJ_SHAREABLE_P(dest));
VALUE *src_buf = rb_imemo_fields_ptr(fields_obj);
VALUE *dest_buf = rb_imemo_fields_ptr(new_fields_obj);
rb_shape_copy_fields(new_fields_obj, dest_buf, dest_shape_id, src_buf, src_shape_id);
@@ -3797,6 +3799,7 @@ static void
set_namespace_path(VALUE named_namespace, VALUE namespace_path)
{
struct rb_id_table *const_table = RCLASS_CONST_TBL(named_namespace);
+ RB_OBJ_SET_SHAREABLE(namespace_path);
RB_VM_LOCKING() {
RCLASS_WRITE_CLASSPATH(named_namespace, namespace_path, true);
@@ -3875,7 +3878,8 @@ const_set(VALUE klass, ID id, VALUE val)
set_namespace_path(val, build_const_path(parental_path, id));
}
else if (!parental_path_permanent && NIL_P(val_path)) {
- RCLASS_SET_CLASSPATH(val, build_const_path(parental_path, id), false);
+ VALUE path = build_const_path(parental_path, id);
+ RCLASS_SET_CLASSPATH(val, path, false);
}
}
}
@@ -4488,7 +4492,7 @@ static attr_index_t
class_fields_ivar_set(VALUE klass, VALUE fields_obj, ID id, VALUE val, bool concurrent, VALUE *new_fields_obj, bool *new_ivar_out)
{
const VALUE original_fields_obj = fields_obj;
- fields_obj = original_fields_obj ? original_fields_obj : rb_imemo_fields_new(klass, 1);
+ fields_obj = original_fields_obj ? original_fields_obj : rb_imemo_fields_new(klass, 1, true);
shape_id_t current_shape_id = RBASIC_SHAPE_ID(fields_obj);
shape_id_t next_shape_id = current_shape_id; // for too_complex
diff --git a/vm.c b/vm.c
index 3281fad0181916..c11ac9f7a10510 100644
--- a/vm.c
+++ b/vm.c
@@ -311,7 +311,7 @@ vm_cref_new0(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_
VM_ASSERT(singleton || klass);
- rb_cref_t *cref = IMEMO_NEW(rb_cref_t, imemo_cref, refinements);
+ rb_cref_t *cref = SHAREABLE_IMEMO_NEW(rb_cref_t, imemo_cref, refinements);
cref->klass_or_self = klass;
cref->next = use_prev_prev ? CREF_NEXT(prev_cref) : prev_cref;
*((rb_scope_visibility_t *)&cref->scope_visi) = scope_visi;
@@ -1311,7 +1311,7 @@ rb_proc_dup(VALUE self)
break;
}
- if (RB_OBJ_SHAREABLE_P(self)) FL_SET_RAW(procval, RUBY_FL_SHAREABLE);
+ if (RB_OBJ_SHAREABLE_P(self)) RB_OBJ_SET_SHAREABLE(procval);
RB_GC_GUARD(self); /* for: body = rb_proc_dup(body) */
return procval;
}
@@ -1375,7 +1375,19 @@ env_copy(const VALUE *src_ep, VALUE read_only_variables)
const rb_env_t *copied_env = vm_env_new(ep, env_body, src_env->env_size, src_env->iseq);
// Copy after allocations above, since they can move objects in src_ep.
- RB_OBJ_WRITE(copied_env, &ep[VM_ENV_DATA_INDEX_ME_CREF], src_ep[VM_ENV_DATA_INDEX_ME_CREF]);
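+ // a per-frame svar must not escape into the copied env; keep only its
+ // cref_or_me (or Qfalse) in the ME/CREF slot (assumed intent)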
+ VALUE svar_val = src_ep[VM_ENV_DATA_INDEX_ME_CREF];
+ if (imemo_type_p(svar_val, imemo_svar)) {
+ const struct vm_svar *svar = (struct vm_svar *)svar_val;
+
+ if (svar->cref_or_me) {
+ svar_val = svar->cref_or_me;
+ }
+ else {
+ svar_val = Qfalse;
+ }
+ }
+ RB_OBJ_WRITE(copied_env, &ep[VM_ENV_DATA_INDEX_ME_CREF], svar_val);
+
ep[VM_ENV_DATA_INDEX_FLAGS] = src_ep[VM_ENV_DATA_INDEX_FLAGS] | VM_ENV_FLAG_ISOLATED;
if (!VM_ENV_LOCAL_P(src_ep)) {
VM_ENV_FLAGS_SET(ep, VM_ENV_FLAG_LOCAL);
@@ -1427,6 +1439,7 @@ env_copy(const VALUE *src_ep, VALUE read_only_variables)
ep[VM_ENV_DATA_INDEX_SPECVAL] = VM_BLOCK_HANDLER_NONE;
}
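+ // the isolated copy should only reference shareable values at this point,
+ // so it can be flagged shareable itself (assumed invariant)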
+ RB_OBJ_SET_SHAREABLE((VALUE)copied_env);
return copied_env;
}
@@ -1493,9 +1506,10 @@ rb_proc_isolate_bang(VALUE self, VALUE replace_self)
proc_isolate_env(self, proc, Qfalse);
proc->is_isolated = TRUE;
+ RB_OBJ_WRITE(self, &proc->block.as.captured.self, Qnil);
}
- FL_SET_RAW(self, RUBY_FL_SHAREABLE);
+ RB_OBJ_SET_SHAREABLE(self);
return self;
}
@@ -1537,10 +1551,16 @@ rb_proc_ractor_make_shareable(VALUE self, VALUE replace_self)
proc_isolate_env(self, proc, read_only_variables);
proc->is_isolated = TRUE;
}
+ else {
+ VALUE proc_self = vm_block_self(vm_proc_block(self));
+ if (!rb_ractor_shareable_p(proc_self)) {
+ rb_raise(rb_eRactorIsolationError,
+ "Proc's self is not shareable: %" PRIsVALUE,
+ self);
+ }
+ }
- rb_obj_freeze(self);
- FL_SET_RAW(self, RUBY_FL_SHAREABLE);
-
+ RB_OBJ_SET_FROZEN_SHAREABLE(self);
return self;
}
diff --git a/vm_callinfo.h b/vm_callinfo.h
index 80b5b9a0966c1e..6701b17d761cda 100644
--- a/vm_callinfo.h
+++ b/vm_callinfo.h
@@ -340,7 +340,7 @@ vm_cc_new(VALUE klass,
enum vm_cc_type type)
{
cc_check_class(klass);
- struct rb_callcache *cc = IMEMO_NEW(struct rb_callcache, imemo_callcache, klass);
+ struct rb_callcache *cc = SHAREABLE_IMEMO_NEW(struct rb_callcache, imemo_callcache, klass);
*((struct rb_callable_method_entry_struct **)&cc->cme_) = (struct rb_callable_method_entry_struct *)cme;
*((vm_call_handler *)&cc->call_) = call;
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index b99ffdc4fdbbe6..cbed6143297b81 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -6568,12 +6568,15 @@ vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep, const
return;
}
- struct iseq_inline_constant_cache_entry *ice = IMEMO_NEW(struct iseq_inline_constant_cache_entry, imemo_constcache, 0);
+ struct iseq_inline_constant_cache_entry *ice = SHAREABLE_IMEMO_NEW(struct iseq_inline_constant_cache_entry, imemo_constcache, 0);
RB_OBJ_WRITE(ice, &ice->value, val);
ice->ic_cref = vm_get_const_key_cref(reg_ep);
- if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
- RB_OBJ_WRITE(iseq, &ic->entry, ice);
+ if (rb_ractor_shareable_p(val)) {
+ RUBY_ASSERT((rb_gc_verify_shareable(val), 1));
+ ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
+ }
+ RB_OBJ_WRITE(iseq, &ic->entry, ice);
RUBY_ASSERT(pc >= ISEQ_BODY(iseq)->iseq_encoded);
unsigned pos = (unsigned)(pc - ISEQ_BODY(iseq)->iseq_encoded);
rb_yjit_constant_ic_update(iseq, ic, pos);
@@ -6585,6 +6588,7 @@ rb_vm_opt_getconstant_path(rb_execution_context_t *ec, rb_control_frame_t *const
VALUE val;
const ID *segments = ic->segments;
struct iseq_inline_constant_cache_entry *ice = ic->entry;
+
if (ice && vm_ic_hit_p(ice, GET_EP())) {
val = ice->value;
@@ -6615,7 +6619,14 @@ vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
VALUE val;
is->once.running_thread = th;
val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
+ // TODO: confirm that it is shareable
+
+ if (RB_FL_ABLE(val)) {
+ RB_OBJ_SET_SHAREABLE(val);
+ }
+
RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);
+
/* is->once.running_thread is cleared by vm_once_clear() */
is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
return val;
diff --git a/vm_method.c b/vm_method.c
index 60c273ff2f34fb..2cd41bd3774046 100644
--- a/vm_method.c
+++ b/vm_method.c
@@ -682,7 +682,7 @@ rb_vm_ci_lookup(ID mid, unsigned int flag, unsigned int argc, const struct rb_ca
((struct rb_callinfo_kwarg *)kwarg)->references++;
}
- struct rb_callinfo *new_ci = IMEMO_NEW(struct rb_callinfo, imemo_callinfo, (VALUE)kwarg);
+ struct rb_callinfo *new_ci = SHAREABLE_IMEMO_NEW(struct rb_callinfo, imemo_callinfo, (VALUE)kwarg);
new_ci->mid = mid;
new_ci->flag = flag;
new_ci->argc = argc;
@@ -1008,7 +1008,9 @@ rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *de
if (cfp && (line = rb_vm_get_sourceline(cfp))) {
VALUE location = rb_ary_new3(2, rb_iseq_path(cfp->iseq), INT2FIX(line));
- RB_OBJ_WRITE(me, &def->body.attr.location, rb_ary_freeze(location));
+ rb_ary_freeze(location);
+ RB_OBJ_SET_SHAREABLE(location);
+ RB_OBJ_WRITE(me, &def->body.attr.location, location);
}
else {
VM_ASSERT(def->body.attr.location == 0);
@@ -1099,7 +1101,7 @@ rb_method_entry_alloc(ID called_id, VALUE owner, VALUE defined_class, rb_method_
// not negative cache
VM_ASSERT_TYPE2(defined_class, T_CLASS, T_ICLASS);
}
- rb_method_entry_t *me = IMEMO_NEW(rb_method_entry_t, imemo_ment, defined_class);
+ rb_method_entry_t *me = SHAREABLE_IMEMO_NEW(rb_method_entry_t, imemo_ment, defined_class);
*((rb_method_definition_t **)&me->def) = def;
me->called_id = called_id;
me->owner = owner;
diff --git a/zjit/src/codegen.rs b/zjit/src/codegen.rs
index c00bdb474ecbeb..029e144303f999 100644
--- a/zjit/src/codegen.rs
+++ b/zjit/src/codegen.rs
@@ -426,6 +426,8 @@ fn gen_insn(cb: &mut CodeBlock, jit: &mut JITState, asm: &mut Assembler, functio
&Insn::GetLocal { ep_offset, level, use_sp, .. } => gen_getlocal(asm, ep_offset, level, use_sp),
&Insn::SetLocal { val, ep_offset, level } => no_output!(gen_setlocal(asm, opnd!(val), function.type_of(val), ep_offset, level)),
Insn::GetConstantPath { ic, state } => gen_get_constant_path(jit, asm, *ic, &function.frame_state(*state)),
+ Insn::GetClassVar { id, ic, state } => gen_getclassvar(jit, asm, *id, *ic, &function.frame_state(*state)),
+ Insn::SetClassVar { id, val, ic, state } => no_output!(gen_setclassvar(jit, asm, *id, opnd!(val), *ic, &function.frame_state(*state))),
Insn::SetIvar { self_val, id, val, state } => no_output!(gen_setivar(jit, asm, opnd!(self_val), *id, opnd!(val), &function.frame_state(*state))),
Insn::SideExit { state, reason } => no_output!(gen_side_exit(jit, asm, reason, &function.frame_state(*state))),
Insn::PutSpecialObject { value_type } => gen_putspecialobject(asm, *value_type),
@@ -832,6 +834,20 @@ fn gen_setivar(jit: &mut JITState, asm: &mut Assembler, recv: Opnd, id: ID, val:
asm_ccall!(asm, rb_ivar_set, recv, id.0.into(), val);
}
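+/// Look up a class variable; calls rb_vm_getclassvariable, which is not leaf-safe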
+fn gen_getclassvar(jit: &mut JITState, asm: &mut Assembler, id: ID, ic: *const iseq_inline_cvar_cache_entry, state: &FrameState) -> Opnd {
+ gen_prepare_non_leaf_call(jit, asm, state);
+
+ let iseq = asm.load(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_ISEQ));
+ asm_ccall!(asm, rb_vm_getclassvariable, iseq, CFP, id.0.into(), Opnd::const_ptr(ic))
+}
+
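+/// Set a class variable; calls rb_vm_setclassvariable, which is not leaf-safe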
+fn gen_setclassvar(jit: &mut JITState, asm: &mut Assembler, id: ID, val: Opnd, ic: *const iseq_inline_cvar_cache_entry, state: &FrameState) {
+ gen_prepare_non_leaf_call(jit, asm, state);
+
+ let iseq = asm.load(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_ISEQ));
+ asm_ccall!(asm, rb_vm_setclassvariable, iseq, CFP, id.0.into(), val, Opnd::const_ptr(ic));
+}
+
/// Look up global variables
fn gen_getglobal(jit: &mut JITState, asm: &mut Assembler, id: ID, state: &FrameState) -> Opnd {
// `Warning` module's method `warn` can be called when reading certain global variables
diff --git a/zjit/src/hir.rs b/zjit/src/hir.rs
index 834a33d23c33d2..9f422c0146cce9 100644
--- a/zjit/src/hir.rs
+++ b/zjit/src/hir.rs
@@ -648,6 +648,11 @@ pub enum Insn {
GetSpecialSymbol { symbol_type: SpecialBackrefSymbol, state: InsnId },
GetSpecialNumber { nth: u64, state: InsnId },
+ /// Get a class variable `id`
+ GetClassVar { id: ID, ic: *const iseq_inline_cvar_cache_entry, state: InsnId },
+ /// Set a class variable `id` to `val`
+ SetClassVar { id: ID, val: InsnId, ic: *const iseq_inline_cvar_cache_entry, state: InsnId },
+
/// Own a FrameState so that instructions can look up their dominating FrameState when
/// generating deopt side-exits and frame reconstruction metadata. Does not directly generate
/// any code.
@@ -811,7 +816,7 @@ impl Insn {
match self {
Insn::Jump(_)
| Insn::IfTrue { .. } | Insn::IfFalse { .. } | Insn::EntryPoint { .. } | Insn::Return { .. }
- | Insn::PatchPoint { .. } | Insn::SetIvar { .. } | Insn::ArrayExtend { .. }
+ | Insn::PatchPoint { .. } | Insn::SetIvar { .. } | Insn::SetClassVar { .. } | Insn::ArrayExtend { .. }
| Insn::ArrayPush { .. } | Insn::SideExit { .. } | Insn::SetGlobal { .. }
| Insn::SetLocal { .. } | Insn::Throw { .. } | Insn::IncrCounter(_) | Insn::IncrCounterPtr { .. }
| Insn::CheckInterrupts { .. } | Insn::GuardBlockParamProxy { .. } => false,
@@ -1130,6 +1135,8 @@ impl<'a> std::fmt::Display for InsnPrinter<'a> {
Insn::SetLocal { val, level, ep_offset } => write!(f, "SetLocal l{level}, EP@{ep_offset}, {val}"),
Insn::GetSpecialSymbol { symbol_type, .. } => write!(f, "GetSpecialSymbol {symbol_type:?}"),
Insn::GetSpecialNumber { nth, .. } => write!(f, "GetSpecialNumber {nth}"),
+ Insn::GetClassVar { id, .. } => write!(f, "GetClassVar :{}", id.contents_lossy()),
+ Insn::SetClassVar { id, val, .. } => write!(f, "SetClassVar :{}, {val}", id.contents_lossy()),
Insn::ToArray { val, .. } => write!(f, "ToArray {val}"),
Insn::ToNewArray { val, .. } => write!(f, "ToNewArray {val}"),
Insn::ArrayExtend { left, right, .. } => write!(f, "ArrayExtend {left}, {right}"),
@@ -1716,6 +1723,8 @@ impl Function {
&LoadIvarEmbedded { self_val, id, index } => LoadIvarEmbedded { self_val: find!(self_val), id, index },
&LoadIvarExtended { self_val, id, index } => LoadIvarExtended { self_val: find!(self_val), id, index },
&SetIvar { self_val, id, val, state } => SetIvar { self_val: find!(self_val), id, val: find!(val), state },
+ &GetClassVar { id, ic, state } => GetClassVar { id, ic, state },
+ &SetClassVar { id, val, ic, state } => SetClassVar { id, val: find!(val), ic, state },
&SetLocal { val, ep_offset, level } => SetLocal { val: find!(val), ep_offset, level },
&GetSpecialSymbol { symbol_type, state } => GetSpecialSymbol { symbol_type, state },
&GetSpecialNumber { nth, state } => GetSpecialNumber { nth, state },
@@ -1765,7 +1774,7 @@ impl Function {
Insn::Param { .. } => unimplemented!("params should not be present in block.insns"),
Insn::SetGlobal { .. } | Insn::Jump(_) | Insn::EntryPoint { .. }
| Insn::IfTrue { .. } | Insn::IfFalse { .. } | Insn::Return { .. } | Insn::Throw { .. }
- | Insn::PatchPoint { .. } | Insn::SetIvar { .. } | Insn::ArrayExtend { .. }
+ | Insn::PatchPoint { .. } | Insn::SetIvar { .. } | Insn::SetClassVar { .. } | Insn::ArrayExtend { .. }
| Insn::ArrayPush { .. } | Insn::SideExit { .. } | Insn::SetLocal { .. } | Insn::IncrCounter(_)
| Insn::CheckInterrupts { .. } | Insn::GuardBlockParamProxy { .. } | Insn::IncrCounterPtr { .. } =>
panic!("Cannot infer type of instruction with no output: {}", self.insns[insn.0]),
@@ -1848,6 +1857,7 @@ impl Function {
Insn::LoadIvarExtended { .. } => types::BasicObject,
Insn::GetSpecialSymbol { .. } => types::BasicObject,
Insn::GetSpecialNumber { .. } => types::BasicObject,
+ Insn::GetClassVar { .. } => types::BasicObject,
Insn::ToNewArray { .. } => types::ArrayExact,
Insn::ToArray { .. } => types::ArrayExact,
Insn::ObjToString { .. } => types::BasicObject,
@@ -3156,6 +3166,13 @@ impl Function {
worklist.push_back(val);
worklist.push_back(state);
}
+ &Insn::GetClassVar { state, .. } => {
+ worklist.push_back(state);
+ }
+ &Insn::SetClassVar { val, state, .. } => {
+ worklist.push_back(val);
+ worklist.push_back(state);
+ }
&Insn::ArrayPush { array, val, state } => {
worklist.push_back(array);
worklist.push_back(val);
@@ -4639,6 +4656,20 @@ pub fn iseq_to_hir(iseq: *const rb_iseq_t) -> Result {
let val = state.stack_pop()?;
fun.push_insn(block, Insn::SetIvar { self_val: self_param, id, val, state: exit_id });
}
+ YARVINSN_getclassvariable => {
+ let id = ID(get_arg(pc, 0).as_u64());
+ let ic = get_arg(pc, 1).as_ptr();
+ let exit_id = fun.push_insn(block, Insn::Snapshot { state: exit_state });
+ let result = fun.push_insn(block, Insn::GetClassVar { id, ic, state: exit_id });
+ state.stack_push(result);
+ }
+ YARVINSN_setclassvariable => {
+ let id = ID(get_arg(pc, 0).as_u64());
+ let ic = get_arg(pc, 1).as_ptr();
+ let exit_id = fun.push_insn(block, Insn::Snapshot { state: exit_state });
+ let val = state.stack_pop()?;
+ fun.push_insn(block, Insn::SetClassVar { id, val, ic, state: exit_id });
+ }
YARVINSN_opt_reverse => {
// Reverse the order of the top N stack items.
let n = get_arg(pc, 0).as_usize();
@@ -7429,6 +7460,59 @@ mod tests {
assert_eq!(VALUE::fixnum_from_usize(1), result);
}
+ #[test]
+ fn test_getclassvariable() {
+ eval("
+ class Foo
+ def self.test = @@foo
+ end
+ ");
+ let iseq = crate::cruby::with_rubyvm(|| get_method_iseq("Foo", "test"));
+ assert!(iseq_contains_opcode(iseq, YARVINSN_getclassvariable), "iseq Foo.test does not contain getclassvariable");
+ let function = iseq_to_hir(iseq).unwrap();
+ assert_snapshot!(hir_string_function(&function), @r"
+ fn test@:3:
+ bb0():
+ EntryPoint interpreter
+ v1:BasicObject = LoadSelf
+ Jump bb2(v1)
+ bb1(v4:BasicObject):
+ EntryPoint JIT(0)
+ Jump bb2(v4)
+ bb2(v6:BasicObject):
+ v11:BasicObject = GetClassVar :@@foo
+ CheckInterrupts
+ Return v11
+ ");
+ }
+
+ #[test]
+ fn test_setclassvariable() {
+ eval("
+ class Foo
+ def self.test = @@foo = 42
+ end
+ ");
+ let iseq = crate::cruby::with_rubyvm(|| get_method_iseq("Foo", "test"));
+ assert!(iseq_contains_opcode(iseq, YARVINSN_setclassvariable), "iseq Foo.test does not contain setclassvariable");
+ let function = iseq_to_hir(iseq).unwrap();
+ assert_snapshot!(hir_string_function(&function), @r"
+ fn test@:3:
+ bb0():
+ EntryPoint interpreter
+ v1:BasicObject = LoadSelf
+ Jump bb2(v1)
+ bb1(v4:BasicObject):
+ EntryPoint JIT(0)
+ Jump bb2(v4)
+ bb2(v6:BasicObject):
+ v10:Fixnum[42] = Const Value(42)
+ SetClassVar :@@foo, v10
+ CheckInterrupts
+ Return v10
+ ");
+ }
+
#[test]
fn test_setglobal() {
eval("