diff --git a/class.c b/class.c
index 77f2fba51647bc..74dcbe5fa7b99a 100644
--- a/class.c
+++ b/class.c
@@ -79,6 +79,95 @@
#define METACLASS_OF(k) RBASIC(k)->klass
#define SET_METACLASS_OF(k, cls) RBASIC_SET_CLASS(k, cls)
+static enum rb_id_table_iterator_result
+cvar_table_free_i(VALUE value, void *ctx)
+{
+ xfree((void *)value);
+ return ID_TABLE_CONTINUE;
+}
+
+void
+rb_class_classext_free(VALUE klass, rb_classext_t *ext, bool is_prime)
+{
+ struct rb_id_table *tbl;
+
+ rb_id_table_free(RCLASSEXT_M_TBL(ext));
+
+ if (!RCLASSEXT_SHARED_CONST_TBL(ext) && (tbl = RCLASSEXT_CONST_TBL(ext)) != NULL) {
+ rb_free_const_table(tbl);
+ }
+
+ if ((tbl = RCLASSEXT_CVC_TBL(ext)) != NULL) {
+ rb_id_table_foreach_values(tbl, cvar_table_free_i, NULL);
+ rb_id_table_free(tbl);
+ }
+
+ rb_class_classext_free_subclasses(ext, klass);
+
+ if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
+ RUBY_ASSERT(is_prime); // superclasses should only be used on prime
+ xfree(RCLASSEXT_SUPERCLASSES(ext));
+ }
+
+ if (!is_prime) { // the prime classext will be freed with RClass
+ xfree(ext);
+ }
+}
+
+void
+rb_iclass_classext_free(VALUE klass, rb_classext_t *ext, bool is_prime)
+{
+ if (RCLASSEXT_ICLASS_IS_ORIGIN(ext) && !RCLASSEXT_ICLASS_ORIGIN_SHARED_MTBL(ext)) {
+ /* Method table is not shared for origin iclasses of classes */
+ rb_id_table_free(RCLASSEXT_M_TBL(ext));
+ }
+
+ if (RCLASSEXT_CALLABLE_M_TBL(ext) != NULL) {
+ rb_id_table_free(RCLASSEXT_CALLABLE_M_TBL(ext));
+ }
+
+ rb_class_classext_free_subclasses(ext, klass);
+
+ if (!is_prime) { // the prime classext will be freed with RClass
+ xfree(ext);
+ }
+}
+
+struct rb_class_set_namespace_classext_args {
+ VALUE obj;
+ rb_classext_t *ext;
+};
+
+static int
+rb_class_set_namespace_classext_update(st_data_t *key_ptr, st_data_t *val_ptr, st_data_t a, int existing)
+{
+ struct rb_class_set_namespace_classext_args *args = (struct rb_class_set_namespace_classext_args *)a;
+
+ if (existing) {
+ if (BUILTIN_TYPE(args->obj) == T_ICLASS) {
+ rb_iclass_classext_free(args->obj, (rb_classext_t *)*val_ptr, false);
+ }
+ else {
+ rb_class_classext_free(args->obj, (rb_classext_t *)*val_ptr, false);
+ }
+ }
+
+ *val_ptr = (st_data_t)args->ext;
+
+ return ST_CONTINUE;
+}
+
+void
+rb_class_set_namespace_classext(VALUE obj, const rb_namespace_t *ns, rb_classext_t *ext)
+{
+ struct rb_class_set_namespace_classext_args args = {
+ .obj = obj,
+ .ext = ext,
+ };
+
+ st_update(RCLASS_CLASSEXT_TBL(obj), (st_data_t)ns->ns_object, rb_class_set_namespace_classext_update, (st_data_t)&args);
+}
+
RUBY_EXTERN rb_serial_t ruby_vm_global_cvar_state;
struct duplicate_id_tbl_data {
diff --git a/depend b/depend
index 1f9f0c31eba695..fa61de77a00e62 100644
--- a/depend
+++ b/depend
@@ -9302,6 +9302,7 @@ namespace.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h
namespace.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h
namespace.$(OBJEXT): {$(VPATH)}config.h
namespace.$(OBJEXT): {$(VPATH)}constant.h
+namespace.$(OBJEXT): {$(VPATH)}darray.h
namespace.$(OBJEXT): {$(VPATH)}debug_counter.h
namespace.$(OBJEXT): {$(VPATH)}defines.h
namespace.$(OBJEXT): {$(VPATH)}encoding.h
diff --git a/doc/string/scan.rdoc b/doc/string/scan.rdoc
new file mode 100644
index 00000000000000..cbede5280f5c49
--- /dev/null
+++ b/doc/string/scan.rdoc
@@ -0,0 +1,36 @@
+Matches a pattern against +self+:
+
+- If +pattern+ is a Regexp, the pattern used is +pattern+ itself.
+- If +pattern+ is a string, the pattern used is Regexp.quote(pattern).
+
+Generates a collection of matching results
+and updates {regexp-related global variables}[rdoc-ref:Regexp@Global+Variables]:
+
+- If the pattern contains no groups, each result is a matched substring.
+- If the pattern contains groups, each result is an array
+ containing a matched substring for each group.
+
+With no block given, returns an array of the results:
+
+ 'cruel world'.scan(/\w+/) # => ["cruel", "world"]
+ 'cruel world'.scan(/.../) # => ["cru", "el ", "wor"]
+ 'cruel world'.scan(/(...)/) # => [["cru"], ["el "], ["wor"]]
+ 'cruel world'.scan(/(..)(..)/) # => [["cr", "ue"], ["l ", "wo"]]
+ 'тест'.scan(/../) # => ["те", "ст"]
+ 'こんにちは'.scan(/../) # => ["こん", "にち"]
+ 'abracadabra'.scan('ab') # => ["ab", "ab"]
+ 'abracadabra'.scan('nosuch') # => []
+
+With a block given, calls the block with each result; returns +self+:
+
+ 'cruel world'.scan(/\w+/) {|w| p w }
+ # => "cruel"
+ # => "world"
+ 'cruel world'.scan(/(.)(.)/) {|x, y| p [x, y] }
+ # => ["c", "r"]
+ # => ["u", "e"]
+ # => ["l", " "]
+ # => ["w", "o"]
+ # => ["r", "l"]
+
+Related: see {Converting to Non-String}[rdoc-ref:String@Converting+to+Non--5CString].
diff --git a/doc/string/scrub.rdoc b/doc/string/scrub.rdoc
index 1a5b1c79d07e22..5ace376cdbec85 100644
--- a/doc/string/scrub.rdoc
+++ b/doc/string/scrub.rdoc
@@ -1,25 +1,22 @@
Returns a copy of +self+ with each invalid byte sequence replaced
by the given +replacement_string+.
-With no block given and no argument, replaces each invalid sequence
-with the default replacement string
-("�" for a Unicode encoding, '?' otherwise):
+With no block given, replaces each invalid sequence
+with the given +default_replacement_string+
+(by default, "�" for a Unicode encoding, '?' otherwise):
- s = "foo\x81\x81bar"
- s.scrub # => "foo��bar"
+  "foo\x81\x81bar".scrub                             # => "foo��bar"
+ "foo\x81\x81bar".force_encoding('US-ASCII').scrub # => "foo??bar"
+ "foo\x81\x81bar".scrub('xyzzy') # => "fooxyzzyxyzzybar"
-With no block given and argument +replacement_string+ given,
-replaces each invalid sequence with that string:
+With a block given, calls the block with each invalid sequence,
+and replaces that sequence with the return value of the block:
- "foo\x81\x81bar".scrub('xyzzy') # => "fooxyzzyxyzzybar"
+ "foo\x81\x81bar".scrub {|sequence| p sequence; 'XYZZY' } # => "fooXYZZYXYZZYbar"
-With a block given, replaces each invalid sequence with the value
-of the block:
-
- "foo\x81\x81bar".scrub {|bytes| p bytes; 'XYZZY' }
- # => "fooXYZZYXYZZYbar"
-
-Output:
+Output:
"\x81"
"\x81"
+
+Related: see {Converting to New String}[rdoc-ref:String@Converting+to+New+String].
diff --git a/gc.c b/gc.c
index 42625e10046b59..897447c808e3d1 100644
--- a/gc.c
+++ b/gc.c
@@ -1160,13 +1160,6 @@ rb_objspace_data_type_name(VALUE obj)
}
}
-static enum rb_id_table_iterator_result
-cvar_table_free_i(VALUE value, void *ctx)
-{
- xfree((void *)value);
- return ID_TABLE_CONTINUE;
-}
-
static void
io_fptr_finalize(void *fptr)
{
@@ -1233,26 +1226,9 @@ struct classext_foreach_args {
static void
classext_free(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg)
{
- struct rb_id_table *tbl;
struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
- rb_id_table_free(RCLASSEXT_M_TBL(ext));
-
- if (!RCLASSEXT_SHARED_CONST_TBL(ext) && (tbl = RCLASSEXT_CONST_TBL(ext)) != NULL) {
- rb_free_const_table(tbl);
- }
- if ((tbl = RCLASSEXT_CVC_TBL(ext)) != NULL) {
- rb_id_table_foreach_values(tbl, cvar_table_free_i, NULL);
- rb_id_table_free(tbl);
- }
- rb_class_classext_free_subclasses(ext, args->klass);
- if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
- RUBY_ASSERT(is_prime); // superclasses should only be used on prime
- xfree(RCLASSEXT_SUPERCLASSES(ext));
- }
- if (!is_prime) { // the prime classext will be freed with RClass
- xfree(ext);
- }
+ rb_class_classext_free(args->klass, ext, is_prime);
}
static void
@@ -1260,19 +1236,7 @@ classext_iclass_free(rb_classext_t *ext, bool is_prime, VALUE namespace, void *a
{
struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
- if (RCLASSEXT_ICLASS_IS_ORIGIN(ext) && !RCLASSEXT_ICLASS_ORIGIN_SHARED_MTBL(ext)) {
- /* Method table is not shared for origin iclasses of classes */
- rb_id_table_free(RCLASSEXT_M_TBL(ext));
- }
- if (RCLASSEXT_CALLABLE_M_TBL(ext) != NULL) {
- rb_id_table_free(RCLASSEXT_CALLABLE_M_TBL(ext));
- }
-
- rb_class_classext_free_subclasses(ext, args->klass);
-
- if (!is_prime) { // the prime classext will be freed with RClass
- xfree(ext);
- }
+ rb_iclass_classext_free(args->klass, ext, is_prime);
}
bool
diff --git a/internal/class.h b/internal/class.h
index f5c5142b452d78..a791672cadcacf 100644
--- a/internal/class.h
+++ b/internal/class.h
@@ -317,6 +317,8 @@ RCLASS_SET_CLASSEXT_TBL(VALUE klass, st_table *tbl)
rb_classext_t * rb_class_duplicate_classext(rb_classext_t *orig, VALUE obj, const rb_namespace_t *ns);
void rb_class_ensure_writable(VALUE obj);
+void rb_class_set_namespace_classext(VALUE obj, const rb_namespace_t *ns, rb_classext_t *ext);
+
static inline int
RCLASS_SET_NAMESPACE_CLASSEXT(VALUE obj, const rb_namespace_t *ns, rb_classext_t *ext)
{
@@ -332,7 +334,9 @@ RCLASS_SET_NAMESPACE_CLASSEXT(VALUE obj, const rb_namespace_t *ns, rb_classext_t
if (rb_st_table_size(tbl) == 0) {
first_set = 1;
}
- rb_st_insert(tbl, (st_data_t)ns->ns_object, (st_data_t)ext);
+
+ rb_class_set_namespace_classext(obj, ns, ext);
+
return first_set;
}
@@ -515,6 +519,9 @@ void rb_undef_methods_from(VALUE klass, VALUE super);
VALUE rb_class_inherited(VALUE, VALUE);
VALUE rb_keyword_error_new(const char *, VALUE);
+void rb_class_classext_free(VALUE klass, rb_classext_t *ext, bool is_prime);
+void rb_iclass_classext_free(VALUE klass, rb_classext_t *ext, bool is_prime);
+
RUBY_SYMBOL_EXPORT_BEGIN
/* for objspace */
diff --git a/jit.c b/jit.c
index b7cb05d1c34efd..2ff38c28e2d6d3 100644
--- a/jit.c
+++ b/jit.c
@@ -16,12 +16,20 @@
#include "vm_sync.h"
#include "internal/fixnum.h"
-// Field offsets for the RObject struct
-enum robject_offsets {
+enum jit_bindgen_constants {
+ // Field offsets for the RObject struct
ROBJECT_OFFSET_AS_HEAP_FIELDS = offsetof(struct RObject, as.heap.fields),
ROBJECT_OFFSET_AS_ARY = offsetof(struct RObject, as.ary),
+
+ // Field offsets for the RString struct
+ RUBY_OFFSET_RSTRING_LEN = offsetof(struct RString, len)
};
+// Manually bound in rust since this is out-of-range of `int`,
+// so this can't be in a `enum`, and we avoid `static const`
+// to avoid allocating storage for the constant.
+const shape_id_t rb_invalid_shape_id = INVALID_SHAPE_ID;
+
unsigned int
rb_iseq_encoded_size(const rb_iseq_t *iseq)
{
@@ -157,6 +165,21 @@ rb_get_def_original_id(const rb_method_definition_t *def)
return def->original_id;
}
+VALUE
+rb_get_def_bmethod_proc(rb_method_definition_t *def)
+{
+ RUBY_ASSERT(def->type == VM_METHOD_TYPE_BMETHOD);
+ return def->body.bmethod.proc;
+}
+
+rb_proc_t *
+rb_jit_get_proc_ptr(VALUE procv)
+{
+ rb_proc_t *proc;
+ GetProcPtr(procv, proc);
+ return proc;
+}
+
int
rb_get_mct_argc(const rb_method_cfunc_t *mct)
{
diff --git a/namespace.c b/namespace.c
index d4a990cb386e08..b85cbf57157924 100644
--- a/namespace.c
+++ b/namespace.c
@@ -16,6 +16,7 @@
#include "ruby/internal/globals.h"
#include "ruby/util.h"
#include "vm_core.h"
+#include "darray.h"
#include
@@ -207,6 +208,15 @@ free_loading_table_entry(st_data_t key, st_data_t value, st_data_t arg)
return ST_DELETE;
}
+static int
+free_loaded_feature_index_i(st_data_t key, st_data_t value, st_data_t arg)
+{
+ if (!FIXNUM_P(value)) {
+ rb_darray_free((void *)value);
+ }
+ return ST_CONTINUE;
+}
+
static void
namespace_root_free(void *ptr)
{
@@ -218,6 +228,7 @@ namespace_root_free(void *ptr)
}
if (ns->loaded_features_index) {
+ st_foreach(ns->loaded_features_index, free_loaded_feature_index_i, 0);
st_free_table(ns->loaded_features_index);
}
}
diff --git a/shape.h b/shape.h
index fdc2b3ddd6ff80..a20da1baa55668 100644
--- a/shape.h
+++ b/shape.h
@@ -47,9 +47,9 @@ enum shape_id_fl_type {
#undef RBIMPL_SHAPE_ID_FL
};
-// This masks allows to check if a shape_id contains any ivar.
-// It rely on ROOT_SHAPE_WITH_OBJ_ID==1.
-enum {
+// This mask allows to check if a shape_id contains any ivar.
+// It relies on ROOT_SHAPE_WITH_OBJ_ID==1.
+enum shape_id_mask {
SHAPE_ID_HAS_IVAR_MASK = SHAPE_ID_FL_TOO_COMPLEX | (SHAPE_ID_OFFSET_MASK - 1),
};
diff --git a/string.c b/string.c
index 65b9d407f7b914..92541622633211 100644
--- a/string.c
+++ b/string.c
@@ -6667,13 +6667,14 @@ rb_str_getbyte(VALUE str, VALUE index)
* call-seq:
* setbyte(index, integer) -> integer
*
- * Sets the byte at zero-based +index+ to +integer+; returns +integer+:
+ * Sets the byte at zero-based offset +index+ to the value of the given +integer+;
+ * returns +integer+:
*
- * s = 'abcde' # => "abcde"
- * s.setbyte(0, 98) # => 98
- * s # => "bbcde"
+ * s = 'xyzzy'
+ * s.setbyte(2, 129) # => 129
+ * s # => "xy\x81zy"
*
- * Related: String#getbyte.
+ * Related: see {Modifying}[rdoc-ref:String@Modifying].
*/
VALUE
rb_str_setbyte(VALUE str, VALUE index, VALUE value)
@@ -10443,7 +10444,7 @@ rb_str_rstrip_bang(VALUE str)
* call-seq:
* rstrip -> new_string
*
- * Returns a copy of the receiver with trailing whitespace removed;
+ * Returns a copy of +self+ with trailing whitespace removed;
* see {Whitespace in Strings}[rdoc-ref:String@Whitespace+in+Strings]:
*
* whitespace = "\x00\t\n\v\f\r "
@@ -10451,7 +10452,7 @@ rb_str_rstrip_bang(VALUE str)
* s # => "\u0000\t\n\v\f\r abc\u0000\t\n\v\f\r "
* s.rstrip # => "\u0000\t\n\v\f\r abc"
*
- * Related: String#lstrip, String#strip.
+ * Related: see {Converting to New String}[rdoc-ref:String@Converting+to+New+String].
*/
static VALUE
@@ -10596,40 +10597,10 @@ scan_once(VALUE str, VALUE pat, long *start, int set_backref_str)
/*
* call-seq:
- * scan(string_or_regexp) -> array
- * scan(string_or_regexp) {|matches| ... } -> self
- *
- * Matches a pattern against +self+; the pattern is:
- *
- * - +string_or_regexp+ itself, if it is a Regexp.
- * - Regexp.quote(string_or_regexp), if +string_or_regexp+ is a string.
- *
- * Iterates through +self+, generating a collection of matching results:
- *
- * - If the pattern contains no groups, each result is the
- * matched string, $&.
- * - If the pattern contains groups, each result is an array
- * containing one entry per group.
- *
- * With no block given, returns an array of the results:
- *
- * s = 'cruel world'
- * s.scan(/\w+/) # => ["cruel", "world"]
- * s.scan(/.../) # => ["cru", "el ", "wor"]
- * s.scan(/(...)/) # => [["cru"], ["el "], ["wor"]]
- * s.scan(/(..)(..)/) # => [["cr", "ue"], ["l ", "wo"]]
+ * scan(pattern) -> array_of_results
+ * scan(pattern) {|result| ... } -> self
*
- * With a block given, calls the block with each result; returns +self+:
- *
- * s.scan(/\w+/) {|w| print "<<#{w}>> " }
- * print "\n"
- * s.scan(/(.)(.)/) {|x,y| print y, x }
- * print "\n"
- *
- * Output:
- *
- *    <<cruel>> <<world>>
- * rceu lowlr
+ * :include: doc/string/scan.rdoc
*
*/
@@ -11941,8 +11912,8 @@ enc_str_scrub(rb_encoding *enc, VALUE str, VALUE repl, int cr)
/*
* call-seq:
- * scrub(replacement_string = default_replacement) -> new_string
- * scrub{|bytes| ... } -> new_string
+ * scrub(replacement_string = default_replacement_string) -> new_string
+ * scrub{|sequence| ... } -> new_string
*
* :include: doc/string/scrub.rdoc
*
@@ -11957,11 +11928,15 @@ str_scrub(int argc, VALUE *argv, VALUE str)
/*
* call-seq:
- * scrub! -> self
- * scrub!(replacement_string = default_replacement) -> self
- * scrub!{|bytes| ... } -> self
+ * scrub!(replacement_string = default_replacement_string) -> self
+ * scrub!{|sequence| ... } -> self
*
- * Like String#scrub, except that any replacements are made in +self+.
+ * Like String#scrub, except that:
+ *
+ * - Any replacements are made in +self+.
+ * - Returns +self+.
+ *
+ * Related: see {Modifying}[rdoc-ref:String@Modifying].
*
*/
static VALUE
diff --git a/test/ruby/test_zjit.rb b/test/ruby/test_zjit.rb
index b0c717bc24b435..e151a022d1bc61 100644
--- a/test/ruby/test_zjit.rb
+++ b/test/ruby/test_zjit.rb
@@ -794,41 +794,71 @@ def test(x, y) = x | y
end
def test_fixnum_and
- assert_compiles '1', %q{
+ assert_compiles '[1, 2, 4]', %q{
def test(a, b) = a & b
- test(2, 2)
- test(2, 2)
- test(5, 3)
+ [
+ test(5, 3),
+ test(0b011, 0b110),
+ test(-0b011, 0b110)
+ ]
}, call_threshold: 2, insns: [:opt_and]
end
def test_fixnum_and_side_exit
- assert_compiles 'false', %q{
+ assert_compiles '[2, 2, false]', %q{
def test(a, b) = a & b
- test(2, 2)
- test(2, 2)
- test(true, false)
+ [
+ test(2, 2),
+ test(0b011, 0b110),
+ test(true, false)
+ ]
}, call_threshold: 2, insns: [:opt_and]
end
def test_fixnum_or
- assert_compiles '3', %q{
+ assert_compiles '[7, 3, -3]', %q{
def test(a, b) = a | b
- test(5, 3)
- test(5, 3)
- test(1, 2)
+ [
+ test(5, 3),
+ test(1, 2),
+ test(1, -4)
+ ]
}, call_threshold: 2, insns: [:opt_or]
end
def test_fixnum_or_side_exit
- assert_compiles 'true', %q{
+ assert_compiles '[3, 2, true]', %q{
def test(a, b) = a | b
- test(2, 2)
- test(2, 2)
- test(true, false)
+ [
+ test(1, 2),
+ test(2, 2),
+ test(true, false)
+ ]
}, call_threshold: 2, insns: [:opt_or]
end
+ def test_fixnum_xor
+ assert_compiles '[6, -8, 3]', %q{
+ def test(a, b) = a ^ b
+ [
+ test(5, 3),
+ test(-5, 3),
+ test(1, 2)
+ ]
+ }, call_threshold: 2
+ end
+
+ def test_fixnum_xor_side_exit
+ assert_compiles '[6, 6, true]', %q{
+ def test(a, b) = a ^ b
+ [
+ test(5, 3),
+ test(5, 3),
+ test(true, false)
+ ]
+ }, call_threshold: 2
+ end
+
def test_fixnum_mul
assert_compiles '12', %q{
C = 3
diff --git a/yjit.c b/yjit.c
index d0ab367b1c7bb1..807aec9e391172 100644
--- a/yjit.c
+++ b/yjit.c
@@ -38,11 +38,6 @@
#include
-// Field offsets for the RString struct
-enum rstring_offsets {
- RUBY_OFFSET_RSTRING_LEN = offsetof(struct RString, len)
-};
-
// We need size_t to have a known size to simplify code generation and FFI.
// TODO(alan): check this in configure.ac to fail fast on 32 bit platforms.
STATIC_ASSERT(64b_size_t, SIZE_MAX == UINT64_MAX);
@@ -234,14 +229,6 @@ rb_iseq_set_yjit_payload(const rb_iseq_t *iseq, void *payload)
iseq->body->yjit_payload = payload;
}
-rb_proc_t *
-rb_yjit_get_proc_ptr(VALUE procv)
-{
- rb_proc_t *proc;
- GetProcPtr(procv, proc);
- return proc;
-}
-
// This is defined only as a named struct inside rb_iseq_constant_body.
// By giving it a separate typedef, we make it nameable by rust-bindgen.
// Bindgen's temp/anon name isn't guaranteed stable.
@@ -249,13 +236,6 @@ typedef struct rb_iseq_param_keyword rb_seq_param_keyword_struct;
ID rb_get_symbol_id(VALUE namep);
-VALUE
-rb_get_def_bmethod_proc(rb_method_definition_t *def)
-{
- RUBY_ASSERT(def->type == VM_METHOD_TYPE_BMETHOD);
- return def->body.bmethod.proc;
-}
-
VALUE
rb_optimized_call(VALUE *recv, rb_execution_context_t *ec, int argc, VALUE *argv, int kw_splat, VALUE block_handler)
{
diff --git a/yjit/bindgen/src/main.rs b/yjit/bindgen/src/main.rs
index 2b4f48d73ec4bd..df287e1bf84a2e 100644
--- a/yjit/bindgen/src/main.rs
+++ b/yjit/bindgen/src/main.rs
@@ -91,7 +91,7 @@ fn main() {
.allowlist_function("rb_yjit_shape_capacity")
.allowlist_function("rb_yjit_shape_index")
.allowlist_var("SHAPE_ID_NUM_BITS")
- .allowlist_var("SHAPE_ID_HAS_IVAR_MASK")
+ .allowlist_type("shape_id_mask")
.allowlist_function("rb_funcall")
.allowlist_function("rb_obj_is_kind_of")
.allowlist_function("rb_obj_frozen_p")
@@ -265,7 +265,7 @@ fn main() {
.allowlist_function("rb_RSTRING_PTR")
.allowlist_function("rb_RSTRING_LEN")
.allowlist_function("rb_ENCODING_GET")
- .allowlist_function("rb_yjit_get_proc_ptr")
+ .allowlist_function("rb_jit_get_proc_ptr")
.allowlist_function("rb_yjit_exit_locations_dict")
.allowlist_function("rb_jit_icache_invalidate")
.allowlist_function("rb_optimized_call")
@@ -280,7 +280,7 @@ fn main() {
.allowlist_function("rb_jit_vm_lock_then_barrier")
.allowlist_function("rb_jit_vm_unlock")
.allowlist_function("rb_jit_for_each_iseq")
- .allowlist_type("robject_offsets")
+ .allowlist_type("jit_bindgen_constants")
.allowlist_function("rb_vm_barrier")
// Not sure why it's picking these up, but don't.
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
index bf758a4f62bd21..231655826109ab 100644
--- a/yjit/src/codegen.rs
+++ b/yjit/src/codegen.rs
@@ -7367,7 +7367,7 @@ fn gen_send_bmethod(
) -> Option<CodegenStatus> {
let procv = unsafe { rb_get_def_bmethod_proc((*cme).def) };
- let proc = unsafe { rb_yjit_get_proc_ptr(procv) };
+ let proc = unsafe { rb_jit_get_proc_ptr(procv) };
let proc_block = unsafe { &(*proc).block };
if proc_block.type_ != block_type_iseq {
diff --git a/yjit/src/cruby_bindings.inc.rs b/yjit/src/cruby_bindings.inc.rs
index 74661e7ade9bf8..272586a79f3fb5 100644
--- a/yjit/src/cruby_bindings.inc.rs
+++ b/yjit/src/cruby_bindings.inc.rs
@@ -634,8 +634,8 @@ pub const VM_ENV_FLAG_ISOLATED: vm_frame_env_flags = 16;
pub type vm_frame_env_flags = u32;
pub type attr_index_t = u16;
pub type shape_id_t = u32;
-pub const SHAPE_ID_HAS_IVAR_MASK: _bindgen_ty_37 = 134742014;
-pub type _bindgen_ty_37 = u32;
+pub const SHAPE_ID_HAS_IVAR_MASK: shape_id_mask = 134742014;
+pub type shape_id_mask = u32;
#[repr(C)]
pub struct rb_cvar_class_tbl_entry {
pub index: u32,
@@ -941,12 +941,11 @@ pub const DEFINED_REF: defined_type = 15;
pub const DEFINED_FUNC: defined_type = 16;
pub const DEFINED_CONST_FROM: defined_type = 17;
pub type defined_type = u32;
-pub const RUBY_OFFSET_RSTRING_LEN: rstring_offsets = 16;
-pub type rstring_offsets = u32;
pub type rb_seq_param_keyword_struct = rb_iseq_constant_body__bindgen_ty_1_rb_iseq_param_keyword;
-pub const ROBJECT_OFFSET_AS_HEAP_FIELDS: robject_offsets = 16;
-pub const ROBJECT_OFFSET_AS_ARY: robject_offsets = 16;
-pub type robject_offsets = u32;
+pub const ROBJECT_OFFSET_AS_HEAP_FIELDS: jit_bindgen_constants = 16;
+pub const ROBJECT_OFFSET_AS_ARY: jit_bindgen_constants = 16;
+pub const RUBY_OFFSET_RSTRING_LEN: jit_bindgen_constants = 16;
+pub type jit_bindgen_constants = u32;
pub type rb_iseq_param_keyword_struct = rb_iseq_constant_body__bindgen_ty_1_rb_iseq_param_keyword;
extern "C" {
pub fn ruby_xfree(ptr: *mut ::std::os::raw::c_void);
@@ -1122,9 +1121,7 @@ extern "C" {
pub fn rb_full_cfunc_return(ec: *mut rb_execution_context_t, return_value: VALUE);
pub fn rb_iseq_get_yjit_payload(iseq: *const rb_iseq_t) -> *mut ::std::os::raw::c_void;
pub fn rb_iseq_set_yjit_payload(iseq: *const rb_iseq_t, payload: *mut ::std::os::raw::c_void);
- pub fn rb_yjit_get_proc_ptr(procv: VALUE) -> *mut rb_proc_t;
pub fn rb_get_symbol_id(namep: VALUE) -> ID;
- pub fn rb_get_def_bmethod_proc(def: *mut rb_method_definition_t) -> VALUE;
pub fn rb_optimized_call(
recv: *mut VALUE,
ec: *mut rb_execution_context_t,
@@ -1200,6 +1197,8 @@ extern "C" {
) -> *mut rb_method_cfunc_t;
pub fn rb_get_def_method_serial(def: *const rb_method_definition_t) -> usize;
pub fn rb_get_def_original_id(def: *const rb_method_definition_t) -> ID;
+ pub fn rb_get_def_bmethod_proc(def: *mut rb_method_definition_t) -> VALUE;
+ pub fn rb_jit_get_proc_ptr(procv: VALUE) -> *mut rb_proc_t;
pub fn rb_get_mct_argc(mct: *const rb_method_cfunc_t) -> ::std::os::raw::c_int;
pub fn rb_get_mct_func(mct: *const rb_method_cfunc_t) -> *mut ::std::os::raw::c_void;
pub fn rb_get_def_iseq_ptr(def: *mut rb_method_definition_t) -> *const rb_iseq_t;
diff --git a/zjit.c b/zjit.c
index e17abc1b37ff29..fac087a605f4d1 100644
--- a/zjit.c
+++ b/zjit.c
@@ -235,10 +235,6 @@ rb_zjit_print_exception(void)
rb_warn("Ruby error: %"PRIsVALUE"", rb_funcall(exception, rb_intern("full_message"), 0));
}
-enum zjit_exported_constants {
- RB_INVALID_SHAPE_ID = INVALID_SHAPE_ID,
-};
-
bool
rb_zjit_singleton_class_p(VALUE klass)
{
diff --git a/zjit/bindgen/src/main.rs b/zjit/bindgen/src/main.rs
index f13b61acf04609..92f7a10e56f97c 100644
--- a/zjit/bindgen/src/main.rs
+++ b/zjit/bindgen/src/main.rs
@@ -100,6 +100,7 @@ fn main() {
.allowlist_function("rb_shape_id_offset")
.allowlist_function("rb_shape_get_iv_index")
.allowlist_function("rb_shape_transition_add_ivar_no_warnings")
+ .allowlist_var("rb_invalid_shape_id")
.allowlist_var("SHAPE_ID_NUM_BITS")
.allowlist_function("rb_obj_is_kind_of")
.allowlist_function("rb_obj_frozen_p")
@@ -293,9 +294,7 @@ fn main() {
.allowlist_function("rb_zjit_singleton_class_p")
.allowlist_function("rb_zjit_defined_ivar")
.allowlist_function("rb_zjit_insn_leaf")
- .allowlist_type("robject_offsets")
- .allowlist_type("rstring_offsets")
- .allowlist_type("zjit_exported_constants")
+ .allowlist_type("jit_bindgen_constants")
.allowlist_function("rb_assert_holding_vm_lock")
.allowlist_function("rb_jit_shape_too_complex_p")
.allowlist_function("rb_jit_multi_ractor_p")
@@ -303,7 +302,6 @@ fn main() {
.allowlist_function("rb_jit_vm_unlock")
.allowlist_function("rb_jit_for_each_iseq")
.allowlist_function("rb_iseq_reset_jit_func")
- .allowlist_type("robject_offsets")
.allowlist_function("rb_vm_barrier")
// Not sure why it's picking these up, but don't.
@@ -367,6 +365,7 @@ fn main() {
.allowlist_function("rb_get_mct_func")
.allowlist_function("rb_get_def_iseq_ptr")
.allowlist_function("rb_get_def_bmethod_proc")
+ .allowlist_function("rb_jit_get_proc_ptr")
.allowlist_function("rb_iseq_encoded_size")
.allowlist_function("rb_get_iseq_body_total_calls")
.allowlist_function("rb_get_iseq_body_local_iseq")
diff --git a/zjit/src/codegen.rs b/zjit/src/codegen.rs
index f7b335f1bfce89..402c500c8b17b9 100644
--- a/zjit/src/codegen.rs
+++ b/zjit/src/codegen.rs
@@ -398,6 +398,7 @@ fn gen_insn(cb: &mut CodeBlock, jit: &mut JITState, asm: &mut Assembler, functio
Insn::FixnumGe { left, right } => gen_fixnum_ge(asm, opnd!(left), opnd!(right)),
Insn::FixnumAnd { left, right } => gen_fixnum_and(asm, opnd!(left), opnd!(right)),
Insn::FixnumOr { left, right } => gen_fixnum_or(asm, opnd!(left), opnd!(right)),
+ Insn::FixnumXor { left, right } => gen_fixnum_xor(asm, opnd!(left), opnd!(right)),
&Insn::FixnumMod { left, right, state } => gen_fixnum_mod(jit, asm, opnd!(left), opnd!(right), &function.frame_state(state)),
Insn::IsNil { val } => gen_isnil(asm, opnd!(val)),
&Insn::IsMethodCfunc { val, cd, cfunc, state: _ } => gen_is_method_cfunc(jit, asm, opnd!(val), cd, cfunc),
@@ -423,7 +424,7 @@ fn gen_insn(cb: &mut CodeBlock, jit: &mut JITState, asm: &mut Assembler, functio
&Insn::GetLocal { ep_offset, level, use_sp, .. } => gen_getlocal(asm, ep_offset, level, use_sp),
&Insn::SetLocal { val, ep_offset, level } => no_output!(gen_setlocal(asm, opnd!(val), function.type_of(val), ep_offset, level)),
Insn::GetConstantPath { ic, state } => gen_get_constant_path(jit, asm, *ic, &function.frame_state(*state)),
- Insn::SetIvar { self_val, id, val, state: _ } => no_output!(gen_setivar(asm, opnd!(self_val), *id, opnd!(val))),
+ Insn::SetIvar { self_val, id, val, state } => no_output!(gen_setivar(jit, asm, opnd!(self_val), *id, opnd!(val), &function.frame_state(*state))),
Insn::SideExit { state, reason } => no_output!(gen_side_exit(jit, asm, reason, &function.frame_state(*state))),
Insn::PutSpecialObject { value_type } => gen_putspecialobject(asm, *value_type),
Insn::AnyToString { val, str, state } => gen_anytostring(asm, opnd!(val), opnd!(str), &function.frame_state(*state)),
@@ -694,12 +695,23 @@ fn gen_ccall_with_frame(
gen_spill_stack(jit, asm, state);
gen_spill_locals(jit, asm, state);
+ let block_handler_specval = if let Some(block_iseq) = blockiseq {
+ // Change cfp->block_code in the current frame. See vm_caller_setup_arg_block().
+ // VM_CFP_TO_CAPTURED_BLOCK then turns &cfp->self into a block handler.
+ // rb_captured_block->code.iseq aliases with cfp->block_code.
+ asm.store(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_BLOCK_CODE), VALUE::from(block_iseq).into());
+ let cfp_self_addr = asm.lea(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF));
+ asm.or(cfp_self_addr, Opnd::Imm(1))
+ } else {
+ VM_BLOCK_HANDLER_NONE.into()
+ };
+
gen_push_frame(asm, args.len(), state, ControlFrame {
recv: args[0],
iseq: None,
cme,
frame_type: VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL,
- block_iseq: blockiseq,
+ specval: block_handler_specval,
});
asm_comment!(asm, "switch to new SP register");
@@ -755,7 +767,7 @@ fn gen_ccall_variadic(
iseq: None,
cme,
frame_type: VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL,
- block_iseq: None,
+ specval: VM_BLOCK_HANDLER_NONE.into(),
});
asm_comment!(asm, "switch to new SP register");
@@ -791,8 +803,10 @@ fn gen_getivar(asm: &mut Assembler, recv: Opnd, id: ID) -> Opnd {
}
/// Emit an uncached instance variable store
-fn gen_setivar(asm: &mut Assembler, recv: Opnd, id: ID, val: Opnd) {
+fn gen_setivar(jit: &mut JITState, asm: &mut Assembler, recv: Opnd, id: ID, val: Opnd, state: &FrameState) {
gen_incr_counter(asm, Counter::dynamic_setivar_count);
+ // Setting an ivar can raise FrozenError, so we need proper frame state for exception handling.
+ gen_prepare_non_leaf_call(jit, asm, state);
asm_ccall!(asm, rb_ivar_set, recv, id.0.into(), val);
}
@@ -1141,14 +1155,28 @@ fn gen_send_without_block_direct(
gen_spill_locals(jit, asm, state);
gen_spill_stack(jit, asm, state);
+ let (frame_type, specval) = if VM_METHOD_TYPE_BMETHOD == unsafe { get_cme_def_type(cme) } {
+ // Extract EP from the Proc instance
+ let procv = unsafe { rb_get_def_bmethod_proc((*cme).def) };
+ let proc = unsafe { rb_jit_get_proc_ptr(procv) };
+ let proc_block = unsafe { &(*proc).block };
+ let capture = unsafe { proc_block.as_.captured.as_ref() };
+ let bmethod_frame_type = VM_FRAME_MAGIC_BLOCK | VM_FRAME_FLAG_BMETHOD | VM_FRAME_FLAG_LAMBDA;
+ // Tag the captured EP like VM_GUARDED_PREV_EP() in vm_call_iseq_bmethod()
+ let bmethod_specval = (capture.ep.addr() | 1).into();
+ (bmethod_frame_type, bmethod_specval)
+ } else {
+ (VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, VM_BLOCK_HANDLER_NONE.into())
+ };
+
// Set up the new frame
// TODO: Lazily materialize caller frames on side exits or when needed
gen_push_frame(asm, args.len(), state, ControlFrame {
recv,
iseq: Some(iseq),
cme,
- frame_type: VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL,
- block_iseq: None,
+ frame_type,
+ specval,
});
asm_comment!(asm, "switch to new SP register");
@@ -1460,6 +1488,13 @@ fn gen_fixnum_or(asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd) -> lir:
asm.or(left, right)
}
+/// Compile Fixnum ^ Fixnum
+fn gen_fixnum_xor(asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd) -> lir::Opnd {
+ // XOR and then re-tag the resulting fixnum
+ let out_val = asm.xor(left, right);
+ asm.add(out_val, Opnd::UImm(1))
+}
+
fn gen_fixnum_mod(jit: &mut JITState, asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd, state: &FrameState) -> lir::Opnd {
// Check for left % 0, which raises ZeroDivisionError
asm.cmp(right, Opnd::from(VALUE::fixnum_from_usize(0)));
@@ -1745,7 +1780,9 @@ struct ControlFrame {
    iseq: Option<IseqPtr>,
cme: *const rb_callable_method_entry_t,
frame_type: u32,
- block_iseq: Option,
+ /// The [`VM_ENV_DATA_INDEX_SPECVAL`] slot of the frame.
+ /// For the type of frames we push, block handler or the parent EP.
+ specval: lir::Opnd,
}
/// Compile an interpreter frame
@@ -1761,21 +1798,10 @@ fn gen_push_frame(asm: &mut Assembler, argc: usize, state: &FrameState, frame: C
0
};
let ep_offset = state.stack().len() as i32 + local_size - argc as i32 + VM_ENV_DATA_SIZE as i32 - 1;
+ // ep[-2]: CME
asm.store(Opnd::mem(64, SP, (ep_offset - 2) * SIZEOF_VALUE_I32), VALUE::from(frame.cme).into());
-
- let block_handler_opnd = if let Some(block_iseq) = frame.block_iseq {
- // Change cfp->block_code in the current frame. See vm_caller_setup_arg_block().
- // VM_CFP_TO_CAPTURED_BLOCK does &cfp->self, rb_captured_block->code.iseq aliases
- // with cfp->block_code.
- asm.store(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_BLOCK_CODE), VALUE::from(block_iseq).into());
- let cfp_self_addr = asm.lea(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF));
- asm.or(cfp_self_addr, Opnd::Imm(1))
- } else {
- VM_BLOCK_HANDLER_NONE.into()
- };
-
- // ep[-1]: block_handler or prev EP
- asm.store(Opnd::mem(64, SP, (ep_offset - 1) * SIZEOF_VALUE_I32), block_handler_opnd);
+ // ep[-1]: specval
+ asm.store(Opnd::mem(64, SP, (ep_offset - 1) * SIZEOF_VALUE_I32), frame.specval);
// ep[0]: ENV_FLAGS
asm.store(Opnd::mem(64, SP, ep_offset * SIZEOF_VALUE_I32), frame.frame_type.into());
@@ -1998,9 +2024,10 @@ fn function_stub_hit_body(cb: &mut CodeBlock, iseq_call: &IseqCallRef) -> Result
})?;
// We currently don't support JIT-to-JIT calls for ISEQs with optional arguments.
- // So we only need to use jit_entry_ptrs[0] for now. TODO: Support optional arguments.
- assert_eq!(1, jit_entry_ptrs.len());
- let jit_entry_ptr = jit_entry_ptrs[0];
+ // So we only need to use jit_entry_ptrs[0] for now. TODO(Shopify/ruby#817): Support optional arguments.
+ let Some(&jit_entry_ptr) = jit_entry_ptrs.get(0) else {
+ return Err(CompileError::JitToJitOptional)
+ };
// Update the stub to call the code pointer
let code_addr = jit_entry_ptr.raw_ptr(cb);
diff --git a/zjit/src/cruby.rs b/zjit/src/cruby.rs
index a84e408861fc54..645891496edbae 100644
--- a/zjit/src/cruby.rs
+++ b/zjit/src/cruby.rs
@@ -273,7 +273,7 @@ pub type IseqPtr = *const rb_iseq_t;
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct ShapeId(pub u32);
-pub const INVALID_SHAPE_ID: ShapeId = ShapeId(RB_INVALID_SHAPE_ID);
+pub const INVALID_SHAPE_ID: ShapeId = ShapeId(rb_invalid_shape_id);
impl ShapeId {
pub fn is_valid(self) -> bool {
@@ -450,6 +450,11 @@ impl VALUE {
!self.special_const_p()
}
+ /// Shareability between ractors. `RB_OBJ_SHAREABLE_P()`.
+ pub fn shareable_p(self) -> bool {
+ (self.builtin_flags() & RUBY_FL_SHAREABLE as usize) != 0
+ }
+
/// Return true if the value is a Ruby Fixnum (immediate-size integer)
pub fn fixnum_p(self) -> bool {
let VALUE(cval) = self;
@@ -1354,6 +1359,7 @@ pub(crate) mod ids {
name: ge content: b">="
name: and content: b"&"
name: or content: b"|"
+ name: xor content: b"^"
name: freeze
name: minusat content: b"-@"
name: aref content: b"[]"
diff --git a/zjit/src/cruby_bindings.inc.rs b/zjit/src/cruby_bindings.inc.rs
index af604661b299b3..c9e5bc8fd1ebcb 100644
--- a/zjit/src/cruby_bindings.inc.rs
+++ b/zjit/src/cruby_bindings.inc.rs
@@ -1,5 +1,142 @@
/* automatically generated by rust-bindgen 0.71.1 */
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct __BindgenBitfieldUnit {
+ storage: Storage,
+}
+impl __BindgenBitfieldUnit {
+ #[inline]
+ pub const fn new(storage: Storage) -> Self {
+ Self { storage }
+ }
+}
+impl __BindgenBitfieldUnit
+where
+ Storage: AsRef<[u8]> + AsMut<[u8]>,
+{
+ #[inline]
+ fn extract_bit(byte: u8, index: usize) -> bool {
+ let bit_index = if cfg!(target_endian = "big") {
+ 7 - (index % 8)
+ } else {
+ index % 8
+ };
+ let mask = 1 << bit_index;
+ byte & mask == mask
+ }
+ #[inline]
+ pub fn get_bit(&self, index: usize) -> bool {
+ debug_assert!(index / 8 < self.storage.as_ref().len());
+ let byte_index = index / 8;
+ let byte = self.storage.as_ref()[byte_index];
+ Self::extract_bit(byte, index)
+ }
+ #[inline]
+ pub unsafe fn raw_get_bit(this: *const Self, index: usize) -> bool {
+ debug_assert!(index / 8 < core::mem::size_of::());
+ let byte_index = index / 8;
+ let byte = *(core::ptr::addr_of!((*this).storage) as *const u8).offset(byte_index as isize);
+ Self::extract_bit(byte, index)
+ }
+ #[inline]
+ fn change_bit(byte: u8, index: usize, val: bool) -> u8 {
+ let bit_index = if cfg!(target_endian = "big") {
+ 7 - (index % 8)
+ } else {
+ index % 8
+ };
+ let mask = 1 << bit_index;
+ if val {
+ byte | mask
+ } else {
+ byte & !mask
+ }
+ }
+ #[inline]
+ pub fn set_bit(&mut self, index: usize, val: bool) {
+ debug_assert!(index / 8 < self.storage.as_ref().len());
+ let byte_index = index / 8;
+ let byte = &mut self.storage.as_mut()[byte_index];
+ *byte = Self::change_bit(*byte, index, val);
+ }
+ #[inline]
+ pub unsafe fn raw_set_bit(this: *mut Self, index: usize, val: bool) {
+ debug_assert!(index / 8 < core::mem::size_of::());
+ let byte_index = index / 8;
+ let byte =
+ (core::ptr::addr_of_mut!((*this).storage) as *mut u8).offset(byte_index as isize);
+ *byte = Self::change_bit(*byte, index, val);
+ }
+ #[inline]
+ pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
+ debug_assert!(bit_width <= 64);
+ debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
+ debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
+ let mut val = 0;
+ for i in 0..(bit_width as usize) {
+ if self.get_bit(i + bit_offset) {
+ let index = if cfg!(target_endian = "big") {
+ bit_width as usize - 1 - i
+ } else {
+ i
+ };
+ val |= 1 << index;
+ }
+ }
+ val
+ }
+ #[inline]
+ pub unsafe fn raw_get(this: *const Self, bit_offset: usize, bit_width: u8) -> u64 {
+ debug_assert!(bit_width <= 64);
+ debug_assert!(bit_offset / 8 < core::mem::size_of::());
+ debug_assert!((bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::());
+ let mut val = 0;
+ for i in 0..(bit_width as usize) {
+ if Self::raw_get_bit(this, i + bit_offset) {
+ let index = if cfg!(target_endian = "big") {
+ bit_width as usize - 1 - i
+ } else {
+ i
+ };
+ val |= 1 << index;
+ }
+ }
+ val
+ }
+ #[inline]
+ pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
+ debug_assert!(bit_width <= 64);
+ debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
+ debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
+ for i in 0..(bit_width as usize) {
+ let mask = 1 << i;
+ let val_bit_is_set = val & mask == mask;
+ let index = if cfg!(target_endian = "big") {
+ bit_width as usize - 1 - i
+ } else {
+ i
+ };
+ self.set_bit(index + bit_offset, val_bit_is_set);
+ }
+ }
+ #[inline]
+ pub unsafe fn raw_set(this: *mut Self, bit_offset: usize, bit_width: u8, val: u64) {
+ debug_assert!(bit_width <= 64);
+ debug_assert!(bit_offset / 8 < core::mem::size_of::());
+ debug_assert!((bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::());
+ for i in 0..(bit_width as usize) {
+ let mask = 1 << i;
+ let val_bit_is_set = val & mask == mask;
+ let index = if cfg!(target_endian = "big") {
+ bit_width as usize - 1 - i
+ } else {
+ i
+ };
+ Self::raw_set_bit(this, index + bit_offset, val_bit_is_set);
+ }
+ }
+}
#[repr(C)]
#[derive(Default)]
pub struct __IncompleteArrayField(::std::marker::PhantomData, [T; 0]);
@@ -30,6 +167,49 @@ impl ::std::fmt::Debug for __IncompleteArrayField {
fmt.write_str("__IncompleteArrayField")
}
}
+#[repr(C)]
+pub struct __BindgenUnionField(::std::marker::PhantomData);
+impl __BindgenUnionField {
+ #[inline]
+ pub const fn new() -> Self {
+ __BindgenUnionField(::std::marker::PhantomData)
+ }
+ #[inline]
+ pub unsafe fn as_ref(&self) -> &T {
+ ::std::mem::transmute(self)
+ }
+ #[inline]
+ pub unsafe fn as_mut(&mut self) -> &mut T {
+ ::std::mem::transmute(self)
+ }
+}
+impl ::std::default::Default for __BindgenUnionField {
+ #[inline]
+ fn default() -> Self {
+ Self::new()
+ }
+}
+impl ::std::clone::Clone for __BindgenUnionField {
+ #[inline]
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+impl ::std::marker::Copy for __BindgenUnionField {}
+impl ::std::fmt::Debug for __BindgenUnionField {
+ fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+ fmt.write_str("__BindgenUnionField")
+ }
+}
+impl ::std::hash::Hash for __BindgenUnionField {
+ fn hash(&self, _state: &mut H) {}
+}
+impl ::std::cmp::PartialEq for __BindgenUnionField {
+ fn eq(&self, _other: &__BindgenUnionField) -> bool {
+ true
+ }
+}
+impl ::std::cmp::Eq for __BindgenUnionField {}
pub const ONIG_OPTION_IGNORECASE: u32 = 1;
pub const ONIG_OPTION_EXTEND: u32 = 2;
pub const ONIG_OPTION_MULTILINE: u32 = 4;
@@ -163,6 +343,16 @@ pub type ruby_rmodule_flags = u32;
pub const ROBJECT_HEAP: ruby_robject_flags = 65536;
pub type ruby_robject_flags = u32;
pub type rb_event_flag_t = u32;
+pub type rb_block_call_func = ::std::option::Option<
+ unsafe extern "C" fn(
+ yielded_arg: VALUE,
+ callback_arg: VALUE,
+ argc: ::std::os::raw::c_int,
+ argv: *const VALUE,
+ blockarg: VALUE,
+ ) -> VALUE,
+>;
+pub type rb_block_call_func_t = rb_block_call_func;
pub const RUBY_ENCODING_INLINE_MAX: ruby_encoding_consts = 127;
pub const RUBY_ENCODING_SHIFT: ruby_encoding_consts = 22;
pub const RUBY_ENCODING_MASK: ruby_encoding_consts = 532676608;
@@ -233,6 +423,20 @@ pub const imemo_callcache: imemo_type = 11;
pub const imemo_constcache: imemo_type = 12;
pub const imemo_fields: imemo_type = 13;
pub type imemo_type = u32;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct vm_ifunc_argc {
+ pub min: ::std::os::raw::c_int,
+ pub max: ::std::os::raw::c_int,
+}
+#[repr(C)]
+pub struct vm_ifunc {
+ pub flags: VALUE,
+ pub svar_lep: *mut VALUE,
+ pub func: rb_block_call_func_t,
+ pub data: *const ::std::os::raw::c_void,
+ pub argc: vm_ifunc_argc,
+}
pub const METHOD_VISI_UNDEF: rb_method_visibility_t = 0;
pub const METHOD_VISI_PUBLIC: rb_method_visibility_t = 1;
pub const METHOD_VISI_PRIVATE: rb_method_visibility_t = 2;
@@ -354,7 +558,166 @@ pub struct rb_iseq_constant_body__bindgen_ty_1_rb_iseq_param_keyword {
pub table: *const ID,
pub default_values: *mut VALUE,
}
+#[repr(C)]
+pub struct rb_captured_block {
+ pub self_: VALUE,
+ pub ep: *const VALUE,
+ pub code: rb_captured_block__bindgen_ty_1,
+}
+#[repr(C)]
+pub struct rb_captured_block__bindgen_ty_1 {
+ pub iseq: __BindgenUnionField<*const rb_iseq_t>,
+ pub ifunc: __BindgenUnionField<*const vm_ifunc>,
+ pub val: __BindgenUnionField,
+ pub bindgen_union_field: u64,
+}
+pub const block_type_iseq: rb_block_type = 0;
+pub const block_type_ifunc: rb_block_type = 1;
+pub const block_type_symbol: rb_block_type = 2;
+pub const block_type_proc: rb_block_type = 3;
+pub type rb_block_type = u32;
+#[repr(C)]
+pub struct rb_block {
+ pub as_: rb_block__bindgen_ty_1,
+ pub type_: rb_block_type,
+}
+#[repr(C)]
+pub struct rb_block__bindgen_ty_1 {
+ pub captured: __BindgenUnionField,
+ pub symbol: __BindgenUnionField,
+ pub proc_: __BindgenUnionField,
+ pub bindgen_union_field: [u64; 3usize],
+}
pub type rb_control_frame_t = rb_control_frame_struct;
+#[repr(C)]
+pub struct rb_proc_t {
+ pub block: rb_block,
+ pub _bitfield_align_1: [u8; 0],
+ pub _bitfield_1: __BindgenBitfieldUnit<[u8; 1usize]>,
+ pub __bindgen_padding_0: [u8; 7usize],
+}
+impl rb_proc_t {
+ #[inline]
+ pub fn is_from_method(&self) -> ::std::os::raw::c_uint {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) }
+ }
+ #[inline]
+ pub fn set_is_from_method(&mut self, val: ::std::os::raw::c_uint) {
+ unsafe {
+ let val: u32 = ::std::mem::transmute(val);
+ self._bitfield_1.set(0usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub unsafe fn is_from_method_raw(this: *const Self) -> ::std::os::raw::c_uint {
+ unsafe {
+ ::std::mem::transmute(<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_get(
+ ::std::ptr::addr_of!((*this)._bitfield_1),
+ 0usize,
+ 1u8,
+ ) as u32)
+ }
+ }
+ #[inline]
+ pub unsafe fn set_is_from_method_raw(this: *mut Self, val: ::std::os::raw::c_uint) {
+ unsafe {
+ let val: u32 = ::std::mem::transmute(val);
+ <__BindgenBitfieldUnit<[u8; 1usize]>>::raw_set(
+ ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+ 0usize,
+ 1u8,
+ val as u64,
+ )
+ }
+ }
+ #[inline]
+ pub fn is_lambda(&self) -> ::std::os::raw::c_uint {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u32) }
+ }
+ #[inline]
+ pub fn set_is_lambda(&mut self, val: ::std::os::raw::c_uint) {
+ unsafe {
+ let val: u32 = ::std::mem::transmute(val);
+ self._bitfield_1.set(1usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub unsafe fn is_lambda_raw(this: *const Self) -> ::std::os::raw::c_uint {
+ unsafe {
+ ::std::mem::transmute(<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_get(
+ ::std::ptr::addr_of!((*this)._bitfield_1),
+ 1usize,
+ 1u8,
+ ) as u32)
+ }
+ }
+ #[inline]
+ pub unsafe fn set_is_lambda_raw(this: *mut Self, val: ::std::os::raw::c_uint) {
+ unsafe {
+ let val: u32 = ::std::mem::transmute(val);
+ <__BindgenBitfieldUnit<[u8; 1usize]>>::raw_set(
+ ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+ 1usize,
+ 1u8,
+ val as u64,
+ )
+ }
+ }
+ #[inline]
+ pub fn is_isolated(&self) -> ::std::os::raw::c_uint {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u32) }
+ }
+ #[inline]
+ pub fn set_is_isolated(&mut self, val: ::std::os::raw::c_uint) {
+ unsafe {
+ let val: u32 = ::std::mem::transmute(val);
+ self._bitfield_1.set(2usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub unsafe fn is_isolated_raw(this: *const Self) -> ::std::os::raw::c_uint {
+ unsafe {
+ ::std::mem::transmute(<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_get(
+ ::std::ptr::addr_of!((*this)._bitfield_1),
+ 2usize,
+ 1u8,
+ ) as u32)
+ }
+ }
+ #[inline]
+ pub unsafe fn set_is_isolated_raw(this: *mut Self, val: ::std::os::raw::c_uint) {
+ unsafe {
+ let val: u32 = ::std::mem::transmute(val);
+ <__BindgenBitfieldUnit<[u8; 1usize]>>::raw_set(
+ ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+ 2usize,
+ 1u8,
+ val as u64,
+ )
+ }
+ }
+ #[inline]
+ pub fn new_bitfield_1(
+ is_from_method: ::std::os::raw::c_uint,
+ is_lambda: ::std::os::raw::c_uint,
+ is_isolated: ::std::os::raw::c_uint,
+ ) -> __BindgenBitfieldUnit<[u8; 1usize]> {
+ let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default();
+ __bindgen_bitfield_unit.set(0usize, 1u8, {
+ let is_from_method: u32 = unsafe { ::std::mem::transmute(is_from_method) };
+ is_from_method as u64
+ });
+ __bindgen_bitfield_unit.set(1usize, 1u8, {
+ let is_lambda: u32 = unsafe { ::std::mem::transmute(is_lambda) };
+ is_lambda as u64
+ });
+ __bindgen_bitfield_unit.set(2usize, 1u8, {
+ let is_isolated: u32 = unsafe { ::std::mem::transmute(is_isolated) };
+ is_isolated as u64
+ });
+ __bindgen_bitfield_unit
+ }
+}
pub const VM_CHECKMATCH_TYPE_WHEN: vm_check_match_type = 1;
pub const VM_CHECKMATCH_TYPE_CASE: vm_check_match_type = 2;
pub const VM_CHECKMATCH_TYPE_RESCUE: vm_check_match_type = 3;
@@ -730,11 +1093,11 @@ pub const DEFINED_REF: defined_type = 15;
pub const DEFINED_FUNC: defined_type = 16;
pub const DEFINED_CONST_FROM: defined_type = 17;
pub type defined_type = u32;
-pub const RB_INVALID_SHAPE_ID: zjit_exported_constants = 4294967295;
-pub type zjit_exported_constants = u32;
-pub const ROBJECT_OFFSET_AS_HEAP_FIELDS: robject_offsets = 16;
-pub const ROBJECT_OFFSET_AS_ARY: robject_offsets = 16;
-pub type robject_offsets = u32;
+pub const ROBJECT_OFFSET_AS_HEAP_FIELDS: jit_bindgen_constants = 16;
+pub const ROBJECT_OFFSET_AS_ARY: jit_bindgen_constants = 16;
+pub const RUBY_OFFSET_RSTRING_LEN: jit_bindgen_constants = 16;
+pub type jit_bindgen_constants = u32;
+pub const rb_invalid_shape_id: shape_id_t = 4294967295;
pub type rb_iseq_param_keyword_struct = rb_iseq_constant_body__bindgen_ty_1_rb_iseq_param_keyword;
unsafe extern "C" {
pub fn ruby_xfree(ptr: *mut ::std::os::raw::c_void);
@@ -998,6 +1361,8 @@ unsafe extern "C" {
) -> *mut rb_method_cfunc_t;
pub fn rb_get_def_method_serial(def: *const rb_method_definition_t) -> usize;
pub fn rb_get_def_original_id(def: *const rb_method_definition_t) -> ID;
+ pub fn rb_get_def_bmethod_proc(def: *mut rb_method_definition_t) -> VALUE;
+ pub fn rb_jit_get_proc_ptr(procv: VALUE) -> *mut rb_proc_t;
pub fn rb_get_mct_argc(mct: *const rb_method_cfunc_t) -> ::std::os::raw::c_int;
pub fn rb_get_mct_func(mct: *const rb_method_cfunc_t) -> *mut ::std::os::raw::c_void;
pub fn rb_get_def_iseq_ptr(def: *mut rb_method_definition_t) -> *const rb_iseq_t;
diff --git a/zjit/src/cruby_methods.rs b/zjit/src/cruby_methods.rs
index 40fb0cbe442dab..656ccab7817c91 100644
--- a/zjit/src/cruby_methods.rs
+++ b/zjit/src/cruby_methods.rs
@@ -210,6 +210,7 @@ pub fn init() -> Annotations {
annotate!(rb_cBasicObject, "!", types::BoolExact, no_gc, leaf, elidable);
annotate!(rb_cBasicObject, "initialize", inline_basic_object_initialize);
annotate!(rb_cInteger, "succ", inline_integer_succ);
+ annotate!(rb_cInteger, "^", inline_integer_xor);
annotate!(rb_cString, "to_s", inline_string_to_s);
let thread_singleton = unsafe { rb_singleton_class(rb_cThread) };
annotate!(thread_singleton, "current", types::BasicObject, no_gc, leaf);
@@ -286,6 +287,17 @@ fn inline_integer_succ(fun: &mut hir::Function, block: hir::BlockId, recv: hir::
None
}
+fn inline_integer_xor(fun: &mut hir::Function, block: hir::BlockId, recv: hir::InsnId, args: &[hir::InsnId], state: hir::InsnId) -> Option {
+ let &[right] = args else { return None; };
+ if fun.likely_a(recv, types::Fixnum, state) && fun.likely_a(right, types::Fixnum, state) {
+ let left = fun.coerce_to(block, recv, types::Fixnum, state);
+ let right = fun.coerce_to(block, right, types::Fixnum, state);
+ let result = fun.push_insn(block, hir::Insn::FixnumXor { left, right });
+ return Some(result);
+ }
+ None
+}
+
fn inline_basic_object_initialize(fun: &mut hir::Function, block: hir::BlockId, _recv: hir::InsnId, args: &[hir::InsnId], _state: hir::InsnId) -> Option {
if !args.is_empty() { return None; }
let result = fun.push_insn(block, hir::Insn::Const { val: hir::Const::Value(Qnil) });
diff --git a/zjit/src/hir.rs b/zjit/src/hir.rs
index 7083a082fba1a8..68ed867e4ddd4d 100644
--- a/zjit/src/hir.rs
+++ b/zjit/src/hir.rs
@@ -750,7 +750,7 @@ pub enum Insn {
/// Non-local control flow. See the throw YARV instruction
Throw { throw_state: u32, val: InsnId, state: InsnId },
- /// Fixnum +, -, *, /, %, ==, !=, <, <=, >, >=, &, |
+ /// Fixnum +, -, *, /, %, ==, !=, <, <=, >, >=, &, |, ^
FixnumAdd { left: InsnId, right: InsnId, state: InsnId },
FixnumSub { left: InsnId, right: InsnId, state: InsnId },
FixnumMult { left: InsnId, right: InsnId, state: InsnId },
@@ -764,6 +764,7 @@ pub enum Insn {
FixnumGe { left: InsnId, right: InsnId },
FixnumAnd { left: InsnId, right: InsnId },
FixnumOr { left: InsnId, right: InsnId },
+ FixnumXor { left: InsnId, right: InsnId },
// Distinct from `SendWithoutBlock` with `mid:to_s` because does not have a patch point for String to_s being redefined
ObjToString { val: InsnId, cd: *const rb_call_data, state: InsnId },
@@ -853,6 +854,7 @@ impl Insn {
Insn::FixnumGe { .. } => false,
Insn::FixnumAnd { .. } => false,
Insn::FixnumOr { .. } => false,
+ Insn::FixnumXor { .. } => false,
Insn::GetLocal { .. } => false,
Insn::IsNil { .. } => false,
Insn::LoadPC => false,
@@ -1051,6 +1053,7 @@ impl<'a> std::fmt::Display for InsnPrinter<'a> {
Insn::FixnumGe { left, right, .. } => { write!(f, "FixnumGe {left}, {right}") },
Insn::FixnumAnd { left, right, .. } => { write!(f, "FixnumAnd {left}, {right}") },
Insn::FixnumOr { left, right, .. } => { write!(f, "FixnumOr {left}, {right}") },
+ Insn::FixnumXor { left, right, .. } => { write!(f, "FixnumXor {left}, {right}") },
Insn::GuardType { val, guard_type, .. } => { write!(f, "GuardType {val}, {}", guard_type.print(self.ptr_map)) },
Insn::GuardTypeNot { val, guard_type, .. } => { write!(f, "GuardTypeNot {val}, {}", guard_type.print(self.ptr_map)) },
Insn::GuardBitEquals { val, expected, .. } => { write!(f, "GuardBitEquals {val}, {}", expected.print(self.ptr_map)) },
@@ -1541,6 +1544,7 @@ impl Function {
&FixnumLe { left, right } => FixnumLe { left: find!(left), right: find!(right) },
&FixnumAnd { left, right } => FixnumAnd { left: find!(left), right: find!(right) },
&FixnumOr { left, right } => FixnumOr { left: find!(left), right: find!(right) },
+ &FixnumXor { left, right } => FixnumXor { left: find!(left), right: find!(right) },
&ObjToString { val, cd, state } => ObjToString {
val: find!(val),
cd,
@@ -1740,6 +1744,7 @@ impl Function {
Insn::FixnumGe { .. } => types::BoolExact,
Insn::FixnumAnd { .. } => types::Fixnum,
Insn::FixnumOr { .. } => types::Fixnum,
+ Insn::FixnumXor { .. } => types::Fixnum,
Insn::PutSpecialObject { .. } => types::BasicObject,
Insn::SendWithoutBlock { .. } => types::BasicObject,
Insn::SendWithoutBlockDirect { .. } => types::BasicObject,
@@ -2101,6 +2106,45 @@ impl Function {
if klass.instance_can_have_singleton_class() {
self.push_insn(block, Insn::PatchPoint { invariant: Invariant::NoSingletonClass { klass }, state });
}
+ if let Some(profiled_type) = profiled_type {
+ recv = self.push_insn(block, Insn::GuardType { val: recv, guard_type: Type::from_profiled_type(profiled_type), state });
+ }
+ let send_direct = self.push_insn(block, Insn::SendWithoutBlockDirect { recv, cd, cme, iseq, args, state });
+ self.make_equal_to(insn_id, send_direct);
+ } else if def_type == VM_METHOD_TYPE_BMETHOD {
+ let procv = unsafe { rb_get_def_bmethod_proc((*cme).def) };
+ let proc = unsafe { rb_jit_get_proc_ptr(procv) };
+ let proc_block = unsafe { &(*proc).block };
+ // Target ISEQ bmethods. Can't handle for example, `define_method(:foo, &:foo)`
+ // which makes a `block_type_symbol` bmethod.
+ if proc_block.type_ != block_type_iseq {
+ self.set_dynamic_send_reason(insn_id, SendWithoutBlockNotOptimizedMethodType(MethodType::Bmethod));
+ self.push_insn_id(block, insn_id); continue;
+ }
+ let capture = unsafe { proc_block.as_.captured.as_ref() };
+ let iseq = unsafe { *capture.code.iseq.as_ref() };
+
+ if !can_direct_send(iseq) {
+ self.set_dynamic_send_reason(insn_id, SendWithoutBlockNotOptimizedMethodType(MethodType::Bmethod));
+ self.push_insn_id(block, insn_id); continue;
+ }
+ // Can't pass a block to a block for now
+ if (unsafe { rb_vm_ci_flag(ci) } & VM_CALL_ARGS_BLOCKARG) != 0 {
+ self.set_dynamic_send_reason(insn_id, SendWithoutBlockNotOptimizedMethodType(MethodType::Bmethod));
+ self.push_insn_id(block, insn_id); continue;
+ }
+
+ // Patch points:
+ // Check for "defined with an un-shareable Proc in a different Ractor"
+ if !procv.shareable_p() {
+ // TODO(alan): Turn this into a ractor belonging guard to work better in multi ractor mode.
+ self.push_insn(block, Insn::PatchPoint { invariant: Invariant::SingleRactorMode, state });
+ }
+ self.push_insn(block, Insn::PatchPoint { invariant: Invariant::MethodRedefined { klass, method: mid, cme }, state });
+ if klass.instance_can_have_singleton_class() {
+ self.push_insn(block, Insn::PatchPoint { invariant: Invariant::NoSingletonClass { klass }, state });
+ }
+
if let Some(profiled_type) = profiled_type {
recv = self.push_insn(block, Insn::GuardType { val: recv, guard_type: Type::from_profiled_type(profiled_type), state });
}
@@ -2928,6 +2972,7 @@ impl Function {
| &Insn::FixnumNeq { left, right }
| &Insn::FixnumAnd { left, right }
| &Insn::FixnumOr { left, right }
+ | &Insn::FixnumXor { left, right }
| &Insn::IsBitEqual { left, right }
=> {
worklist.push_back(left);
@@ -7218,6 +7263,33 @@ mod tests {
");
}
+ #[test]
+ fn test_set_ivar_rescue_frozen() {
+ let result = eval("
+ class Foo
+ attr_accessor :bar
+ def initialize
+ @bar = 1
+ freeze
+ end
+ end
+
+ def test(foo)
+ begin
+ foo.bar = 2
+ rescue FrozenError
+ end
+ end
+
+ foo = Foo.new
+ test(foo)
+ test(foo)
+
+ foo.bar
+ ");
+ assert_eq!(VALUE::fixnum_from_usize(1), result);
+ }
+
#[test]
fn test_setglobal() {
eval("
@@ -12116,6 +12188,120 @@ mod opt_tests {
");
}
+ #[test]
+ fn test_bmethod_send_direct() {
+ eval("
+ define_method(:zero) { :b }
+ define_method(:one) { |arg| arg }
+
+ def test = one(zero)
+ test
+ ");
+ assert_snapshot!(hir_string("test"), @r"
+ fn test@:5:
+ bb0():
+ EntryPoint interpreter
+ v1:BasicObject = LoadSelf
+ Jump bb2(v1)
+ bb1(v4:BasicObject):
+ EntryPoint JIT(0)
+ Jump bb2(v4)
+ bb2(v6:BasicObject):
+ PatchPoint SingleRactorMode
+ PatchPoint MethodRedefined(Object@0x1000, zero@0x1008, cme:0x1010)
+ PatchPoint NoSingletonClass(Object@0x1000)
+ v22:HeapObject[class_exact*:Object@VALUE(0x1000)] = GuardType v6, HeapObject[class_exact*:Object@VALUE(0x1000)]
+ v23:BasicObject = SendWithoutBlockDirect v22, :zero (0x1038)
+ PatchPoint SingleRactorMode
+ PatchPoint MethodRedefined(Object@0x1000, one@0x1040, cme:0x1048)
+ PatchPoint NoSingletonClass(Object@0x1000)
+ v27:HeapObject[class_exact*:Object@VALUE(0x1000)] = GuardType v6, HeapObject[class_exact*:Object@VALUE(0x1000)]
+ v28:BasicObject = SendWithoutBlockDirect v27, :one (0x1038), v23
+ CheckInterrupts
+ Return v28
+ ");
+ }
+
+ #[test]
+ fn test_symbol_block_bmethod() {
+ eval("
+ define_method(:identity, &:itself)
+ def test = identity(100)
+ test
+ ");
+ assert_snapshot!(hir_string("test"), @r"
+ fn test@:3:
+ bb0():
+ EntryPoint interpreter
+ v1:BasicObject = LoadSelf
+ Jump bb2(v1)
+ bb1(v4:BasicObject):
+ EntryPoint JIT(0)
+ Jump bb2(v4)
+ bb2(v6:BasicObject):
+ v10:Fixnum[100] = Const Value(100)
+ v12:BasicObject = SendWithoutBlock v6, :identity, v10
+ CheckInterrupts
+ Return v12
+ ");
+ }
+
+ #[test]
+ fn test_call_bmethod_with_block() {
+ eval("
+ define_method(:bmethod) { :b }
+ def test = (bmethod {})
+ test
+ ");
+ assert_snapshot!(hir_string("test"), @r"
+ fn test@:3:
+ bb0():
+ EntryPoint interpreter
+ v1:BasicObject = LoadSelf
+ Jump bb2(v1)
+ bb1(v4:BasicObject):
+ EntryPoint JIT(0)
+ Jump bb2(v4)
+ bb2(v6:BasicObject):
+ v11:BasicObject = Send v6, 0x1000, :bmethod
+ CheckInterrupts
+ Return v11
+ ");
+ }
+
+ #[test]
+ fn test_call_shareable_bmethod() {
+ eval("
+ class Foo
+ class << self
+ define_method(:identity, &(Ractor.make_shareable ->(val){val}))
+ end
+ end
+ def test = Foo.identity(100)
+ test
+ ");
+ assert_snapshot!(hir_string("test"), @r"
+ fn test@:7:
+ bb0():
+ EntryPoint interpreter
+ v1:BasicObject = LoadSelf
+ Jump bb2(v1)
+ bb1(v4:BasicObject):
+ EntryPoint JIT(0)
+ Jump bb2(v4)
+ bb2(v6:BasicObject):
+ PatchPoint SingleRactorMode
+ PatchPoint StableConstantNames(0x1000, Foo)
+ v22:Class[VALUE(0x1008)] = Const Value(VALUE(0x1008))
+ v12:Fixnum[100] = Const Value(100)
+ PatchPoint MethodRedefined(Class@0x1010, identity@0x1018, cme:0x1020)
+ PatchPoint NoSingletonClass(Class@0x1010)
+ v25:BasicObject = SendWithoutBlockDirect v22, :identity (0x1048), v12
+ CheckInterrupts
+ Return v25
+ ");
+ }
+
#[test]
fn test_nil_nil_specialized_to_ccall() {
eval("
@@ -13756,6 +13942,160 @@ mod opt_tests {
");
}
+ #[test]
+ fn test_inline_integer_xor_with_fixnum() {
+ eval("
+ def test(x, y) = x ^ y
+ test(1, 2)
+ ");
+ assert_snapshot!(hir_string("test"), @r"
+ fn test@:2:
+ bb0():
+ EntryPoint interpreter
+ v1:BasicObject = LoadSelf
+ v2:BasicObject = GetLocal l0, SP@5
+ v3:BasicObject = GetLocal l0, SP@4
+ Jump bb2(v1, v2, v3)
+ bb1(v6:BasicObject, v7:BasicObject, v8:BasicObject):
+ EntryPoint JIT(0)
+ Jump bb2(v6, v7, v8)
+ bb2(v10:BasicObject, v11:BasicObject, v12:BasicObject):
+ PatchPoint MethodRedefined(Integer@0x1000, ^@0x1008, cme:0x1010)
+ v25:Fixnum = GuardType v11, Fixnum
+ v26:Fixnum = GuardType v12, Fixnum
+ v27:Fixnum = FixnumXor v25, v26
+ IncrCounter inline_cfunc_optimized_send_count
+ CheckInterrupts
+ Return v27
+ ");
+ }
+
+ #[test]
+ fn test_eliminate_integer_xor() {
+ eval(r#"
+ def test(x, y)
+ x ^ y
+ 42
+ end
+ test(1, 2)
+ "#);
+ assert_snapshot!(hir_string("test"), @r"
+ fn test@:3:
+ bb0():
+ EntryPoint interpreter
+ v1:BasicObject = LoadSelf
+ v2:BasicObject = GetLocal l0, SP@5
+ v3:BasicObject = GetLocal l0, SP@4
+ Jump bb2(v1, v2, v3)
+ bb1(v6:BasicObject, v7:BasicObject, v8:BasicObject):
+ EntryPoint JIT(0)
+ Jump bb2(v6, v7, v8)
+ bb2(v10:BasicObject, v11:BasicObject, v12:BasicObject):
+ PatchPoint MethodRedefined(Integer@0x1000, ^@0x1008, cme:0x1010)
+ v28:Fixnum = GuardType v11, Fixnum
+ v29:Fixnum = GuardType v12, Fixnum
+ IncrCounter inline_cfunc_optimized_send_count
+ v20:Fixnum[42] = Const Value(42)
+ CheckInterrupts
+ Return v20
+ ");
+ }
+
+ #[test]
+ fn test_dont_inline_integer_xor_with_bignum_or_boolean() {
+ eval("
+ def test(x, y) = x ^ y
+ test(4 << 70, 1)
+ ");
+ assert_snapshot!(hir_string("test"), @r"
+ fn test@:2:
+ bb0():
+ EntryPoint interpreter
+ v1:BasicObject = LoadSelf
+ v2:BasicObject = GetLocal l0, SP@5
+ v3:BasicObject = GetLocal l0, SP@4
+ Jump bb2(v1, v2, v3)
+ bb1(v6:BasicObject, v7:BasicObject, v8:BasicObject):
+ EntryPoint JIT(0)
+ Jump bb2(v6, v7, v8)
+ bb2(v10:BasicObject, v11:BasicObject, v12:BasicObject):
+ PatchPoint MethodRedefined(Integer@0x1000, ^@0x1008, cme:0x1010)
+ v25:Integer = GuardType v11, Integer
+ v26:BasicObject = CCallWithFrame ^@0x1038, v25, v12
+ CheckInterrupts
+ Return v26
+ ");
+
+ eval("
+ def test(x, y) = x ^ y
+ test(1, 4 << 70)
+ ");
+ assert_snapshot!(hir_string("test"), @r"
+ fn test@:2:
+ bb0():
+ EntryPoint interpreter
+ v1:BasicObject = LoadSelf
+ v2:BasicObject = GetLocal l0, SP@5
+ v3:BasicObject = GetLocal l0, SP@4
+ Jump bb2(v1, v2, v3)
+ bb1(v6:BasicObject, v7:BasicObject, v8:BasicObject):
+ EntryPoint JIT(0)
+ Jump bb2(v6, v7, v8)
+ bb2(v10:BasicObject, v11:BasicObject, v12:BasicObject):
+ PatchPoint MethodRedefined(Integer@0x1000, ^@0x1008, cme:0x1010)
+ v25:Fixnum = GuardType v11, Fixnum
+ v26:BasicObject = CCallWithFrame ^@0x1038, v25, v12
+ CheckInterrupts
+ Return v26
+ ");
+
+ eval("
+ def test(x, y) = x ^ y
+ test(true, 0)
+ ");
+ assert_snapshot!(hir_string("test"), @r"
+ fn test@:2:
+ bb0():
+ EntryPoint interpreter
+ v1:BasicObject = LoadSelf
+ v2:BasicObject = GetLocal l0, SP@5
+ v3:BasicObject = GetLocal l0, SP@4
+ Jump bb2(v1, v2, v3)
+ bb1(v6:BasicObject, v7:BasicObject, v8:BasicObject):
+ EntryPoint JIT(0)
+ Jump bb2(v6, v7, v8)
+ bb2(v10:BasicObject, v11:BasicObject, v12:BasicObject):
+ PatchPoint MethodRedefined(TrueClass@0x1000, ^@0x1008, cme:0x1010)
+ v25:TrueClass = GuardType v11, TrueClass
+ v26:BasicObject = CCallWithFrame ^@0x1038, v25, v12
+ CheckInterrupts
+ Return v26
+ ");
+ }
+
+ #[test]
+ fn test_dont_inline_integer_xor_with_args() {
+ eval("
+ def test(x, y) = x.^()
+ ");
+ assert_snapshot!(hir_string("test"), @r"
+ fn test@:2:
+ bb0():
+ EntryPoint interpreter
+ v1:BasicObject = LoadSelf
+ v2:BasicObject = GetLocal l0, SP@5
+ v3:BasicObject = GetLocal l0, SP@4
+ Jump bb2(v1, v2, v3)
+ bb1(v6:BasicObject, v7:BasicObject, v8:BasicObject):
+ EntryPoint JIT(0)
+ Jump bb2(v6, v7, v8)
+ bb2(v10:BasicObject, v11:BasicObject, v12:BasicObject):
+ v17:BasicObject = SendWithoutBlock v11, :^
+ CheckInterrupts
+ Return v17
+ ");
+ }
+
#[test]
fn test_specialize_hash_size() {
eval("
diff --git a/zjit/src/stats.rs b/zjit/src/stats.rs
index 33f29fb3aaed22..913a72fa5646ff 100644
--- a/zjit/src/stats.rs
+++ b/zjit/src/stats.rs
@@ -187,6 +187,7 @@ make_counters! {
compile_error_iseq_stack_too_large,
compile_error_exception_handler,
compile_error_out_of_memory,
+ compile_error_jit_to_jit_optional,
compile_error_register_spill_on_ccall,
compile_error_register_spill_on_alloc,
compile_error_parse_stack_underflow,
@@ -286,6 +287,7 @@ pub enum CompileError {
RegisterSpillOnAlloc,
RegisterSpillOnCCall,
ParseError(ParseError),
+ JitToJitOptional,
}
/// Return a raw pointer to the exit counter for a given CompileError
@@ -300,6 +302,7 @@ pub fn exit_counter_for_compile_error(compile_error: &CompileError) -> Counter {
OutOfMemory => compile_error_out_of_memory,
RegisterSpillOnAlloc => compile_error_register_spill_on_alloc,
RegisterSpillOnCCall => compile_error_register_spill_on_ccall,
+ JitToJitOptional => compile_error_jit_to_jit_optional,
ParseError(parse_error) => match parse_error {
StackUnderflow(_) => compile_error_parse_stack_underflow,
MalformedIseq(_) => compile_error_parse_malformed_iseq,