Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
43 changes: 43 additions & 0 deletions cranelift/codegen/src/isle_prelude.rs
Original file line number Diff line number Diff line change
Expand Up @@ -128,6 +128,49 @@ macro_rules! isle_common_prelude_methods {
Imm64::new((x >> y) & ty_mask)
}

#[inline]
fn imm64_rotl(&mut self, ty: Type, x: Imm64, y: Imm64) -> Imm64 {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Now that we have these helpers in the prelude, we should be able to implement rotl/rotr constant propagation rules too, right? It seems odd to have just the rule in this PR (for rotr-of-select-of-constants) -- can we add simple cprop as well?

let bits = ty.bits();
assert!(bits <= 64);

let ty_mask = self.ty_mask(ty);
let x = (x.bits() as u64) & ty_mask;

// Mask off any excess rotate bits so the rotate stays within `ty`.
let shift_mask = bits - 1;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can we put a debug_assert here that bits is a power of two? This is true for all of our types today but I don't want to bake the assumption in without something guarding it.

let y = ((y.bits() as u64) & u64::from(shift_mask)) as u32;

// In Rust, x >> 64 or x << 64 panics.
let result = if y == 0 {
x
} else {
(x << y) | (x >> (u32::from(bits) - y))
};

Imm64::new((result & ty_mask) as i64)
}

#[inline]
fn imm64_rotr(&mut self, ty: Type, x: Imm64, y: Imm64) -> Imm64 {
let bits = ty.bits();
assert!(bits <= 64);

let ty_mask = self.ty_mask(ty);
let x = (x.bits() as u64) & ty_mask;

// Mask off any excess rotate bits so the rotate stays within `ty`.
let shift_mask = bits - 1;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Likewise here (debug-assert power-of-two).

let y = ((y.bits() as u64) & u64::from(shift_mask)) as u32;

let result = if y == 0 {
x
} else {
(x >> y) | (x << (u32::from(bits) - y))
};

Imm64::new((result & ty_mask) as i64)
}

#[inline]
fn i64_sextend_u64(&mut self, ty: Type, x: u64) -> i64 {
let shift_amt = core::cmp::max(0, 64 - ty.bits());
Expand Down
5 changes: 4 additions & 1 deletion cranelift/codegen/src/opts/shifts.isle
Original file line number Diff line number Diff line change
Expand Up @@ -266,6 +266,10 @@
(rule (simplify (rotl ty (rotr ty x y @ (value_type kty)) z @ (value_type kty)))
(rotr ty x (isub_uextend y z)))

;; Constant-fold a rotate-right of a constant whose rotate amount is a
;; select between two constants: rotate the constant by each candidate
;; amount at compile time and select between the two folded results.
(rule
(simplify (rotr ty (iconst ty p) (select ty x (iconst ty y) (iconst ty z))))
(select ty x (iconst ty (imm64_rotr ty p y)) (iconst ty (imm64_rotr ty p z))))

;; Convert shifts into rotates. We always normalize into a rotate left.
;;
;; (bor (ishl x k1) (ushr x k2)) == (rotl x k1) if k2 == ty_bits - k1
Expand Down Expand Up @@ -311,4 +315,3 @@
(rule (simplify (iadd ty (ishl ty x z) (ishl ty y z))) (ishl ty (iadd ty x y) z))

(rule (simplify (ushr ty (band ty (ishl ty x y) z) y)) (band ty x (ushr ty z y)))

6 changes: 6 additions & 0 deletions cranelift/codegen/src/prelude.isle
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,12 @@
(decl pure imm64_sshr (Type Imm64 Imm64) Imm64)
(extern constructor imm64_sshr imm64_sshr)

;; Rotates an Imm64 left by the given amount, within the bit width of `Type`.
(decl pure imm64_rotl (Type Imm64 Imm64) Imm64)
(extern constructor imm64_rotl imm64_rotl)

;; Rotates an Imm64 right by the given amount, within the bit width of `Type`.
(decl pure imm64_rotr (Type Imm64 Imm64) Imm64)
(extern constructor imm64_rotr imm64_rotr)

;; Sign extends a u64 from ty bits up to 64bits
(decl pure i64_sextend_u64 (Type u64) i64)
(extern constructor i64_sextend_u64 i64_sextend_u64)
Expand Down
15 changes: 15 additions & 0 deletions cranelift/filetests/filetests/egraph/shifts.clif
Original file line number Diff line number Diff line change
Expand Up @@ -758,6 +758,21 @@ block0(v0: i8, v1: i8):

; check: return v0

;; rotr of a constant by a select of constant amounts should constant-fold
;; into a select of the pre-rotated constants:
;; rotr(1, 1) on i8 = 0x80 (-128), rotr(1, 2) on i8 = 0x40 (64).
function %rotr_iconst_select_iconsts(i8) -> i8 {
block0(v0: i8):
v1 = iconst.i8 1
v2 = iconst.i8 1
v3 = iconst.i8 2
v4 = select v0, v2, v3
v5 = rotr.i8 v1, v4
return v5
}

; check: v6 = iconst.i8 -128
; check: v7 = iconst.i8 64
; check: v8 = select v0, v6, v7
; check: return v8

function %shifts_to_rotl(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i16 3
Expand Down
24 changes: 24 additions & 0 deletions cranelift/filetests/filetests/runtests/rotr.clif
Original file line number Diff line number Diff line change
Expand Up @@ -231,6 +231,30 @@ block0(v0: i8, v1: i8):
; run: %rotr_i8_i8(0xe0, 65) == 0x70
; run: %rotr_i8_i8(0xe0, 66) == 0x38

;; Runtime check for the rotr-of-select-of-constants optimization (i8):
;; v0 == 0 selects amount 2 -> rotr(1, 2) = 0x40;
;; v0 != 0 selects amount 1 -> rotr(1, 1) = 0x80.
function %rotr_iconst_select_iconsts_i8(i8) -> i8 {
block0(v0: i8):
v1 = iconst.i8 1
v2 = iconst.i8 1
v3 = iconst.i8 2
v4 = select v0, v2, v3
v5 = rotr.i8 v1, v4
return v5
}
; run: %rotr_iconst_select_iconsts_i8(0) == 0x40
; run: %rotr_iconst_select_iconsts_i8(1) == 0x80

;; Same check at i32: rotr(1, 2) = 0x4000_0000, rotr(1, 1) = 0x8000_0000.
function %rotr_iconst_select_iconsts_i32(i32) -> i32 {
block0(v0: i32):
v1 = iconst.i32 1
v2 = iconst.i32 1
v3 = iconst.i32 2
v4 = select v0, v2, v3
v5 = rotr.i32 v1, v4
return v5
}
; run: %rotr_iconst_select_iconsts_i32(0) == 0x40000000
; run: %rotr_iconst_select_iconsts_i32(1) == 0x80000000



;; This is a regression test for rotates on x64
Expand Down
Loading