diff --git a/cranelift/codegen/src/isa/aarch64/abi.rs b/cranelift/codegen/src/isa/aarch64/abi.rs
index 5c786eced9d3..35071ff398b1 100644
--- a/cranelift/codegen/src/isa/aarch64/abi.rs
+++ b/cranelift/codegen/src/isa/aarch64/abi.rs
@@ -130,17 +130,8 @@ impl ABIMachineSpec for AArch64MachineDeps {
             // number of register values returned in the other class. That is,
             // we can return values in up to 8 integer and
             // 8 vector registers at once.
-            //
-            // In Wasmtime, we can only use one register for return
-            // value for all the register classes. That is, we can't
-            // return values in both one integer and one vector
-            // register; only one return value may be in a register.
             ArgsOrRets::Rets => {
-                if call_conv.extends_wasmtime() {
-                    (1, 1) // x0 or v0, but not both
-                } else {
-                    (8, 16) // x0-x7 and v0-v7
-                }
+                (8, 16) // x0-x7 and v0-v7
             }
         };
@@ -290,10 +281,8 @@ impl ABIMachineSpec for AArch64MachineDeps {
         // Compute the stack slot's size.
         let size = (ty_bits(param.value_type) / 8) as u32;
-        let size = if is_apple_cc
-            || (call_conv.extends_wasmtime() && args_or_rets == ArgsOrRets::Rets)
-        {
-            // MacOS aarch64 and Wasmtime allow stack slots with
+        let size = if is_apple_cc {
+            // MacOS aarch64 allows stack slots with
             // sizes less than 8 bytes. They still need to be
             // properly aligned on their natural data alignment,
             // though.
diff --git a/cranelift/codegen/src/isa/call_conv.rs b/cranelift/codegen/src/isa/call_conv.rs
index 08b2f0708323..eeaf9699c30a 100644
--- a/cranelift/codegen/src/isa/call_conv.rs
+++ b/cranelift/codegen/src/isa/call_conv.rs
@@ -30,18 +30,9 @@ pub enum CallConv {
     Probestack,
     /// Wasmtime equivalent of SystemV, not ABI-stable.
     ///
-    /// Currently only differs in how multiple return values are handled,
-    /// returning the first return value in a register and everything else
-    /// through a return-pointer.
+    /// FIXME: remove this when Wasmtime uses the "tail" calling convention for
+    /// all wasm functions.
     WasmtimeSystemV,
-    /// Wasmtime equivalent of WindowsFastcall, not ABI-stable.
-    ///
-    /// Differs from fastcall in the same way as `WasmtimeSystemV`.
-    WasmtimeFastcall,
-    /// Wasmtime equivalent of AppleAarch64, not ABI-stable.
-    ///
-    /// Differs from apple-aarch64 in the same way as `WasmtimeSystemV`.
-    WasmtimeAppleAarch64,
 }

 impl CallConv {
@@ -81,7 +72,7 @@ impl CallConv {
     /// Is the calling convention extending the Windows Fastcall ABI?
     pub fn extends_windows_fastcall(self) -> bool {
         match self {
-            Self::WindowsFastcall | Self::WasmtimeFastcall => true,
+            Self::WindowsFastcall => true,
             _ => false,
         }
     }
@@ -89,15 +80,7 @@ impl CallConv {
     /// Is the calling convention extending the Apple aarch64 ABI?
     pub fn extends_apple_aarch64(self) -> bool {
         match self {
-            Self::AppleAarch64 | Self::WasmtimeAppleAarch64 => true,
-            _ => false,
-        }
-    }
-
-    /// Is the calling convention extending the Wasmtime ABI?
-    pub fn extends_wasmtime(self) -> bool {
-        match self {
-            Self::WasmtimeSystemV | Self::WasmtimeFastcall | Self::WasmtimeAppleAarch64 => true,
+            Self::AppleAarch64 => true,
             _ => false,
         }
     }
@@ -114,8 +97,6 @@ impl fmt::Display for CallConv {
             Self::AppleAarch64 => "apple_aarch64",
             Self::Probestack => "probestack",
             Self::WasmtimeSystemV => "wasmtime_system_v",
-            Self::WasmtimeFastcall => "wasmtime_fastcall",
-            Self::WasmtimeAppleAarch64 => "wasmtime_apple_aarch64",
         })
     }
 }
@@ -132,8 +113,6 @@ impl str::FromStr for CallConv {
             "apple_aarch64" => Ok(Self::AppleAarch64),
             "probestack" => Ok(Self::Probestack),
             "wasmtime_system_v" => Ok(Self::WasmtimeSystemV),
-            "wasmtime_fastcall" => Ok(Self::WasmtimeFastcall),
-            "wasmtime_apple_aarch64" => Ok(Self::WasmtimeAppleAarch64),
             _ => Err(()),
         }
     }
diff --git a/cranelift/codegen/src/isa/riscv64/abi.rs b/cranelift/codegen/src/isa/riscv64/abi.rs
index 5eb0145cda87..a02f81d2e4f6 100644
--- a/cranelift/codegen/src/isa/riscv64/abi.rs
+++ b/cranelift/codegen/src/isa/riscv64/abi.rs
@@ -106,16 +106,12 @@ impl ABIMachineSpec for Riscv64MachineDeps {
         let (x_start, x_end, f_start, f_end) = match (call_conv, args_or_rets) {
             (isa::CallConv::Tail, _) => (9, 29, 0, 31),
             (_, ArgsOrRets::Args) => (10, 17, 10, 17),
-            (_, ArgsOrRets::Rets) => {
-                let end = if call_conv.extends_wasmtime() { 10 } else { 11 };
-                (10, end, 10, end)
-            }
+            (_, ArgsOrRets::Rets) => (10, 11, 10, 11),
         };
         let mut next_x_reg = x_start;
         let mut next_f_reg = f_start;
         // Stack space.
         let mut next_stack: u32 = 0;
-        let mut return_one_register_used = false;

         for param in params {
             if let ir::ArgumentPurpose::StructArgument(size) = param.purpose {
@@ -135,27 +131,17 @@ impl ABIMachineSpec for Riscv64MachineDeps {
             let (rcs, reg_tys) = Inst::rc_for_type(param.value_type)?;
             let mut slots = ABIArgSlotVec::new();
             for (rc, reg_ty) in rcs.iter().zip(reg_tys.iter()) {
-                let next_reg =
-                    if (next_x_reg <= x_end) && *rc == RegClass::Int && !return_one_register_used {
-                        let x = Some(x_reg(next_x_reg));
-                        if args_or_rets == ArgsOrRets::Rets && call_conv.extends_wasmtime() {
-                            return_one_register_used = true;
-                        }
-                        next_x_reg += 1;
-                        x
-                    } else if (next_f_reg <= f_end)
-                        && *rc == RegClass::Float
-                        && !return_one_register_used
-                    {
-                        let x = Some(f_reg(next_f_reg));
-                        if args_or_rets == ArgsOrRets::Rets && call_conv.extends_wasmtime() {
-                            return_one_register_used = true;
-                        }
-                        next_f_reg += 1;
-                        x
-                    } else {
-                        None
-                    };
+                let next_reg = if (next_x_reg <= x_end) && *rc == RegClass::Int {
+                    let x = Some(x_reg(next_x_reg));
+                    next_x_reg += 1;
+                    x
+                } else if (next_f_reg <= f_end) && *rc == RegClass::Float {
+                    let x = Some(f_reg(next_f_reg));
+                    next_f_reg += 1;
+                    x
+                } else {
+                    None
+                };
                 if let Some(reg) = next_reg {
                     slots.push(ABIArgSlot::Reg {
                         reg: reg.to_real_reg().unwrap(),
@@ -163,20 +149,10 @@ impl ABIMachineSpec for Riscv64MachineDeps {
                         extension: param.extension,
                     });
                 } else {
-                    // Compute size. For the wasmtime ABI it differs from native
-                    // ABIs in how multiple values are returned, so we take a
-                    // leaf out of arm64's book by not rounding everything up to
-                    // 8 bytes. For all ABI arguments, and other ABI returns,
-                    // though, each slot takes a minimum of 8 bytes.
-                    //
-                    // Note that in all cases 16-byte stack alignment happens
+                    // Compute size. The 16-byte stack alignment happens
                     // separately after all args.
                     let size = reg_ty.bits() / 8;
-                    let size = if args_or_rets == ArgsOrRets::Rets && call_conv.extends_wasmtime() {
-                        size
-                    } else {
-                        std::cmp::max(size, 8)
-                    };
+                    let size = std::cmp::max(size, 8);
                     // Align.
                    debug_assert!(size.is_power_of_two());
                    next_stack = align_to(next_stack, size);
diff --git a/cranelift/codegen/src/isa/s390x/abi.rs b/cranelift/codegen/src/isa/s390x/abi.rs
index 1482e29c9c4a..f915138007f3 100644
--- a/cranelift/codegen/src/isa/s390x/abi.rs
+++ b/cranelift/codegen/src/isa/s390x/abi.rs
@@ -248,13 +248,12 @@ impl ABIMachineSpec for S390xMachineDeps {
         }

         // In the SystemV ABI, the return area pointer is the first argument,
-        // so we need to leave room for it if required. (In the Wasmtime ABI,
-        // the return area pointer is the last argument and is handled below.)
-        if add_ret_area_ptr && !call_conv.extends_wasmtime() {
+        // so we need to leave room for it if required.
+        if add_ret_area_ptr {
             next_gpr += 1;
         }

-        for (i, mut param) in params.into_iter().copied().enumerate() {
+        for mut param in params.into_iter().copied() {
             let intreg = in_int_reg(param.value_type);
             let fltreg = in_flt_reg(param.value_type);
             let vecreg = in_vec_reg(param.value_type);
@@ -278,8 +277,6 @@ impl ABIMachineSpec for S390xMachineDeps {
                     ArgsOrRets::Rets => get_vecreg_for_ret(next_vr),
                 };
                 (&mut next_vr, candidate, None)
-            } else if call_conv.extends_wasmtime() {
-                panic!("i128 args/return values not supported in the Wasmtime ABI");
             } else {
                 // We must pass this by implicit reference.
                 if args_or_rets == ArgsOrRets::Rets {
@@ -294,14 +291,6 @@ impl ABIMachineSpec for S390xMachineDeps {
                 }
             };

-            // In the Wasmtime ABI only the first return value can be in a register.
-            let candidate =
-                if call_conv.extends_wasmtime() && args_or_rets == ArgsOrRets::Rets && i > 0 {
-                    None
-                } else {
-                    candidate
-                };
-
             let slot = if let Some(reg) = candidate {
                 *next_reg += 1;
                 ABIArgSlot::Reg {
@@ -311,14 +300,9 @@ impl ABIMachineSpec for S390xMachineDeps {
                 }
             } else {
                 // Compute size. Every argument or return value takes a slot of
-                // at least 8 bytes, except for return values in the Wasmtime ABI.
+                // at least 8 bytes.
                 let size = (ty_bits(param.value_type) / 8) as u32;
-                let slot_size = if call_conv.extends_wasmtime() && args_or_rets == ArgsOrRets::Rets
-                {
-                    size
-                } else {
-                    std::cmp::max(size, 8)
-                };
+                let slot_size = std::cmp::max(size, 8);

                 // Align the stack slot.
                 debug_assert!(slot_size.is_power_of_two());
@@ -372,14 +356,8 @@ impl ABIMachineSpec for S390xMachineDeps {
         let extra_arg = if add_ret_area_ptr {
             debug_assert!(args_or_rets == ArgsOrRets::Args);
-            // The return pointer is passed either as first argument
-            // (in the SystemV ABI) or as last argument (Wasmtime ABI).
-            let next_gpr = if call_conv.extends_wasmtime() {
-                next_gpr
-            } else {
-                0
-            };
-            if let Some(reg) = get_intreg_for_arg(next_gpr) {
+            // The return pointer is passed as first argument.
+            if let Some(reg) = get_intreg_for_arg(0) {
                 args.push(ABIArg::reg(
                     reg.to_real_reg().unwrap(),
                     types::I64,
diff --git a/cranelift/codegen/src/isa/s390x/lower/isle.rs b/cranelift/codegen/src/isa/s390x/lower/isle.rs
index f32d38b79a61..72fe9c326992 100644
--- a/cranelift/codegen/src/isa/s390x/lower/isle.rs
+++ b/cranelift/codegen/src/isa/s390x/lower/isle.rs
@@ -968,7 +968,7 @@ impl generated_code::Context for IsleContext<'_, '_, MInst, S390xBackend> {
 /// Lane order to be used for a given calling convention.
 #[inline]
 fn lane_order_for_call_conv(call_conv: CallConv) -> LaneOrder {
-    if call_conv.extends_wasmtime() {
+    if call_conv == CallConv::WasmtimeSystemV {
         LaneOrder::LittleEndian
     } else {
         LaneOrder::BigEndian
diff --git a/cranelift/codegen/src/isa/x64/abi.rs b/cranelift/codegen/src/isa/x64/abi.rs
index 1ae2de4b785f..ab6f50e32a9e 100644
--- a/cranelift/codegen/src/isa/x64/abi.rs
+++ b/cranelift/codegen/src/isa/x64/abi.rs
@@ -200,18 +200,14 @@ impl ABIMachineSpec for X64ABIMachineSpec {
                     ArgsOrRets::Args => {
                         get_intreg_for_arg(&call_conv, next_gpr, next_param_idx)
                     }
-                    ArgsOrRets::Rets => {
-                        get_intreg_for_retval(&call_conv, next_gpr, next_param_idx)
-                    }
+                    ArgsOrRets::Rets => get_intreg_for_retval(&call_conv, next_gpr),
                 }
             } else {
                 match args_or_rets {
                     ArgsOrRets::Args => {
                         get_fltreg_for_arg(&call_conv, next_vreg, next_param_idx)
                     }
-                    ArgsOrRets::Rets => {
-                        get_fltreg_for_retval(&call_conv, next_vreg, next_param_idx)
-                    }
+                    ArgsOrRets::Rets => get_fltreg_for_retval(&call_conv, next_vreg),
                 }
             };
             next_param_idx += 1;
@@ -227,20 +223,8 @@ impl ABIMachineSpec for X64ABIMachineSpec {
                     extension: param.extension,
                 });
             } else {
-                // Compute size. For the wasmtime ABI it differs from native
-                // ABIs in how multiple values are returned, so we take a
-                // leaf out of arm64's book by not rounding everything up to
-                // 8 bytes. For all ABI arguments, and other ABI returns,
-                // though, each slot takes a minimum of 8 bytes.
-                //
-                // Note that in all cases 16-byte stack alignment happens
-                // separately after all args.
                 let size = reg_ty.bits() / 8;
-                let size = if args_or_rets == ArgsOrRets::Rets && call_conv.extends_wasmtime() {
-                    size
-                } else {
-                    std::cmp::max(size, 8)
-                };
+                let size = std::cmp::max(size, 8);
                 // Align.
                 debug_assert!(size.is_power_of_two());
                 next_stack = align_to(next_stack, size);
@@ -798,18 +782,18 @@ impl ABIMachineSpec for X64ABIMachineSpec {
             // The `tail` calling convention doesn't have any callee-save
             // registers.
             CallConv::Tail => vec![],
-            CallConv::Fast | CallConv::Cold | CallConv::SystemV | CallConv::WasmtimeSystemV => regs
+            CallConv::Fast | CallConv::Cold | CallConv::SystemV => regs
                 .iter()
                 .cloned()
                 .filter(|r| is_callee_save_systemv(r.to_reg(), flags.enable_pinned_reg()))
                 .collect(),
-            CallConv::WindowsFastcall | CallConv::WasmtimeFastcall => regs
+            CallConv::WindowsFastcall => regs
                 .iter()
                 .cloned()
                 .filter(|r| is_callee_save_fastcall(r.to_reg(), flags.enable_pinned_reg()))
                 .collect(),
             CallConv::Probestack => todo!("probestack?"),
-            CallConv::AppleAarch64 | CallConv::WasmtimeAppleAarch64 => unreachable!(),
+            CallConv::WasmtimeSystemV | CallConv::AppleAarch64 => unreachable!(),
         };
         // Sort registers for deterministic code output. We can do an unstable sort because the
         // registers will be unique (there are no dups).
@@ -927,11 +911,7 @@ fn get_fltreg_for_arg(call_conv: &CallConv, idx: usize, arg_idx: usize) -> Optio
     }
 }

-fn get_intreg_for_retval(
-    call_conv: &CallConv,
-    intreg_idx: usize,
-    retval_idx: usize,
-) -> Option<Reg> {
+fn get_intreg_for_retval(call_conv: &CallConv, intreg_idx: usize) -> Option<Reg> {
     match call_conv {
         CallConv::Tail => match intreg_idx {
             0 => Some(regs::rax()),
@@ -955,28 +935,17 @@
             1 => Some(regs::rdx()),
             _ => None,
         },
-        CallConv::WasmtimeSystemV | CallConv::WasmtimeFastcall => {
-            if intreg_idx == 0 && retval_idx == 0 {
-                Some(regs::rax())
-            } else {
-                None
-            }
-        }
         CallConv::WindowsFastcall => match intreg_idx {
             0 => Some(regs::rax()),
             1 => Some(regs::rdx()), // The Rust ABI for i128s needs this.
             _ => None,
         },
         CallConv::Probestack => todo!(),
-        CallConv::AppleAarch64 | CallConv::WasmtimeAppleAarch64 => unreachable!(),
+        CallConv::WasmtimeSystemV | CallConv::AppleAarch64 => unreachable!(),
     }
 }

-fn get_fltreg_for_retval(
-    call_conv: &CallConv,
-    fltreg_idx: usize,
-    retval_idx: usize,
-) -> Option<Reg> {
+fn get_fltreg_for_retval(call_conv: &CallConv, fltreg_idx: usize) -> Option<Reg> {
     match call_conv {
         CallConv::Tail => match fltreg_idx {
             0 => Some(regs::xmm0()),
@@ -994,19 +963,12 @@
             1 => Some(regs::xmm1()),
             _ => None,
         },
-        CallConv::WasmtimeFastcall | CallConv::WasmtimeSystemV => {
-            if fltreg_idx == 0 && retval_idx == 0 {
-                Some(regs::xmm0())
-            } else {
-                None
-            }
-        }
         CallConv::WindowsFastcall => match fltreg_idx {
             0 => Some(regs::xmm0()),
             _ => None,
         },
         CallConv::Probestack => todo!(),
-        CallConv::AppleAarch64 | CallConv::WasmtimeAppleAarch64 => unreachable!(),
+        CallConv::WasmtimeSystemV | CallConv::AppleAarch64 => unreachable!(),
     }
 }
diff --git a/cranelift/codegen/src/machinst/abi.rs b/cranelift/codegen/src/machinst/abi.rs
index 54dc09c2c90d..4e13f49f37a9 100644
--- a/cranelift/codegen/src/machinst/abi.rs
+++ b/cranelift/codegen/src/machinst/abi.rs
@@ -1083,9 +1083,8 @@ impl<M: ABIMachineSpec> Callee<M> {
             || call_conv == isa::CallConv::Fast
             || call_conv == isa::CallConv::Cold
             || call_conv.extends_windows_fastcall()
-            || call_conv == isa::CallConv::AppleAarch64
             || call_conv == isa::CallConv::WasmtimeSystemV
-            || call_conv == isa::CallConv::WasmtimeAppleAarch64,
+            || call_conv == isa::CallConv::AppleAarch64,
             "Unsupported calling convention: {:?}",
             call_conv
         );
diff --git a/cranelift/filetests/filetests/isa/aarch64/call.clif b/cranelift/filetests/filetests/isa/aarch64/call.clif
index 801dc8a33fb1..7f371334e244 100644
--- a/cranelift/filetests/filetests/isa/aarch64/call.clif
+++ b/cranelift/filetests/filetests/isa/aarch64/call.clif
@@ -818,29 +818,6 @@ block0(v0: i128, v1: i64):
 ;   ldp x29, x30, [sp], #0x10
 ;   ret

-function %f16() -> i32, i32 wasmtime_system_v {
-block0:
-    v0 = iconst.i32 0
-    v1 = iconst.i32 1
-    return v0, v1
-}
-
-; VCode:
-; block0:
-;   mov x6, x0
-;   movz w0, #0
-;   movz w3, #1
-;   str w3, [x6]
-;   ret
-;
-; Disassembled:
-; block0: ; offset 0x0
-;   mov x6, x0
-;   mov w0, #0
-;   mov w3, #1
-;   stur w3, [x6]
-;   ret
-
 function %f17(i64 sret) {
 block0(v0: i64):
     v1 = iconst.i64 42
diff --git a/cranelift/filetests/filetests/isa/x64/call-conv.clif b/cranelift/filetests/filetests/isa/x64/call-conv.clif
index 6040fa4675e7..a4e383e817f0 100644
--- a/cranelift/filetests/filetests/isa/x64/call-conv.clif
+++ b/cranelift/filetests/filetests/isa/x64/call-conv.clif
@@ -377,289 +377,6 @@ block0(v0: i32, v1: f32, v2: i64, v3: f64, v4: i32, v5: i32, v6: i32, v7: f32, v
 ;   popq %rbp
 ;   retq

-function %wasmtime_mix1(i32) wasmtime_system_v {
-    sig0 = (i32) system_v
-block0(v0: i32):
-    call_indirect sig0, v0(v0)
-    return
-}
-
-; VCode:
-;   pushq %rbp
-;   movq %rsp, %rbp
-; block0:
-;   call *%rdi
-;   movq %rbp, %rsp
-;   popq %rbp
-;   ret
-;
-; Disassembled:
-; block0: ; offset 0x0
-;   pushq %rbp
-;   movq %rsp, %rbp
-; block1: ; offset 0x4
-;   callq *%rdi
-;   movq %rbp, %rsp
-;   popq %rbp
-;   retq
-
-function %wasmtime_mix2(i32) system_v {
-    sig0 = (i32) wasmtime_system_v
-block0(v0: i32):
-    call_indirect sig0, v0(v0)
-    return
-}
-
-; VCode:
-;   pushq %rbp
-;   movq %rsp, %rbp
-; block0:
-;   call *%rdi
-;   movq %rbp, %rsp
-;   popq %rbp
-;   ret
-;
-; Disassembled:
-; block0: ; offset 0x0
-;   pushq %rbp
-;   movq %rsp, %rbp
-; block1: ; offset 0x4
-;   callq *%rdi
-;   movq %rbp, %rsp
-;   popq %rbp
-;   retq
-
-function %wasmtime_mix2() -> i32, i32 system_v {
-    sig0 = () -> i32, i32 wasmtime_system_v
-block0:
-    v2 = iconst.i32 1
-    v0, v1 = call_indirect sig0, v2()
-    return v0, v1
-}
-
-; VCode:
-;   pushq %rbp
-;   movq %rsp, %rbp
-; block0:
-;   movl $1, %ecx
-;   subq %rsp, $16, %rsp
-;   virtual_sp_offset_adjust 16
-;   lea 0(%rsp), %rdi
-;   call *%rcx
-;   movq 0(%rsp), %rdx
-;   addq %rsp, $16, %rsp
-;   virtual_sp_offset_adjust -16
-;   movq %rbp, %rsp
-;   popq %rbp
-;   ret
-;
-; Disassembled:
-; block0: ; offset 0x0
-;   pushq %rbp
-;   movq %rsp, %rbp
-; block1: ; offset 0x4
-;   movl $1, %ecx
-;   subq $0x10, %rsp
-;   leaq (%rsp), %rdi
-;   callq *%rcx
-;   movq (%rsp), %rdx
-;   addq $0x10, %rsp
-;   movq %rbp, %rsp
-;   popq %rbp
-;   retq
-
-function %wasmtime_mix3() -> i32, i32 wasmtime_system_v {
-    sig0 = () -> i32, i32 system_v
-block0:
-    v2 = iconst.i32 1
-    v0, v1 = call_indirect sig0, v2()
-    return v0, v1
-}
-
-; VCode:
-;   pushq %rbp
-;   movq %rsp, %rbp
-;   subq %rsp, $16, %rsp
-;   movq %rbx, 0(%rsp)
-; block0:
-;   movq %rdi, %rbx
-;   movl $1, %edx
-;   call *%rdx
-;   movq %rbx, %rdi
-;   movl %edx, 0(%rdi)
-;   movq 0(%rsp), %rbx
-;   addq %rsp, $16, %rsp
-;   movq %rbp, %rsp
-;   popq %rbp
-;   ret
-;
-; Disassembled:
-; block0: ; offset 0x0
-;   pushq %rbp
-;   movq %rsp, %rbp
-;   subq $0x10, %rsp
-;   movq %rbx, (%rsp)
-; block1: ; offset 0xc
-;   movq %rdi, %rbx
-;   movl $1, %edx
-;   callq *%rdx
-;   movq %rbx, %rdi
-;   movl %edx, (%rdi)
-;   movq (%rsp), %rbx
-;   addq $0x10, %rsp
-;   movq %rbp, %rsp
-;   popq %rbp
-;   retq
-
-function %wasmtime_mix4() -> i32, i64, i32 wasmtime_system_v {
-    sig0 = () -> i32, i64, i32 system_v
-block0:
-    v3 = iconst.i32 1
-    v0, v1, v2 = call_indirect sig0, v3()
-    return v0, v1, v2
-}
-
-; VCode:
-;   pushq %rbp
-;   movq %rsp, %rbp
-;   subq %rsp, $16, %rsp
-;   movq %r13, 0(%rsp)
-; block0:
-;   movq %rdi, %r13
-;   movl $1, %r9d
-;   subq %rsp, $16, %rsp
-;   virtual_sp_offset_adjust 16
-;   lea 0(%rsp), %rdi
-;   call *%r9
-;   movq 0(%rsp), %rsi
-;   addq %rsp, $16, %rsp
-;   virtual_sp_offset_adjust -16
-;   movq %r13, %rdi
-;   movq %rdx, 0(%rdi)
-;   movl %esi, 8(%rdi)
-;   movq 0(%rsp), %r13
-;   addq %rsp, $16, %rsp
-;   movq %rbp, %rsp
-;   popq %rbp
-;   ret
-;
-; Disassembled:
-; block0: ; offset 0x0
-;   pushq %rbp
-;   movq %rsp, %rbp
-;   subq $0x10, %rsp
-;   movq %r13, (%rsp)
-; block1: ; offset 0xc
-;   movq %rdi, %r13
-;   movl $1, %r9d
-;   subq $0x10, %rsp
-;   leaq (%rsp), %rdi
-;   callq *%r9
-;   movq (%rsp), %rsi
-;   addq $0x10, %rsp
-;   movq %r13, %rdi
-;   movq %rdx, (%rdi)
-;   movl %esi, 8(%rdi)
-;   movq (%rsp), %r13
-;   addq $0x10, %rsp
-;   movq %rbp, %rsp
-;   popq %rbp
-;   retq
-
-function %wasmtime_mix5() -> f32, i64, i32, f32 wasmtime_system_v {
-    sig0 = () -> f32, i64, i32, f32 system_v
-block0:
-    v5 = iconst.i32 1
-    v0, v1, v2, v3 = call_indirect sig0, v5()
-    return v0, v1, v2, v3
-}
-
-; VCode:
-;   pushq %rbp
-;   movq %rsp, %rbp
-;   subq %rsp, $16, %rsp
-;   movq %r13, 0(%rsp)
-; block0:
-;   movq %rdi, %r13
-;   movl $1, %r9d
-;   call *%r9
-;   movq %r13, %rdi
-;   movq %rax, 0(%rdi)
-;   movl %edx, 8(%rdi)
-;   movss %xmm1, 12(%rdi)
-;   movq 0(%rsp), %r13
-;   addq %rsp, $16, %rsp
-;   movq %rbp, %rsp
-;   popq %rbp
-;   ret
-;
-; Disassembled:
-; block0: ; offset 0x0
-;   pushq %rbp
-;   movq %rsp, %rbp
-;   subq $0x10, %rsp
-;   movq %r13, (%rsp)
-; block1: ; offset 0xc
-;   movq %rdi, %r13
-;   movl $1, %r9d
-;   callq *%r9
-;   movq %r13, %rdi
-;   movq %rax, (%rdi)
-;   movl %edx, 8(%rdi)
-;   movss %xmm1, 0xc(%rdi)
-;   movq (%rsp), %r13
-;   addq $0x10, %rsp
-;   movq %rbp, %rsp
-;   popq %rbp
-;   retq
-
-function %wasmtime_mix6(f32, i64, i32, f32) -> f32, i64, i32, f32 wasmtime_system_v {
-    sig0 = (f32, i64, i32, f32) -> f32, i64, i32, f32 system_v
-block0(v0: f32, v1: i64, v2: i32, v3: f32):
-    v4 = iconst.i32 1
-    v5, v6, v7, v8 = call_indirect sig0, v4(v0, v1, v2, v3)
-    return v5, v6, v7, v8
-}
-
-; VCode:
-;   pushq %rbp
-;   movq %rsp, %rbp
-;   subq %rsp, $16, %rsp
-;   movq %r12, 0(%rsp)
-; block0:
-;   movq %rdx, %r12
-;   movl $1, %eax
-;   call *%rax
-;   movq %r12, %r8
-;   movq %rax, 0(%r8)
-;   movl %edx, 8(%r8)
-;   movss %xmm1, 12(%r8)
-;   movq 0(%rsp), %r12
-;   addq %rsp, $16, %rsp
-;   movq %rbp, %rsp
-;   popq %rbp
-;   ret
-;
-; Disassembled:
-; block0: ; offset 0x0
-;   pushq %rbp
-;   movq %rsp, %rbp
-;   subq $0x10, %rsp
-;   movq %r12, (%rsp)
-; block1: ; offset 0xc
-;   movq %rdx, %r12
-;   movl $1, %eax
-;   callq *%rax
-;   movq %r12, %r8
-;   movq %rax, 0(%r8)
-;   movl %edx, 8(%r8)
-;   movss %xmm1, 0xc(%r8)
-;   movq (%rsp), %r12
-;   addq $0x10, %rsp
-;   movq %rbp, %rsp
-;   popq %rbp
-;   retq
-
 function %fastcall_m128i_param(i32, i8x16) system_v {
     sig0 = (i8x16) windows_fastcall
 block0(v0: i32, v1: i8x16):
diff --git a/cranelift/filetests/filetests/runtests/call.clif b/cranelift/filetests/filetests/runtests/call.clif
index 78fa62ad03e8..002e6c347001 100644
--- a/cranelift/filetests/filetests/runtests/call.clif
+++ b/cranelift/filetests/filetests/runtests/call.clif
@@ -74,14 +74,14 @@ block0(v0: i8):

 ; Tests calling across different calling conventions

-function %callee_wasm_i64(i64) -> i64 wasmtime_system_v {
+function %callee_wasm_i64(i64) -> i64 windows_fastcall {
 block0(v0: i64):
     v1 = iadd_imm.i64 v0, 10
     return v1
 }

 function %call_sysv_i64(i64) -> i64 system_v {
-    fn0 = %callee_wasm_i64(i64) -> i64 wasmtime_system_v
+    fn0 = %callee_wasm_i64(i64) -> i64 windows_fastcall

 block0(v0: i64):
     v1 = call fn0(v0)
diff --git a/crates/cranelift/src/compiler.rs b/crates/cranelift/src/compiler.rs
index d9d96dbf194a..027c7a51f9eb 100644
--- a/crates/cranelift/src/compiler.rs
+++ b/crates/cranelift/src/compiler.rs
@@ -335,12 +335,15 @@ impl wasmtime_environ::Compiler for Compiler {
             vmctx,
         );

+        let ret = NativeRet::classify(pointer_type, wasm_func_ty);
+        let wasm_args = ret.native_args(&args);
+
         // Then call into Wasm.
-        let call = declare_and_call(&mut builder, wasm_call_sig, func_index.as_u32(), &args);
+        let call = declare_and_call(&mut builder, wasm_call_sig, func_index.as_u32(), wasm_args);

         // Forward the results along.
         let results = builder.func.dfg.inst_results(call).to_vec();
-        builder.ins().return_(&results);
+        ret.native_return(&mut builder, block0, &results);
         builder.finalize();

         Ok(Box::new(compiler.finish()?))
@@ -360,10 +363,12 @@ impl wasmtime_environ::Compiler for Compiler {
         let func = ir::Function::with_name_signature(Default::default(), wasm_call_sig);
         let (mut builder, block0) = compiler.builder(func);
-        let args = builder.func.dfg.block_params(block0).to_vec();
+        let mut args = builder.func.dfg.block_params(block0).to_vec();
         let callee_vmctx = args[0];
         let caller_vmctx = args[1];

+        let ret = NativeRet::classify(pointer_type, wasm_func_ty);
+
         // We are exiting Wasm, so save our PC and FP.
         //
         // Assert that the caller vmctx really is a core Wasm vmctx, since
@@ -383,6 +388,19 @@ impl wasmtime_environ::Compiler for Compiler {
         );
         save_last_wasm_exit_fp_and_pc(&mut builder, pointer_type, &offsets.ptr, limits);

+        // If the native call signature for this function uses a return pointer
+        // then allocate the return pointer here on the stack and pass it as the
+        // last argument.
+        let slot = match &ret {
+            NativeRet::Bare => None,
+            NativeRet::Retptr { size, .. } => Some(builder.func.create_sized_stack_slot(
+                ir::StackSlotData::new(ir::StackSlotKind::ExplicitSlot, *size),
+            )),
+        };
+        if let Some(slot) = slot {
+            args.push(builder.ins().stack_addr(pointer_type, slot, 0));
+        }
+
         // Load the actual callee out of the
         // `VMNativeCallHostFuncContext::host_func`.
         let ptr_size = isa.pointer_bytes();
@@ -398,8 +416,22 @@ impl wasmtime_environ::Compiler for Compiler {
         let callee_signature = builder.func.import_signature(native_call_sig);
         let call = builder.ins().call_indirect(callee_signature, callee, &args);

-        // Forward the results back to the caller.
-        let results = builder.func.dfg.inst_results(call).to_vec();
+        // Forward the results back to the caller. If a return pointer was in
+        // use for the native call then load the results from the return pointer
+        // to pass through as native return values in the wasm abi.
+        let mut results = builder.func.dfg.inst_results(call).to_vec();
+        if let NativeRet::Retptr { offsets, .. } = ret {
+            let slot = *args.last().unwrap();
+            assert_eq!(offsets.len(), wasm_func_ty.returns().len() - 1);
+            for (ty, offset) in wasm_func_ty.returns()[1..].iter().zip(offsets) {
+                let ty = crate::value_type(isa, *ty);
+                results.push(
+                    builder
+                        .ins()
+                        .load(ty, MemFlags::trusted(), slot, offset as i32),
+                );
+            }
+        }
         builder.ins().return_(&results);

         builder.finalize();
@@ -695,20 +727,18 @@ impl Compiler {
         let mut compiler = self.function_compiler();
         let func = ir::Function::with_name_signature(Default::default(), native_call_sig);
         let (mut builder, block0) = compiler.builder(func);
+        let args = builder.func.dfg.block_params(block0).to_vec();
+
+        let ret = NativeRet::classify(pointer_type, ty);
+        let wasm_args = &ret.native_args(&args)[2..];

         let (values_vec_ptr, values_vec_len) =
-            self.allocate_stack_array_and_spill_args(ty, &mut builder, block0);
+            self.allocate_stack_array_and_spill_args(ty, &mut builder, wasm_args);
         let values_vec_len = builder
             .ins()
             .iconst(pointer_type, i64::from(values_vec_len));

-        let block_params = builder.func.dfg.block_params(block0);
-        let callee_args = [
-            block_params[0],
-            block_params[1],
-            values_vec_ptr,
-            values_vec_len,
-        ];
+        let callee_args = [args[0], args[1], values_vec_ptr, values_vec_len];

         let new_sig = builder.import_signature(array_call_sig);
         let callee_value = builder.ins().iconst(pointer_type, host_fn as i64);
@@ -718,7 +748,7 @@ impl Compiler {
         let results =
             self.load_values_from_array(ty.returns(), &mut builder, values_vec_ptr, values_vec_len);
-        builder.ins().return_(&results);
+        ret.native_return(&mut builder, block0, &results);
         builder.finalize();

         compiler.finish()
@@ -760,7 +790,8 @@ impl Compiler {
         let mut compiler = self.function_compiler();
         let func = ir::Function::with_name_signature(Default::default(), wasm_call_sig);
         let (mut builder, block0) = compiler.builder(func);
-        let caller_vmctx = builder.func.dfg.block_params(block0)[1];
+        let args = builder.func.dfg.block_params(block0).to_vec();
+        let caller_vmctx = args[1];

         // Assert that we were really given a core Wasm vmctx, since that's
         // what we are assuming with our offsets below.
@@ -780,7 +811,7 @@ impl Compiler {
         save_last_wasm_exit_fp_and_pc(&mut builder, pointer_type, &ptr_size, limits);

         let (values_vec_ptr, values_vec_len) =
-            self.allocate_stack_array_and_spill_args(ty, &mut builder, block0);
+            self.allocate_stack_array_and_spill_args(ty, &mut builder, &args[2..]);
         let values_vec_len = builder
             .ins()
             .iconst(pointer_type, i64::from(values_vec_len));
@@ -820,7 +851,7 @@ impl Compiler {
         &self,
         ty: &WasmFuncType,
         builder: &mut FunctionBuilder,
-        block0: ir::Block,
+        args: &[ir::Value],
     ) -> (Value, u32) {
         let isa = &*self.isa;
         let pointer_type = isa.pointer_type();
@@ -837,14 +868,11 @@ impl Compiler {
         ));
         let values_vec_ptr = builder.ins().stack_addr(pointer_type, slot, 0);

-        // NB: `2..` because the vmctx and caller vmctx don't go in the array.
-        let args = builder.func.dfg.block_params(block0)[2..].to_vec();
-
         {
             let values_vec_len = builder
                 .ins()
                 .iconst(ir::types::I32, i64::try_from(values_vec_len).unwrap());
-            self.store_values_to_array(builder, ty.params(), &args, values_vec_ptr, values_vec_len);
+            self.store_values_to_array(builder, ty.params(), args, values_vec_ptr, values_vec_len);
         }

         (values_vec_ptr, values_vec_len)
@@ -1198,3 +1226,72 @@ fn save_last_wasm_exit_fp_and_pc(
         ptr.vmruntime_limits_last_wasm_exit_pc(),
     );
 }
+
+enum NativeRet {
+    Bare,
+    Retptr { offsets: Vec<u32>, size: u32 },
+}
+
+impl NativeRet {
+    fn classify(pointer_type: ir::Type, ty: &WasmFuncType) -> NativeRet {
+        fn align_to(val: u32, align: u32) -> u32 {
+            (val + (align - 1)) & !(align - 1)
+        }
+
+        match ty.returns() {
+            [] | [_] => NativeRet::Bare,
+            other => {
+                let mut offset = 0;
+                let mut offsets = Vec::new();
+                let mut max_align = 1;
+                for ty in other[1..].iter() {
+                    let size = match ty {
+                        WasmType::I32 | WasmType::F32 => 4,
+                        WasmType::I64 | WasmType::F64 => 8,
+                        WasmType::Ref(_) => pointer_type.bytes(),
+                        WasmType::V128 => 16,
+                    };
+                    offset = align_to(offset, size);
+                    offsets.push(offset);
+                    offset += size;
+                    max_align = max_align.max(size);
+                }
+                NativeRet::Retptr {
+                    offsets,
+                    size: align_to(offset, max_align),
+                }
+            }
+        }
+    }
+
+    fn native_args<'a>(&self, args: &'a [ir::Value]) -> &'a [ir::Value] {
+        match self {
+            NativeRet::Bare => args,
+            NativeRet::Retptr { .. } => &args[..args.len() - 1],
+        }
+    }
+
+    fn native_return(
+        &self,
+        builder: &mut FunctionBuilder<'_>,
+        block0: ir::Block,
+        results: &[ir::Value],
+    ) {
+        match self {
+            NativeRet::Bare => {
+                builder.ins().return_(&results);
+            }
+            NativeRet::Retptr { offsets, .. } => {
+                let ptr = *builder.func.dfg.block_params(block0).last().unwrap();
+                let (first, rest) = results.split_first().unwrap();
+                assert_eq!(rest.len(), offsets.len());
+                for (arg, offset) in rest.iter().zip(offsets) {
+                    builder
+                        .ins()
+                        .store(MemFlags::trusted(), *arg, ptr, *offset as i32);
+                }
+                builder.ins().return_(&[*first]);
+            }
+        }
+    }
+}
diff --git a/crates/cranelift/src/compiler/component.rs b/crates/cranelift/src/compiler/component.rs
index 76c3dfc7e0d3..9f73032601ba 100644
--- a/crates/cranelift/src/compiler/component.rs
+++ b/crates/cranelift/src/compiler/component.rs
@@ -1,8 +1,9 @@
 //! Compilation support for the component model.
-use crate::compiler::Compiler;
+use crate::compiler::{Compiler, NativeRet};
 use anyhow::Result;
 use cranelift_codegen::ir::{self, InstBuilder, MemFlags};
+use cranelift_codegen::isa::CallConv;
 use cranelift_frontend::FunctionBuilder;
 use std::any::Any;
 use wasmtime_cranelift_shared::ALWAYS_TRAP_CODE;
@@ -42,13 +43,24 @@ impl Compiler {
             },
         );
         let (mut builder, block0) = compiler.builder(func);
+        let args = builder.func.dfg.block_params(block0).to_vec();
+        let vmctx = args[0];
+
+        // More handling is necessary here if this changes
+        assert!(matches!(
+            NativeRet::classify(pointer_type, wasm_func_ty),
+            NativeRet::Bare
+        ));

         // Start off by spilling all the wasm arguments into a stack slot to be
         // passed to the host function.
         let (values_vec_ptr, values_vec_len) = match abi {
             Abi::Wasm | Abi::Native => {
-                let (ptr, len) =
-                    self.allocate_stack_array_and_spill_args(wasm_func_ty, &mut builder, block0);
+                let (ptr, len) = self.allocate_stack_array_and_spill_args(
+                    wasm_func_ty,
+                    &mut builder,
+                    &args[2..],
+                );
                 let len = builder.ins().iconst(pointer_type, i64::from(len));
                 (ptr, len)
             }
@@ -57,7 +69,6 @@ impl Compiler {
                 (params[2], params[3])
             }
         };
-        let vmctx = builder.func.dfg.block_params(block0)[0];

         // If we are crossing the Wasm-to-native boundary, we need to save the
         // exit FP and return address for stack walking purposes. However, we
@@ -84,7 +95,7 @@ impl Compiler {
         // function we're calling as well as the list of arguments since the
         // list is somewhat long.
         let mut callee_args = Vec::new();
-        let mut host_sig = ir::Signature::new(crate::wasmtime_call_conv(isa));
+        let mut host_sig = ir::Signature::new(CallConv::triple_default(isa.triple()));

         let CanonicalOptions {
             instance,
@@ -411,6 +422,14 @@ impl Compiler {
         let Transcoder { to64, from64, .. } = *transcoder;
         let mut args = Vec::new();

+        let uses_retptr = match transcoder.op {
+            Transcode::Utf16ToUtf8
+            | Transcode::Latin1ToUtf8
+            | Transcode::Utf8ToLatin1
+            | Transcode::Utf16ToLatin1 => true,
+            _ => false,
+        };
+
         // Most transcoders share roughly the same signature despite doing very
         // different things internally, so most libcalls are lumped together
         // here.
@@ -441,8 +460,23 @@ impl Compiler {
                 args.push(len_param(builder, 4, to64));
             }
         };
+        if uses_retptr {
+            let slot = builder.func.create_sized_stack_slot(ir::StackSlotData::new(
+                ir::StackSlotKind::ExplicitSlot,
+                pointer_type.bytes(),
+            ));
+            args.push(builder.ins().stack_addr(pointer_type, slot, 0));
+        }
         let call = builder.ins().call_indirect(sig, transcode_libcall, &args);
-        let results = builder.func.dfg.inst_results(call).to_vec();
+        let mut results = builder.func.dfg.inst_results(call).to_vec();
+        if uses_retptr {
+            results.push(builder.ins().load(
+                pointer_type,
+                ir::MemFlags::trusted(),
+                *args.last().unwrap(),
+                0,
+            ));
+        }
         let mut raw_results = Vec::new();

         // Helper to cast a host pointer integer type to the destination type.
@@ -515,6 +549,7 @@ impl Compiler {
 mod host {
     use crate::compiler::Compiler;
     use cranelift_codegen::ir::{self, AbiParam};
+    use cranelift_codegen::isa::CallConv;

     macro_rules! host_transcode {
         (
@@ -526,7 +561,7 @@ mod host {
         $(
             pub(super) fn $name(compiler: &Compiler, func: &mut ir::Function) -> (ir::SigRef, u32) {
                 let pointer_type = compiler.isa.pointer_type();
-                let params = vec![
+                let mut params = vec![
                     $( AbiParam::new(host_transcode!(@ty pointer_type $param)) ),*
                 ];
                 let mut returns = Vec::new();
@@ -534,7 +569,7 @@ mod host {
                 let sig = func.import_signature(ir::Signature {
                     params,
                     returns,
-                    call_conv: crate::wasmtime_call_conv(&*compiler.isa),
+                    call_conv: CallConv::triple_default(compiler.isa.triple()),
                 });

                 (sig, offsets::$name)
@@ -548,7 +583,7 @@ mod host {
     (@push_return $ptr:ident $params:ident $returns:ident size) => ($returns.push(AbiParam::new($ptr)););
     (@push_return $ptr:ident $params:ident $returns:ident size_pair) => ({
-        $returns.push(AbiParam::new($ptr));
+        $params.push(AbiParam::new($ptr));
         $returns.push(AbiParam::new($ptr));
     });
 }
diff --git a/crates/cranelift/src/func_environ.rs b/crates/cranelift/src/func_environ.rs
index 9f681def4d1c..9b182c1a75f1 100644
--- a/crates/cranelift/src/func_environ.rs
+++ b/crates/cranelift/src/func_environ.rs
@@ -4,7 +4,7 @@ use cranelift_codegen::ir::condcodes::*;
 use cranelift_codegen::ir::immediates::{Imm64, Offset32, Uimm64};
 use cranelift_codegen::ir::types::*;
 use cranelift_codegen::ir::{AbiParam, ArgumentPurpose, Function, InstBuilder, Signature};
-use cranelift_codegen::isa::{self, TargetFrontendConfig, TargetIsa};
+use cranelift_codegen::isa::{self, CallConv, TargetFrontendConfig, TargetIsa};
 use cranelift_entity::{EntityRef, PrimaryMap};
 use cranelift_frontend::FunctionBuilder;
 use cranelift_frontend::Variable;
@@ -166,7 +166,7 @@ impl<'module_environment> FuncEnvironment<'module_environment> {
                 ir::types::I64 => ir::types::R64,
                 _ => panic!(),
             },
-            crate::wasmtime_call_conv(isa),
+            CallConv::triple_default(isa.triple()),
         );
         Self {
             isa,
diff --git a/crates/cranelift/src/lib.rs b/crates/cranelift/src/lib.rs
index ec3642b3d75f..cc6ae5358128 100644
--- a/crates/cranelift/src/lib.rs
+++ b/crates/cranelift/src/lib.rs
@@ -7,7 +7,7 @@ use cranelift_codegen::ir;
 use cranelift_codegen::isa::{CallConv, TargetIsa};
 use cranelift_entity::PrimaryMap;
 use cranelift_wasm::{DefinedFuncIndex, WasmFuncType, WasmType};
-use target_lexicon::{Architecture, CallingConvention};
+use target_lexicon::CallingConvention;
 use wasmtime_cranelift_shared::CompiledFunctionMetadata;

 pub use builder::builder;
@@ -38,29 +38,6 @@ fn blank_sig(isa: &dyn TargetIsa, call_conv: CallConv) -> ir::Signature {
     return sig;
 }

-/// Returns the default calling convention for the `isa` provided.
-///
-/// Note that this calling convention is used for exported functions.
-fn wasmtime_call_conv(isa: &dyn TargetIsa) -> CallConv {
-    match isa.triple().default_calling_convention() {
-        Ok(CallingConvention::AppleAarch64) => CallConv::WasmtimeAppleAarch64,
-        Ok(CallingConvention::SystemV) | Err(()) => CallConv::WasmtimeSystemV,
-        Ok(CallingConvention::WindowsFastcall) => CallConv::WasmtimeFastcall,
-        Ok(unimp) => unimplemented!("calling convention: {:?}", unimp),
-    }
-}
-
-/// Appends the types of the `wasm` function signature into the `sig` signature
-/// provided.
-///
-/// Typically the `sig` signature will have been created from [`blank_sig`]
-/// above.
-fn push_types(isa: &dyn TargetIsa, sig: &mut ir::Signature, wasm: &WasmFuncType) {
-    let cvt = |ty: &WasmType| ir::AbiParam::new(value_type(isa, *ty));
-    sig.params.extend(wasm.params().iter().map(&cvt));
-    sig.returns.extend(wasm.returns().iter().map(&cvt));
-}
-
 /// Returns the corresponding cranelift type for the provided wasm type.
 fn value_type(isa: &dyn TargetIsa, ty: WasmType) -> ir::types::Type {
     match ty {
@@ -110,8 +87,15 @@ fn value_type(isa: &dyn TargetIsa, ty: WasmType) -> ir::types::Type {
 /// where the first result is returned directly and the rest via the return
 /// pointer.
 fn native_call_signature(isa: &dyn TargetIsa, wasm: &WasmFuncType) -> ir::Signature {
-    let mut sig = blank_sig(isa, wasmtime_call_conv(isa));
-    push_types(isa, &mut sig, wasm);
+    let mut sig = blank_sig(isa, CallConv::triple_default(isa.triple()));
+    let cvt = |ty: &WasmType| ir::AbiParam::new(value_type(isa, *ty));
+    sig.params.extend(wasm.params().iter().map(&cvt));
+    if let Some(first_ret) = wasm.returns().get(0) {
+        sig.returns.push(cvt(first_ret));
+    }
+    if wasm.returns().len() > 1 {
+        sig.params.push(ir::AbiParam::new(isa.pointer_type()));
+    }
     sig
 }
@@ -149,17 +133,15 @@ fn wasm_call_signature(isa: &dyn TargetIsa, wasm_func_ty: &WasmFuncType) -> ir::
         // Cranelift's ABI implementation generates unwinding directives
         // about pointer authentication usage, so we can't just use
         // `CallConv::Fast`.
-        CallConv::WasmtimeAppleAarch64
-    } else if isa.triple().architecture == Architecture::S390x {
-        // On S390x we need a Wasmtime calling convention to ensure
-        // we're using little-endian vector lane order.
-        wasmtime_call_conv(isa)
+        CallConv::AppleAarch64
     } else {
         CallConv::Fast
     };

     let mut sig = blank_sig(isa, call_conv);
-    push_types(isa, &mut sig, wasm_func_ty);
+    let cvt = |ty: &WasmType| ir::AbiParam::new(value_type(isa, *ty));
+    sig.params.extend(wasm_func_ty.params().iter().map(&cvt));
+    sig.returns.extend(wasm_func_ty.returns().iter().map(&cvt));
     sig
 }
diff --git a/winch/codegen/src/isa/mod.rs b/winch/codegen/src/isa/mod.rs
index 1943a5c2f6df..c90d4264c271 100644
--- a/winch/codegen/src/isa/mod.rs
+++ b/winch/codegen/src/isa/mod.rs
@@ -80,10 +80,10 @@ pub(crate) enum LookupError {
 pub enum CallingConvention {
     /// See [cranelift_codegen::isa::CallConv::WasmtimeSystemV]
     WasmtimeSystemV,
-    /// See [cranelift_codegen::isa::CallConv::WasmtimeFastcall]
-    WasmtimeFastcall,
-    /// See [cranelift_codegen::isa::CallConv::WasmtimeAppleAarch64]
-    WasmtimeAppleAarch64,
+    /// See [cranelift_codegen::isa::CallConv::WindowsFastcall]
+    WindowsFastcall,
+    /// See [cranelift_codegen::isa::CallConv::AppleAarch64]
+    AppleAarch64,
     /// The default calling convention for Winch. It largely follows SystemV
     /// for parameter and result handling. This calling convention is part of
     /// Winch's default ABI [crate::abi::ABI].
@@ -94,7 +94,7 @@ impl CallingConvention {
     /// Returns true if the current calling convention is `WasmtimeFastcall`.
     fn is_fastcall(&self) -> bool {
         match &self {
-            CallingConvention::WasmtimeFastcall => true,
+            CallingConvention::WindowsFastcall => true,
             _ => false,
         }
     }
@@ -110,7 +110,7 @@ impl CallingConvention {
     /// Returns true if the current calling convention is `WasmtimeAppleAarch64`.
     fn is_apple_aarch64(&self) -> bool {
         match &self {
-            CallingConvention::WasmtimeAppleAarch64 => true,
+            CallingConvention::AppleAarch64 => true,
             _ => false,
         }
     }
@@ -162,9 +162,9 @@ pub trait TargetIsa: Send + Sync {
     /// calling convention.
     fn wasmtime_call_conv(&self) -> CallingConvention {
         match self.default_call_conv() {
-            CallConv::AppleAarch64 => CallingConvention::WasmtimeAppleAarch64,
+            CallConv::AppleAarch64 => CallingConvention::AppleAarch64,
             CallConv::SystemV => CallingConvention::WasmtimeSystemV,
-            CallConv::WindowsFastcall => CallingConvention::WasmtimeFastcall,
+            CallConv::WindowsFastcall => CallingConvention::WindowsFastcall,
             cc => unimplemented!("calling convention: {:?}", cc),
         }
     }
diff --git a/winch/codegen/src/isa/x64/abi.rs b/winch/codegen/src/isa/x64/abi.rs
index e06ce59c08b7..a9d115403aee 100644
--- a/winch/codegen/src/isa/x64/abi.rs
+++ b/winch/codegen/src/isa/x64/abi.rs
@@ -342,7 +342,7 @@ mod tests {
             [].into(),
         );

-        let sig = X64ABI::sig(&wasm_sig, &CallingConvention::WasmtimeFastcall);
+        let sig = X64ABI::sig(&wasm_sig, &CallingConvention::WindowsFastcall);
         let params = sig.params;

         match_reg_arg(params.get(0).unwrap(), F32, regs::xmm0());
diff --git a/winch/codegen/src/isa/x64/regs.rs b/winch/codegen/src/isa/x64/regs.rs
index 23f5228c1f45..405b31e20e9b 100644
--- a/winch/codegen/src/isa/x64/regs.rs
+++ b/winch/codegen/src/isa/x64/regs.rs
@@ -184,7 +184,7 @@ pub(crate) fn callee_saved(call_conv: &CallingConvention) -> SmallVec<[Reg; 9]>
         }
         // TODO: Once float registers are supported,
        // account for callee-saved float registers.
-        WasmtimeFastcall => {
+        WindowsFastcall => {
            smallvec![rbx(), rdi(), rsi(), r12(), r13(), r14(), r15(),]
         }
         _ => unreachable!(),
diff --git a/winch/codegen/src/trampoline.rs b/winch/codegen/src/trampoline.rs
index a276dbd75127..13beec606ada 100644
--- a/winch/codegen/src/trampoline.rs
+++ b/winch/codegen/src/trampoline.rs
@@ -21,7 +21,7 @@ use std::mem;
 use wasmtime_environ::{FuncIndex, PtrSize, WasmFuncType, WasmType};

 /// The supported trampoline kinds.
-/// See https://github.com/bytecodealliance/rfcs/blob/main/accepted/tail-calls.md#new-trampolines-and-vmcallercheckedanyfunc-changes
+/// See <https://github.com/bytecodealliance/rfcs/blob/main/accepted/tail-calls.md#new-trampolines-and-vmcallercheckedanyfunc-changes>
 /// for more details.
 pub enum TrampolineKind {
     /// Calling from native to Wasm, using the array calling convention.
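
The new `NativeRet` helper in crates/cranelift/src/compiler.rs decides, per function type, whether the native signature returns its value directly (`Bare`, for zero or one result) or stores every result after the first through a caller-provided return pointer (`Retptr`). The following standalone Rust sketch mirrors that layout computation outside the patch; the names `RetKind` and `classify_rets` are invented for illustration, and plain byte sizes stand in for `WasmType`, assuming the same natural-alignment rule as `NativeRet::classify`:

fn align_to(val: u32, align: u32) -> u32 {
    (val + (align - 1)) & !(align - 1)
}

/// Hypothetical stand-in for `NativeRet`: byte offsets for results 1..n
/// plus the total size of the return area.
enum RetKind {
    Bare,
    Retptr { offsets: Vec<u32>, size: u32 },
}

/// `sizes` holds the byte size of each result type (4 for i32/f32,
/// 8 for i64/f64, 16 for v128, pointer size for references).
fn classify_rets(sizes: &[u32]) -> RetKind {
    match sizes {
        [] | [_] => RetKind::Bare,
        all => {
            let (mut offset, mut max_align) = (0u32, 1u32);
            let mut offsets = Vec::new();
            // The first result stays in a register; the rest get
            // naturally aligned slots in the return area.
            for &size in &all[1..] {
                offset = align_to(offset, size);
                offsets.push(offset);
                offset += size;
                max_align = max_align.max(size);
            }
            RetKind::Retptr {
                offsets,
                size: align_to(offset, max_align),
            }
        }
    }
}

fn main() {
    // (i32, i64, f32): the i32 comes back in a register; the i64 lands
    // at offset 0 and the f32 at offset 8 of a 16-byte return area.
    match classify_rets(&[4, 8, 4]) {
        RetKind::Retptr { offsets, size } => {
            assert_eq!(offsets, vec![0, 8]);
            assert_eq!(size, 16);
        }
        RetKind::Bare => unreachable!(),
    }
}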
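At the signature level, `native_call_signature` now appends one pointer-sized parameter whenever a function has more than one result, and `native_return` plus the wasm-to-native trampoline agree that results 1..n travel through it. Here is a hedged illustration of that call protocol in plain Rust; the `RetArea` struct and `callee` function are invented for the example, while the real patch emits equivalent Cranelift IR via stack slots, `store`, and `load`:

// Return area for a `(i32, i64, f32)`-returning function: result 0 is
// the ordinary return value, results 1 and 2 live at offsets 0 and 8.
#[repr(C)]
#[derive(Default)]
struct RetArea {
    a: i64, // offset 0
    b: f32, // offset 8
}

// Callee side, mirroring `NativeRet::native_return`: store the trailing
// results through the return pointer, return the first one directly.
extern "C" fn callee(retptr: &mut RetArea) -> i32 {
    retptr.a = 42;
    retptr.b = 1.5;
    7
}

fn main() {
    // Caller side, mirroring the wasm-to-native trampoline: allocate the
    // return area (a stack slot in generated code), pass its address as
    // the final argument, then read the extra results back out.
    let mut area = RetArea::default();
    let first = callee(&mut area);
    assert_eq!((first, area.a, area.b), (7, 42, 1.5));
}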