Skip to content

Commit 003827f

Browse files
committed
cg_llvm: sve_cast intrinsic
Abstract over the existing `simd_cast` intrinsic to implement a new `sve_cast` intrinsic - this is better than allowing scalable vectors to be used with all of the generic `simd_*` intrinsics.
1 parent b1f5b46 commit 003827f

6 files changed

Lines changed: 205 additions & 90 deletions

File tree

compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ use std::fmt::{self, Write};
33
use std::hash::{Hash, Hasher};
44
use std::path::PathBuf;
55
use std::sync::Arc;
6-
use std::{iter, ptr};
6+
use std::{assert_matches, iter, ptr};
77

88
use libc::{c_longlong, c_uint};
99
use rustc_abi::{Align, Layout, NumScalableVectors, Size};

compiler/rustc_codegen_llvm/src/intrinsic.rs

Lines changed: 114 additions & 89 deletions
Original file line numberDiff line numberDiff line change
@@ -597,6 +597,27 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
597597
self.pointercast(val, self.type_ptr())
598598
}
599599

600+
sym::sve_cast => {
601+
let Some((in_cnt, in_elem, in_num_vecs)) =
602+
args[0].layout.ty.scalable_vector_parts(self.cx.tcx)
603+
else {
604+
bug!("input parameter to `sve_cast` was not scalable vector");
605+
};
606+
let out_layout = self.layout_of(fn_args.type_at(1));
607+
let Some((out_cnt, out_elem, out_num_vecs)) =
608+
out_layout.ty.scalable_vector_parts(self.cx.tcx)
609+
else {
610+
bug!("output parameter to `sve_cast` was not scalable vector");
611+
};
612+
assert_eq!(in_cnt, out_cnt);
613+
assert_eq!(in_num_vecs, out_num_vecs);
614+
let out_llty = self.backend_type(out_layout);
615+
match simd_cast(self, sym::simd_cast, args, out_llty, in_elem, out_elem) {
616+
Some(val) => val,
617+
_ => bug!("could not cast scalable vectors"),
618+
}
619+
}
620+
600621
sym::sve_tuple_create2 => {
601622
assert_matches!(
602623
self.layout_of(fn_args.type_at(0)).backend_repr,
@@ -2763,96 +2784,17 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
27632784
out_len
27642785
}
27652786
);
2766-
// casting cares about nominal type, not just structural type
2767-
if in_elem == out_elem {
2768-
return Ok(args[0].immediate());
2769-
}
2770-
2771-
#[derive(Copy, Clone)]
2772-
enum Sign {
2773-
Unsigned,
2774-
Signed,
2775-
}
2776-
use Sign::*;
2777-
2778-
enum Style {
2779-
Float,
2780-
Int(Sign),
2781-
Unsupported,
2782-
}
2783-
2784-
let (in_style, in_width) = match in_elem.kind() {
2785-
// vectors of pointer-sized integers should've been
2786-
// disallowed before here, so this unwrap is safe.
2787-
ty::Int(i) => (
2788-
Style::Int(Signed),
2789-
i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2790-
),
2791-
ty::Uint(u) => (
2792-
Style::Int(Unsigned),
2793-
u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2794-
),
2795-
ty::Float(f) => (Style::Float, f.bit_width()),
2796-
_ => (Style::Unsupported, 0),
2797-
};
2798-
let (out_style, out_width) = match out_elem.kind() {
2799-
ty::Int(i) => (
2800-
Style::Int(Signed),
2801-
i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2802-
),
2803-
ty::Uint(u) => (
2804-
Style::Int(Unsigned),
2805-
u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2806-
),
2807-
ty::Float(f) => (Style::Float, f.bit_width()),
2808-
_ => (Style::Unsupported, 0),
2809-
};
2810-
2811-
match (in_style, out_style) {
2812-
(Style::Int(sign), Style::Int(_)) => {
2813-
return Ok(match in_width.cmp(&out_width) {
2814-
Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
2815-
Ordering::Equal => args[0].immediate(),
2816-
Ordering::Less => match sign {
2817-
Sign::Signed => bx.sext(args[0].immediate(), llret_ty),
2818-
Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty),
2819-
},
2820-
});
2821-
}
2822-
(Style::Int(Sign::Signed), Style::Float) => {
2823-
return Ok(bx.sitofp(args[0].immediate(), llret_ty));
2824-
}
2825-
(Style::Int(Sign::Unsigned), Style::Float) => {
2826-
return Ok(bx.uitofp(args[0].immediate(), llret_ty));
2827-
}
2828-
(Style::Float, Style::Int(sign)) => {
2829-
return Ok(match (sign, name == sym::simd_as) {
2830-
(Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty),
2831-
(Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty),
2832-
(_, true) => bx.cast_float_to_int(
2833-
matches!(sign, Sign::Signed),
2834-
args[0].immediate(),
2835-
llret_ty,
2836-
),
2837-
});
2838-
}
2839-
(Style::Float, Style::Float) => {
2840-
return Ok(match in_width.cmp(&out_width) {
2841-
Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
2842-
Ordering::Equal => args[0].immediate(),
2843-
Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
2844-
});
2845-
}
2846-
_ => { /* Unsupported. Fallthrough. */ }
2787+
match simd_cast(bx, name, args, llret_ty, in_elem, out_elem) {
2788+
Some(val) => return Ok(val),
2789+
None => return_error!(InvalidMonomorphization::UnsupportedCast {
2790+
span,
2791+
name,
2792+
in_ty,
2793+
in_elem,
2794+
ret_ty,
2795+
out_elem
2796+
}),
28472797
}
2848-
return_error!(InvalidMonomorphization::UnsupportedCast {
2849-
span,
2850-
name,
2851-
in_ty,
2852-
in_elem,
2853-
ret_ty,
2854-
out_elem
2855-
});
28562798
}
28572799
macro_rules! arith_binary {
28582800
($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
@@ -3026,3 +2968,86 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
30262968

30272969
span_bug!(span, "unknown SIMD intrinsic");
30282970
}
2971+
2972+
/// Implementation of `core::intrinsics::simd::simd_cast`, re-used by `core::intrinsics::simd::scalable::sve_cast`.
2973+
fn simd_cast<'ll, 'tcx>(
2974+
bx: &mut Builder<'_, 'll, 'tcx>,
2975+
name: Symbol,
2976+
args: &[OperandRef<'tcx, &'ll Value>],
2977+
llret_ty: &'ll Type,
2978+
in_elem: Ty<'tcx>,
2979+
out_elem: Ty<'tcx>,
2980+
) -> Option<&'ll Value> {
2981+
// Casting cares about nominal type, not just structural type
2982+
if in_elem == out_elem {
2983+
return Some(args[0].immediate());
2984+
}
2985+
2986+
#[derive(Copy, Clone)]
2987+
enum Sign {
2988+
Unsigned,
2989+
Signed,
2990+
}
2991+
use Sign::*;
2992+
2993+
enum Style {
2994+
Float,
2995+
Int(Sign),
2996+
Unsupported,
2997+
}
2998+
2999+
let (in_style, in_width) = match in_elem.kind() {
3000+
// vectors of pointer-sized integers should've been
3001+
// disallowed before here, so this unwrap is safe.
3002+
ty::Int(i) => (
3003+
Style::Int(Signed),
3004+
i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
3005+
),
3006+
ty::Uint(u) => (
3007+
Style::Int(Unsigned),
3008+
u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
3009+
),
3010+
ty::Float(f) => (Style::Float, f.bit_width()),
3011+
_ => (Style::Unsupported, 0),
3012+
};
3013+
let (out_style, out_width) = match out_elem.kind() {
3014+
ty::Int(i) => (
3015+
Style::Int(Signed),
3016+
i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
3017+
),
3018+
ty::Uint(u) => (
3019+
Style::Int(Unsigned),
3020+
u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
3021+
),
3022+
ty::Float(f) => (Style::Float, f.bit_width()),
3023+
_ => (Style::Unsupported, 0),
3024+
};
3025+
3026+
match (in_style, out_style) {
3027+
(Style::Int(sign), Style::Int(_)) => Some(match in_width.cmp(&out_width) {
3028+
Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
3029+
Ordering::Equal => args[0].immediate(),
3030+
Ordering::Less => match sign {
3031+
Sign::Signed => bx.sext(args[0].immediate(), llret_ty),
3032+
Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty),
3033+
},
3034+
}),
3035+
(Style::Int(Sign::Signed), Style::Float) => Some(bx.sitofp(args[0].immediate(), llret_ty)),
3036+
(Style::Int(Sign::Unsigned), Style::Float) => {
3037+
Some(bx.uitofp(args[0].immediate(), llret_ty))
3038+
}
3039+
(Style::Float, Style::Int(sign)) => Some(match (sign, name == sym::simd_as) {
3040+
(Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty),
3041+
(Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty),
3042+
(_, true) => {
3043+
bx.cast_float_to_int(matches!(sign, Sign::Signed), args[0].immediate(), llret_ty)
3044+
}
3045+
}),
3046+
(Style::Float, Style::Float) => Some(match in_width.cmp(&out_width) {
3047+
Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
3048+
Ordering::Equal => args[0].immediate(),
3049+
Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
3050+
}),
3051+
_ => None,
3052+
}
3053+
}

compiler/rustc_hir_analysis/src/check/intrinsic.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -789,6 +789,7 @@ pub(crate) fn check_intrinsic_type(
789789
sym::simd_shuffle => (3, 0, vec![param(0), param(0), param(1)], param(2)),
790790
sym::simd_shuffle_const_generic => (2, 1, vec![param(0), param(0)], param(1)),
791791

792+
sym::sve_cast => (2, 0, vec![param(0)], param(1)),
792793
sym::sve_tuple_create2 => (2, 0, vec![param(0), param(0)], param(1)),
793794
sym::sve_tuple_create3 => (2, 0, vec![param(0), param(0), param(0)], param(1)),
794795
sym::sve_tuple_create4 => (2, 0, vec![param(0), param(0), param(0), param(0)], param(1)),

compiler/rustc_span/src/symbol.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1974,6 +1974,7 @@ symbols! {
19741974
suggestion,
19751975
super_let,
19761976
supertrait_item_shadowing,
1977+
sve_cast,
19771978
sve_tuple_create2,
19781979
sve_tuple_create3,
19791980
sve_tuple_create4,

library/core/src/intrinsics/simd/scalable.rs

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,29 @@
22
//!
33
//! In this module, a "vector" is any `#[rustc_scalable_vector]`-annotated type.
44
5+
/// Numerically casts a vector, elementwise.
6+
///
7+
/// `T` and `U` must be vectors of integers or floats, and must have the same length.
8+
///
9+
/// When casting floats to integers, the result is truncated. Out-of-bounds results lead to UB.
10+
/// When casting integers to floats, the result is rounded.
11+
/// Otherwise, truncates or extends the value, maintaining the sign for signed integers.
12+
///
13+
/// # Safety
14+
/// Casting from integer types is always safe.
15+
/// Casting between two float types is also always safe.
16+
///
17+
/// Casting floats to integers truncates, following the same rules as `to_int_unchecked`.
18+
/// Specifically, each element must:
19+
/// * Not be `NaN`
20+
/// * Not be infinite
21+
/// * Be representable in the return type, after truncating off its fractional part
22+
#[cfg(target_arch = "aarch64")]
23+
#[rustc_intrinsic]
24+
#[rustc_nounwind]
25+
#[target_feature(enable = "sve")]
26+
pub unsafe fn sve_cast<T, U>(x: T) -> U;
27+
528
/// Create a tuple of two vectors.
629
///
730
/// `SVecTup` must be a scalable vector tuple (`#[rustc_scalable_vector]`) and `SVec` must be a
Lines changed: 65 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,65 @@
1+
//@ check-pass
2+
//@ only-aarch64
3+
#![crate_type = "lib"]
4+
#![allow(incomplete_features, internal_features, improper_ctypes)]
5+
#![feature(abi_unadjusted, core_intrinsics, link_llvm_intrinsics, rustc_attrs)]
6+
7+
use std::intrinsics::simd::scalable::sve_cast;
8+
9+
#[derive(Copy, Clone)]
10+
#[rustc_scalable_vector(16)]
11+
#[allow(non_camel_case_types)]
12+
pub struct svbool_t(bool);
13+
14+
#[derive(Copy, Clone)]
15+
#[rustc_scalable_vector(2)]
16+
#[allow(non_camel_case_types)]
17+
pub struct svbool2_t(bool);
18+
19+
#[derive(Copy, Clone)]
20+
#[rustc_scalable_vector(2)]
21+
#[allow(non_camel_case_types)]
22+
pub struct svint64_t(i64);
23+
24+
#[derive(Copy, Clone)]
25+
#[rustc_scalable_vector(2)]
26+
#[allow(non_camel_case_types)]
27+
pub struct nxv2i16(i16);
28+
29+
pub trait SveInto<T>: Sized {
30+
unsafe fn sve_into(self) -> T;
31+
}
32+
33+
impl SveInto<svbool2_t> for svbool_t {
34+
#[target_feature(enable = "sve")]
35+
unsafe fn sve_into(self) -> svbool2_t {
36+
unsafe extern "C" {
37+
#[cfg_attr(
38+
target_arch = "aarch64",
39+
link_name = concat!("llvm.aarch64.sve.convert.from.svbool.nxv2i1")
40+
)]
41+
fn convert_from_svbool(b: svbool_t) -> svbool2_t;
42+
}
43+
unsafe { convert_from_svbool(self) }
44+
}
45+
}
46+
47+
#[target_feature(enable = "sve")]
48+
pub unsafe fn svld1sh_gather_s64offset_s64(
49+
pg: svbool_t,
50+
base: *const i16,
51+
offsets: svint64_t,
52+
) -> svint64_t {
53+
unsafe extern "unadjusted" {
54+
#[cfg_attr(
55+
target_arch = "aarch64",
56+
link_name = "llvm.aarch64.sve.ld1.gather.nxv2i16"
57+
)]
58+
fn _svld1sh_gather_s64offset_s64(
59+
pg: svbool2_t,
60+
base: *const i16,
61+
offsets: svint64_t,
62+
) -> nxv2i16;
63+
}
64+
sve_cast(_svld1sh_gather_s64offset_s64(pg.sve_into(), base, offsets))
65+
}

0 commit comments

Comments
 (0)