Skip to content

Commit 4329945

Browse files
committed
cg_llvm: sve_cast intrinsic
Abstract over the existing `simd_cast` intrinsic to implement a new `sve_cast` intrinsic - this is better than allowing scalable vectors to be used with all of the generic `simd_*` intrinsics.
1 parent e76eec9 commit 4329945

5 files changed

Lines changed: 203 additions & 89 deletions

File tree

compiler/rustc_codegen_llvm/src/intrinsic.rs

Lines changed: 114 additions & 89 deletions
Original file line numberDiff line numberDiff line change
@@ -582,6 +582,27 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
582582
self.pointercast(val, self.type_ptr())
583583
}
584584

585+
sym::sve_cast => {
586+
let Some((in_cnt, in_elem, in_num_vecs)) =
587+
args[0].layout.ty.scalable_vector_parts(self.cx.tcx)
588+
else {
589+
bug!("input parameter to `sve_cast` was not scalable vector");
590+
};
591+
let out_layout = self.layout_of(fn_args.type_at(1));
592+
let Some((out_cnt, out_elem, out_num_vecs)) =
593+
out_layout.ty.scalable_vector_parts(self.cx.tcx)
594+
else {
595+
bug!("output parameter to `sve_cast` was not scalable vector");
596+
};
597+
assert_eq!(in_cnt, out_cnt);
598+
assert_eq!(in_num_vecs, out_num_vecs);
599+
let out_llty = self.backend_type(out_layout);
600+
match simd_cast(self, sym::simd_cast, args, out_llty, in_elem, out_elem) {
601+
Some(val) => val,
602+
_ => bug!("could not cast scalable vectors"),
603+
}
604+
}
605+
585606
sym::sve_tuple_create2 => {
586607
assert_matches!(
587608
self.layout_of(fn_args.type_at(0)).backend_repr,
@@ -2752,96 +2773,17 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
27522773
out_len
27532774
}
27542775
);
2755-
// casting cares about nominal type, not just structural type
2756-
if in_elem == out_elem {
2757-
return Ok(args[0].immediate());
2758-
}
2759-
2760-
#[derive(Copy, Clone)]
2761-
enum Sign {
2762-
Unsigned,
2763-
Signed,
2764-
}
2765-
use Sign::*;
2766-
2767-
enum Style {
2768-
Float,
2769-
Int(Sign),
2770-
Unsupported,
2771-
}
2772-
2773-
let (in_style, in_width) = match in_elem.kind() {
2774-
// vectors of pointer-sized integers should've been
2775-
// disallowed before here, so this unwrap is safe.
2776-
ty::Int(i) => (
2777-
Style::Int(Signed),
2778-
i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2779-
),
2780-
ty::Uint(u) => (
2781-
Style::Int(Unsigned),
2782-
u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2783-
),
2784-
ty::Float(f) => (Style::Float, f.bit_width()),
2785-
_ => (Style::Unsupported, 0),
2786-
};
2787-
let (out_style, out_width) = match out_elem.kind() {
2788-
ty::Int(i) => (
2789-
Style::Int(Signed),
2790-
i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2791-
),
2792-
ty::Uint(u) => (
2793-
Style::Int(Unsigned),
2794-
u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2795-
),
2796-
ty::Float(f) => (Style::Float, f.bit_width()),
2797-
_ => (Style::Unsupported, 0),
2798-
};
2799-
2800-
match (in_style, out_style) {
2801-
(Style::Int(sign), Style::Int(_)) => {
2802-
return Ok(match in_width.cmp(&out_width) {
2803-
Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
2804-
Ordering::Equal => args[0].immediate(),
2805-
Ordering::Less => match sign {
2806-
Sign::Signed => bx.sext(args[0].immediate(), llret_ty),
2807-
Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty),
2808-
},
2809-
});
2810-
}
2811-
(Style::Int(Sign::Signed), Style::Float) => {
2812-
return Ok(bx.sitofp(args[0].immediate(), llret_ty));
2813-
}
2814-
(Style::Int(Sign::Unsigned), Style::Float) => {
2815-
return Ok(bx.uitofp(args[0].immediate(), llret_ty));
2816-
}
2817-
(Style::Float, Style::Int(sign)) => {
2818-
return Ok(match (sign, name == sym::simd_as) {
2819-
(Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty),
2820-
(Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty),
2821-
(_, true) => bx.cast_float_to_int(
2822-
matches!(sign, Sign::Signed),
2823-
args[0].immediate(),
2824-
llret_ty,
2825-
),
2826-
});
2827-
}
2828-
(Style::Float, Style::Float) => {
2829-
return Ok(match in_width.cmp(&out_width) {
2830-
Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
2831-
Ordering::Equal => args[0].immediate(),
2832-
Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
2833-
});
2834-
}
2835-
_ => { /* Unsupported. Fallthrough. */ }
2776+
match simd_cast(bx, name, args, llret_ty, in_elem, out_elem) {
2777+
Some(val) => return Ok(val),
2778+
None => return_error!(InvalidMonomorphization::UnsupportedCast {
2779+
span,
2780+
name,
2781+
in_ty,
2782+
in_elem,
2783+
ret_ty,
2784+
out_elem
2785+
}),
28362786
}
2837-
return_error!(InvalidMonomorphization::UnsupportedCast {
2838-
span,
2839-
name,
2840-
in_ty,
2841-
in_elem,
2842-
ret_ty,
2843-
out_elem
2844-
});
28452787
}
28462788
macro_rules! arith_binary {
28472789
($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
@@ -3015,3 +2957,86 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
30152957

30162958
span_bug!(span, "unknown SIMD intrinsic");
30172959
}
2960+
2961+
/// Implementation of `core::intrinsics::simd_cast`, re-used by `core::scalable::sve_cast`.
2962+
fn simd_cast<'ll, 'tcx>(
2963+
bx: &mut Builder<'_, 'll, 'tcx>,
2964+
name: Symbol,
2965+
args: &[OperandRef<'tcx, &'ll Value>],
2966+
llret_ty: &'ll Type,
2967+
in_elem: Ty<'tcx>,
2968+
out_elem: Ty<'tcx>,
2969+
) -> Option<&'ll Value> {
2970+
// Casting cares about nominal type, not just structural type
2971+
if in_elem == out_elem {
2972+
return Some(args[0].immediate());
2973+
}
2974+
2975+
#[derive(Copy, Clone)]
2976+
enum Sign {
2977+
Unsigned,
2978+
Signed,
2979+
}
2980+
use Sign::*;
2981+
2982+
enum Style {
2983+
Float,
2984+
Int(Sign),
2985+
Unsupported,
2986+
}
2987+
2988+
let (in_style, in_width) = match in_elem.kind() {
2989+
// vectors of pointer-sized integers should've been
2990+
// disallowed before here, so this unwrap is safe.
2991+
ty::Int(i) => (
2992+
Style::Int(Signed),
2993+
i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2994+
),
2995+
ty::Uint(u) => (
2996+
Style::Int(Unsigned),
2997+
u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2998+
),
2999+
ty::Float(f) => (Style::Float, f.bit_width()),
3000+
_ => (Style::Unsupported, 0),
3001+
};
3002+
let (out_style, out_width) = match out_elem.kind() {
3003+
ty::Int(i) => (
3004+
Style::Int(Signed),
3005+
i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
3006+
),
3007+
ty::Uint(u) => (
3008+
Style::Int(Unsigned),
3009+
u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
3010+
),
3011+
ty::Float(f) => (Style::Float, f.bit_width()),
3012+
_ => (Style::Unsupported, 0),
3013+
};
3014+
3015+
match (in_style, out_style) {
3016+
(Style::Int(sign), Style::Int(_)) => Some(match in_width.cmp(&out_width) {
3017+
Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
3018+
Ordering::Equal => args[0].immediate(),
3019+
Ordering::Less => match sign {
3020+
Sign::Signed => bx.sext(args[0].immediate(), llret_ty),
3021+
Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty),
3022+
},
3023+
}),
3024+
(Style::Int(Sign::Signed), Style::Float) => Some(bx.sitofp(args[0].immediate(), llret_ty)),
3025+
(Style::Int(Sign::Unsigned), Style::Float) => {
3026+
Some(bx.uitofp(args[0].immediate(), llret_ty))
3027+
}
3028+
(Style::Float, Style::Int(sign)) => Some(match (sign, name == sym::simd_as) {
3029+
(Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty),
3030+
(Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty),
3031+
(_, true) => {
3032+
bx.cast_float_to_int(matches!(sign, Sign::Signed), args[0].immediate(), llret_ty)
3033+
}
3034+
}),
3035+
(Style::Float, Style::Float) => Some(match in_width.cmp(&out_width) {
3036+
Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
3037+
Ordering::Equal => args[0].immediate(),
3038+
Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
3039+
}),
3040+
_ => None,
3041+
}
3042+
}

compiler/rustc_hir_analysis/src/check/intrinsic.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -785,6 +785,7 @@ pub(crate) fn check_intrinsic_type(
785785
sym::simd_shuffle => (3, 0, vec![param(0), param(0), param(1)], param(2)),
786786
sym::simd_shuffle_const_generic => (2, 1, vec![param(0), param(0)], param(1)),
787787

788+
sym::sve_cast => (2, 0, vec![param(0)], param(1)),
788789
sym::sve_tuple_create2 => (2, 0, vec![param(0), param(0)], param(1)),
789790
sym::sve_tuple_create3 => (2, 0, vec![param(0), param(0), param(0)], param(1)),
790791
sym::sve_tuple_create4 => (2, 0, vec![param(0), param(0), param(0), param(0)], param(1)),

compiler/rustc_span/src/symbol.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1984,6 +1984,7 @@ symbols! {
19841984
suggestion,
19851985
super_let,
19861986
supertrait_item_shadowing,
1987+
sve_cast,
19871988
sve_tuple_create2,
19881989
sve_tuple_create3,
19891990
sve_tuple_create4,

library/core/src/intrinsics/simd/scalable.rs

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,29 @@
22
//!
33
//! In this module, a "vector" is any `#[rustc_scalable_vector]`-annotated type.
44
5+
/// Numerically casts a vector, elementwise.
6+
///
7+
/// `T` and `U` must be vectors of integers or floats, and must have the same length.
8+
///
9+
/// When casting floats to integers, the result is truncated. Out-of-bounds results lead to UB.
10+
/// When casting integers to floats, the result is rounded.
11+
/// Otherwise, truncates or extends the value, maintaining the sign for signed integers.
12+
///
13+
/// # Safety
14+
/// Casting from integer types is always safe.
15+
/// Casting between two float types is also always safe.
16+
///
17+
/// Casting floats to integers truncates, following the same rules as `to_int_unchecked`.
18+
/// Specifically, each element must:
19+
/// * Not be `NaN`
20+
/// * Not be infinite
21+
/// * Be representable in the return type, after truncating off its fractional part
22+
#[cfg(target_arch = "aarch64")]
23+
#[rustc_intrinsic]
24+
#[rustc_nounwind]
25+
#[target_feature(enable = "sve")]
26+
pub unsafe fn sve_cast<T, U>(x: T) -> U;
27+
528
/// Create a tuple of two vectors.
629
///
730
/// `SVecTup` must be a scalable vector tuple (`#[rustc_scalable_vector]`) and `SVec` must be a
Lines changed: 64 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,64 @@
1+
//@ check-pass
2+
#![crate_type = "lib"]
3+
#![allow(incomplete_features, internal_features, improper_ctypes)]
4+
#![feature(abi_unadjusted, core_intrinsics, link_llvm_intrinsics, rustc_attrs)]
5+
6+
use std::intrinsics::simd::scalable::sve_cast;
7+
8+
#[derive(Copy, Clone)]
9+
#[rustc_scalable_vector(16)]
10+
#[allow(non_camel_case_types)]
11+
pub struct svbool_t(bool);
12+
13+
#[derive(Copy, Clone)]
14+
#[rustc_scalable_vector(2)]
15+
#[allow(non_camel_case_types)]
16+
pub struct svbool2_t(bool);
17+
18+
#[derive(Copy, Clone)]
19+
#[rustc_scalable_vector(2)]
20+
#[allow(non_camel_case_types)]
21+
pub struct svint64_t(i64);
22+
23+
#[derive(Copy, Clone)]
24+
#[rustc_scalable_vector(2)]
25+
#[allow(non_camel_case_types)]
26+
pub struct nxv2i16(i16);
27+
28+
pub trait SveInto<T>: Sized {
29+
unsafe fn sve_into(self) -> T;
30+
}
31+
32+
impl SveInto<svbool2_t> for svbool_t {
33+
#[target_feature(enable = "sve")]
34+
unsafe fn sve_into(self) -> svbool2_t {
35+
unsafe extern "C" {
36+
#[cfg_attr(
37+
target_arch = "aarch64",
38+
link_name = concat!("llvm.aarch64.sve.convert.from.svbool.nxv2i1")
39+
)]
40+
fn convert_from_svbool(b: svbool_t) -> svbool2_t;
41+
}
42+
unsafe { convert_from_svbool(self) }
43+
}
44+
}
45+
46+
#[target_feature(enable = "sve")]
47+
pub unsafe fn svld1sh_gather_s64offset_s64(
48+
pg: svbool_t,
49+
base: *const i16,
50+
offsets: svint64_t,
51+
) -> svint64_t {
52+
unsafe extern "unadjusted" {
53+
#[cfg_attr(
54+
target_arch = "aarch64",
55+
link_name = "llvm.aarch64.sve.ld1.gather.nxv2i16"
56+
)]
57+
fn _svld1sh_gather_s64offset_s64(
58+
pg: svbool2_t,
59+
base: *const i16,
60+
offsets: svint64_t,
61+
) -> nxv2i16;
62+
}
63+
sve_cast(_svld1sh_gather_s64offset_s64(pg.sve_into(), base, offsets))
64+
}

0 commit comments

Comments
 (0)