Skip to content

Commit f8b4bc8

Browse files
committed
cg_llvm: sve_cast intrinsic
Abstract over the existing `simd_cast` intrinsic to implement a new `sve_cast` intrinsic - this is better than allowing scalable vectors to be used with all of the generic `simd_*` intrinsics.
1 parent 40b31e2 commit f8b4bc8

5 files changed

Lines changed: 203 additions & 89 deletions

File tree

compiler/rustc_codegen_llvm/src/intrinsic.rs

Lines changed: 114 additions & 89 deletions
Original file line numberDiff line numberDiff line change
@@ -582,6 +582,27 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
582582
self.pointercast(val, self.type_ptr())
583583
}
584584

585+
sym::sve_cast => {
586+
let Some((in_cnt, in_elem, in_num_vecs)) =
587+
args[0].layout.ty.scalable_vector_parts(self.cx.tcx)
588+
else {
589+
bug!("input parameter to `sve_cast` was not scalable vector");
590+
};
591+
let out_layout = self.layout_of(fn_args.type_at(1));
592+
let Some((out_cnt, out_elem, out_num_vecs)) =
593+
out_layout.ty.scalable_vector_parts(self.cx.tcx)
594+
else {
595+
bug!("output parameter to `sve_cast` was not scalable vector");
596+
};
597+
assert_eq!(in_cnt, out_cnt);
598+
assert_eq!(in_num_vecs, out_num_vecs);
599+
let out_llty = self.backend_type(out_layout);
600+
match simd_cast(self, sym::simd_cast, args, out_llty, in_elem, out_elem) {
601+
Some(val) => val,
602+
_ => bug!("could not cast scalable vectors"),
603+
}
604+
}
605+
585606
sym::sve_tuple_create2 => {
586607
assert_matches!(
587608
self.layout_of(fn_args.type_at(0)).backend_repr,
@@ -2769,96 +2790,17 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
27692790
out_len
27702791
}
27712792
);
2772-
// casting cares about nominal type, not just structural type
2773-
if in_elem == out_elem {
2774-
return Ok(args[0].immediate());
2775-
}
2776-
2777-
#[derive(Copy, Clone)]
2778-
enum Sign {
2779-
Unsigned,
2780-
Signed,
2781-
}
2782-
use Sign::*;
2783-
2784-
enum Style {
2785-
Float,
2786-
Int(Sign),
2787-
Unsupported,
2788-
}
2789-
2790-
let (in_style, in_width) = match in_elem.kind() {
2791-
// vectors of pointer-sized integers should've been
2792-
// disallowed before here, so this unwrap is safe.
2793-
ty::Int(i) => (
2794-
Style::Int(Signed),
2795-
i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2796-
),
2797-
ty::Uint(u) => (
2798-
Style::Int(Unsigned),
2799-
u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2800-
),
2801-
ty::Float(f) => (Style::Float, f.bit_width()),
2802-
_ => (Style::Unsupported, 0),
2803-
};
2804-
let (out_style, out_width) = match out_elem.kind() {
2805-
ty::Int(i) => (
2806-
Style::Int(Signed),
2807-
i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2808-
),
2809-
ty::Uint(u) => (
2810-
Style::Int(Unsigned),
2811-
u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
2812-
),
2813-
ty::Float(f) => (Style::Float, f.bit_width()),
2814-
_ => (Style::Unsupported, 0),
2815-
};
2816-
2817-
match (in_style, out_style) {
2818-
(Style::Int(sign), Style::Int(_)) => {
2819-
return Ok(match in_width.cmp(&out_width) {
2820-
Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
2821-
Ordering::Equal => args[0].immediate(),
2822-
Ordering::Less => match sign {
2823-
Sign::Signed => bx.sext(args[0].immediate(), llret_ty),
2824-
Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty),
2825-
},
2826-
});
2827-
}
2828-
(Style::Int(Sign::Signed), Style::Float) => {
2829-
return Ok(bx.sitofp(args[0].immediate(), llret_ty));
2830-
}
2831-
(Style::Int(Sign::Unsigned), Style::Float) => {
2832-
return Ok(bx.uitofp(args[0].immediate(), llret_ty));
2833-
}
2834-
(Style::Float, Style::Int(sign)) => {
2835-
return Ok(match (sign, name == sym::simd_as) {
2836-
(Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty),
2837-
(Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty),
2838-
(_, true) => bx.cast_float_to_int(
2839-
matches!(sign, Sign::Signed),
2840-
args[0].immediate(),
2841-
llret_ty,
2842-
),
2843-
});
2844-
}
2845-
(Style::Float, Style::Float) => {
2846-
return Ok(match in_width.cmp(&out_width) {
2847-
Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
2848-
Ordering::Equal => args[0].immediate(),
2849-
Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
2850-
});
2851-
}
2852-
_ => { /* Unsupported. Fallthrough. */ }
2793+
match simd_cast(bx, name, args, llret_ty, in_elem, out_elem) {
2794+
Some(val) => return Ok(val),
2795+
None => return_error!(InvalidMonomorphization::UnsupportedCast {
2796+
span,
2797+
name,
2798+
in_ty,
2799+
in_elem,
2800+
ret_ty,
2801+
out_elem
2802+
}),
28532803
}
2854-
return_error!(InvalidMonomorphization::UnsupportedCast {
2855-
span,
2856-
name,
2857-
in_ty,
2858-
in_elem,
2859-
ret_ty,
2860-
out_elem
2861-
});
28622804
}
28632805
macro_rules! arith_binary {
28642806
($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
@@ -3032,3 +2974,86 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
30322974

30332975
span_bug!(span, "unknown SIMD intrinsic");
30342976
}
2977+
2978+
/// Implementation of `core::intrinsics::simd_cast`, reused by `core::intrinsics::simd::scalable::sve_cast`.
2979+
fn simd_cast<'ll, 'tcx>(
2980+
bx: &mut Builder<'_, 'll, 'tcx>,
2981+
name: Symbol,
2982+
args: &[OperandRef<'tcx, &'ll Value>],
2983+
llret_ty: &'ll Type,
2984+
in_elem: Ty<'tcx>,
2985+
out_elem: Ty<'tcx>,
2986+
) -> Option<&'ll Value> {
2987+
// Casting cares about nominal type, not just structural type
2988+
if in_elem == out_elem {
2989+
return Some(args[0].immediate());
2990+
}
2991+
2992+
#[derive(Copy, Clone)]
2993+
enum Sign {
2994+
Unsigned,
2995+
Signed,
2996+
}
2997+
use Sign::*;
2998+
2999+
enum Style {
3000+
Float,
3001+
Int(Sign),
3002+
Unsupported,
3003+
}
3004+
3005+
let (in_style, in_width) = match in_elem.kind() {
3006+
// vectors of pointer-sized integers should've been
3007+
// disallowed before here, so this unwrap is safe.
3008+
ty::Int(i) => (
3009+
Style::Int(Signed),
3010+
i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
3011+
),
3012+
ty::Uint(u) => (
3013+
Style::Int(Unsigned),
3014+
u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
3015+
),
3016+
ty::Float(f) => (Style::Float, f.bit_width()),
3017+
_ => (Style::Unsupported, 0),
3018+
};
3019+
let (out_style, out_width) = match out_elem.kind() {
3020+
ty::Int(i) => (
3021+
Style::Int(Signed),
3022+
i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
3023+
),
3024+
ty::Uint(u) => (
3025+
Style::Int(Unsigned),
3026+
u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
3027+
),
3028+
ty::Float(f) => (Style::Float, f.bit_width()),
3029+
_ => (Style::Unsupported, 0),
3030+
};
3031+
3032+
match (in_style, out_style) {
3033+
(Style::Int(sign), Style::Int(_)) => Some(match in_width.cmp(&out_width) {
3034+
Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
3035+
Ordering::Equal => args[0].immediate(),
3036+
Ordering::Less => match sign {
3037+
Sign::Signed => bx.sext(args[0].immediate(), llret_ty),
3038+
Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty),
3039+
},
3040+
}),
3041+
(Style::Int(Sign::Signed), Style::Float) => Some(bx.sitofp(args[0].immediate(), llret_ty)),
3042+
(Style::Int(Sign::Unsigned), Style::Float) => {
3043+
Some(bx.uitofp(args[0].immediate(), llret_ty))
3044+
}
3045+
(Style::Float, Style::Int(sign)) => Some(match (sign, name == sym::simd_as) {
3046+
(Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty),
3047+
(Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty),
3048+
(_, true) => {
3049+
bx.cast_float_to_int(matches!(sign, Sign::Signed), args[0].immediate(), llret_ty)
3050+
}
3051+
}),
3052+
(Style::Float, Style::Float) => Some(match in_width.cmp(&out_width) {
3053+
Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
3054+
Ordering::Equal => args[0].immediate(),
3055+
Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
3056+
}),
3057+
_ => None,
3058+
}
3059+
}

compiler/rustc_hir_analysis/src/check/intrinsic.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -785,6 +785,7 @@ pub(crate) fn check_intrinsic_type(
785785
sym::simd_shuffle => (3, 0, vec![param(0), param(0), param(1)], param(2)),
786786
sym::simd_shuffle_const_generic => (2, 1, vec![param(0), param(0)], param(1)),
787787

788+
sym::sve_cast => (2, 0, vec![param(0)], param(1)),
788789
sym::sve_tuple_create2 => (2, 0, vec![param(0), param(0)], param(1)),
789790
sym::sve_tuple_create3 => (2, 0, vec![param(0), param(0), param(0)], param(1)),
790791
sym::sve_tuple_create4 => (2, 0, vec![param(0), param(0), param(0), param(0)], param(1)),

compiler/rustc_span/src/symbol.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1984,6 +1984,7 @@ symbols! {
19841984
suggestion,
19851985
super_let,
19861986
supertrait_item_shadowing,
1987+
sve_cast,
19871988
sve_tuple_create2,
19881989
sve_tuple_create3,
19891990
sve_tuple_create4,

library/core/src/intrinsics/simd/scalable.rs

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,29 @@
22
//!
33
//! In this module, a "vector" is any `#[rustc_scalable_vector]`-annotated type.
44
5+
/// Numerically casts a vector, elementwise.
6+
///
7+
/// `T` and `U` must be vectors of integers or floats, and must have the same length.
8+
///
9+
/// When casting floats to integers, the result is truncated. Out-of-bounds results lead to UB.
10+
/// When casting integers to floats, the result is rounded.
11+
/// Otherwise, truncates or extends the value, maintaining the sign for signed integers.
12+
///
13+
/// # Safety
14+
/// Casting from integer types is always safe.
15+
/// Casting between two float types is also always safe.
16+
///
17+
/// Casting floats to integers truncates, following the same rules as `to_int_unchecked`.
18+
/// Specifically, each element must:
19+
/// * Not be `NaN`
20+
/// * Not be infinite
21+
/// * Be representable in the return type, after truncating off its fractional part
22+
#[cfg(target_arch = "aarch64")]
23+
#[rustc_intrinsic]
24+
#[rustc_nounwind]
25+
#[target_feature(enable = "sve")]
26+
pub unsafe fn sve_cast<T, U>(x: T) -> U;
27+
528
/// Create a tuple of two vectors.
629
///
730
/// `SVecTup` must be a scalable vector tuple (`#[rustc_scalable_vector]`) and `SVec` must be a
Lines changed: 64 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,64 @@
1+
//@ check-pass
2+
#![crate_type = "lib"]
3+
#![allow(incomplete_features, internal_features, improper_ctypes)]
4+
#![feature(abi_unadjusted, core_intrinsics, link_llvm_intrinsics, rustc_attrs)]
5+
6+
use std::intrinsics::simd::scalable::sve_cast;
7+
8+
#[derive(Copy, Clone)]
9+
#[rustc_scalable_vector(16)]
10+
#[allow(non_camel_case_types)]
11+
pub struct svbool_t(bool);
12+
13+
#[derive(Copy, Clone)]
14+
#[rustc_scalable_vector(2)]
15+
#[allow(non_camel_case_types)]
16+
pub struct svbool2_t(bool);
17+
18+
#[derive(Copy, Clone)]
19+
#[rustc_scalable_vector(2)]
20+
#[allow(non_camel_case_types)]
21+
pub struct svint64_t(i64);
22+
23+
#[derive(Copy, Clone)]
24+
#[rustc_scalable_vector(2)]
25+
#[allow(non_camel_case_types)]
26+
pub struct nxv2i16(i16);
27+
28+
pub trait SveInto<T>: Sized {
29+
unsafe fn sve_into(self) -> T;
30+
}
31+
32+
impl SveInto<svbool2_t> for svbool_t {
33+
#[target_feature(enable = "sve")]
34+
unsafe fn sve_into(self) -> svbool2_t {
35+
unsafe extern "C" {
36+
#[cfg_attr(
37+
target_arch = "aarch64",
38+
link_name = concat!("llvm.aarch64.sve.convert.from.svbool.nxv2i1")
39+
)]
40+
fn convert_from_svbool(b: svbool_t) -> svbool2_t;
41+
}
42+
unsafe { convert_from_svbool(self) }
43+
}
44+
}
45+
46+
#[target_feature(enable = "sve")]
47+
pub unsafe fn svld1sh_gather_s64offset_s64(
48+
pg: svbool_t,
49+
base: *const i16,
50+
offsets: svint64_t,
51+
) -> svint64_t {
52+
unsafe extern "unadjusted" {
53+
#[cfg_attr(
54+
target_arch = "aarch64",
55+
link_name = "llvm.aarch64.sve.ld1.gather.nxv2i16"
56+
)]
57+
fn _svld1sh_gather_s64offset_s64(
58+
pg: svbool2_t,
59+
base: *const i16,
60+
offsets: svint64_t,
61+
) -> nxv2i16;
62+
}
63+
sve_cast(_svld1sh_gather_s64offset_s64(pg.sve_into(), base, offsets))
64+
}

0 commit comments

Comments
 (0)