diff --git a/crates/core_arch/src/aarch64/neon/mod.rs b/crates/core_arch/src/aarch64/neon/mod.rs index 227d227f4ebb2..bba024635a807 100644 --- a/crates/core_arch/src/aarch64/neon/mod.rs +++ b/crates/core_arch/src/aarch64/neon/mod.rs @@ -2317,182 +2317,182 @@ pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { /// Shift Left and Insert (immediate) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t { - assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n); - vsli_n_s8_(a, b, n) +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert_imm3!(N); + vsli_n_s8_(a, b, N) } /// Shift Left and Insert (immediate) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t { - assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n); - vsliq_n_s8_(a, b, n) +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert_imm3!(N); + vsliq_n_s8_(a, b, N) } /// Shift Left and Insert (immediate) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t { - assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n); - vsli_n_s16_(a, b, n) +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_imm4!(N); + vsli_n_s16_(a, b, N) } /// Shift Left and Insert (immediate) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, 
assert_instr(sli, n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t { - assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n); - vsliq_n_s16_(a, b, n) +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_imm4!(N); + vsliq_n_s16_(a, b, N) } /// Shift Left and Insert (immediate) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t { - assert!(0 <= n && n <= 31, "must have 0 ≤ n ≤ 31, but n = {}", n); - vsli_n_s32_(a, b, n) +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(N: i32 where N >= 0 && N <= 31); + vsli_n_s32_(a, b, N) } /// Shift Left and Insert (immediate) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t { - assert!(0 <= n && n <= 31, "must have 0 ≤ n ≤ 31, but n = {}", n); - vsliq_n_s32_(a, b, n) +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(N: i32 where N >= 0 && N <= 31); + vsliq_n_s32_(a, b, N) } /// Shift Left and Insert (immediate) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t { - assert!(0 <= n && n <= 63, "must have 0 ≤ n ≤ 63, but n = {}", n); - vsli_n_s64_(a, b, n) +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_s64<const N: i32>(a: int64x1_t, b: 
int64x1_t) -> int64x1_t { + static_assert!(N: i32 where N >= 0 && N <= 63); + vsli_n_s64_(a, b, N) } /// Shift Left and Insert (immediate) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t { - assert!(0 <= n && n <= 63, "must have 0 ≤ n ≤ 63, but n = {}", n); - vsliq_n_s64_(a, b, n) +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(N: i32 where N >= 0 && N <= 63); + vsliq_n_s64_(a, b, N) } /// Shift Left and Insert (immediate) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t, n: i32) -> uint8x8_t { - assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n); - transmute(vsli_n_s8_(transmute(a), transmute(b), n)) +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert_imm3!(N); + transmute(vsli_n_s8_(transmute(a), transmute(b), N)) } /// Shift Left and Insert (immediate) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t, n: i32) -> uint8x16_t { - assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n); - transmute(vsliq_n_s8_(transmute(a), transmute(b), n)) +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert_imm3!(N); + transmute(vsliq_n_s8_(transmute(a), transmute(b), N)) } /// Shift Left and Insert (immediate) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, n = 1))] 
-#[rustc_args_required_const(2)] -pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t, n: i32) -> uint16x4_t { - assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n); - transmute(vsli_n_s16_(transmute(a), transmute(b), n)) +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert_imm4!(N); + transmute(vsli_n_s16_(transmute(a), transmute(b), N)) } /// Shift Left and Insert (immediate) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t, n: i32) -> uint16x8_t { - assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n); - transmute(vsliq_n_s16_(transmute(a), transmute(b), n)) +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert_imm4!(N); + transmute(vsliq_n_s16_(transmute(a), transmute(b), N)) } /// Shift Left and Insert (immediate) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t, n: i32) -> uint32x2_t { - assert!(0 <= n && n <= 31, "must have 0 ≤ n ≤ 31, but n = {}", n); - transmute(vsli_n_s32_(transmute(a), transmute(b), n)) +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(N: i32 where N >= 0 && N <= 31); + transmute(vsli_n_s32_(transmute(a), transmute(b), N)) } /// Shift Left and Insert (immediate) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t, n: i32) -> uint32x4_t { - assert!(0 <= n && n <= 31, "must have 0 ≤ n ≤ 31, 
but n = {}", n); - transmute(vsliq_n_s32_(transmute(a), transmute(b), n)) +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(N: i32 where N >= 0 && N <= 31); + transmute(vsliq_n_s32_(transmute(a), transmute(b), N)) } /// Shift Left and Insert (immediate) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t, n: i32) -> uint64x1_t { - assert!(0 <= n && n <= 63, "must have 0 ≤ n ≤ 63, but n = {}", n); - transmute(vsli_n_s64_(transmute(a), transmute(b), n)) +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + static_assert!(N: i32 where N >= 0 && N <= 63); + transmute(vsli_n_s64_(transmute(a), transmute(b), N)) } /// Shift Left and Insert (immediate) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t, n: i32) -> uint64x2_t { - assert!(0 <= n && n <= 63, "must have 0 ≤ n ≤ 63, but n = {}", n); - transmute(vsliq_n_s64_(transmute(a), transmute(b), n)) +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(N: i32 where N >= 0 && N <= 63); + transmute(vsliq_n_s64_(transmute(a), transmute(b), N)) } /// Shift Left and Insert (immediate) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t, n: i32) -> poly8x8_t { - assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n); - transmute(vsli_n_s8_(transmute(a), transmute(b), n)) +#[cfg_attr(test, assert_instr(sli, N = 1))] 
+#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + static_assert_imm3!(N); + transmute(vsli_n_s8_(transmute(a), transmute(b), N)) } /// Shift Left and Insert (immediate) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t, n: i32) -> poly8x16_t { - assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n); - transmute(vsliq_n_s8_(transmute(a), transmute(b), n)) +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { + static_assert_imm3!(N); + transmute(vsliq_n_s8_(transmute(a), transmute(b), N)) } /// Shift Left and Insert (immediate) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t, n: i32) -> poly16x4_t { - assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n); - transmute(vsli_n_s16_(transmute(a), transmute(b), n)) +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + static_assert_imm4!(N); + transmute(vsli_n_s16_(transmute(a), transmute(b), N)) } /// Shift Left and Insert (immediate) #[inline] #[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(sli, n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t, n: i32) -> poly16x8_t { - assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n); - transmute(vsliq_n_s16_(transmute(a), transmute(b), n)) +#[cfg_attr(test, assert_instr(sli, N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + static_assert_imm4!(N); + transmute(vsliq_n_s16_(transmute(a), transmute(b), N)) } /// 
Shift Right and Insert (immediate) diff --git a/crates/core_arch/src/arm/neon/mod.rs b/crates/core_arch/src/arm/neon/mod.rs index b4ce393cdc5df..9fd178f359504 100644 --- a/crates/core_arch/src/arm/neon/mod.rs +++ b/crates/core_arch/src/arm/neon/mod.rs @@ -3585,22 +3585,22 @@ pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.8", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t { - assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n); - let n = n as i8; +#[cfg_attr(test, assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t { + static_assert_imm3!(N); + let n = N as i8; vshiftins_v8i8(a, b, int8x8_t(n, n, n, n, n, n, n, n)) } /// Shift Left and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.8", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t { - assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n); - let n = n as i8; +#[cfg_attr(test, assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t { + static_assert_imm3!(N); + let n = N as i8; vshiftins_v16i8( a, b, @@ -3611,73 +3611,73 @@ pub unsafe fn vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t { #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.16", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t { - assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n); - let n = n as i16; +#[cfg_attr(test, assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub 
unsafe fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t { + static_assert_imm4!(N); + let n = N as i16; vshiftins_v4i16(a, b, int16x4_t(n, n, n, n)) } /// Shift Left and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.16", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t { - assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n); - let n = n as i16; +#[cfg_attr(test, assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t { + static_assert_imm4!(N); + let n = N as i16; vshiftins_v8i16(a, b, int16x8_t(n, n, n, n, n, n, n, n)) } /// Shift Left and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.32", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t { - assert!(0 <= n && n <= 31, "must have 0 ≤ n ≤ 31, but n = {}", n); - vshiftins_v2i32(a, b, int32x2_t(n, n)) +#[cfg_attr(test, assert_instr("vsli.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t { + static_assert!(N: i32 where N >= 0 && N <= 31); + vshiftins_v2i32(a, b, int32x2_t(N, N)) } /// Shift Left and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.32", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t { - assert!(0 <= n && n <= 31, "must have 0 ≤ n ≤ 31, but n = {}", n); - vshiftins_v4i32(a, b, int32x4_t(n, n, n, n)) +#[cfg_attr(test, assert_instr("vsli.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t { + static_assert!(N: i32 where N >= 
0 && N <= 31); + vshiftins_v4i32(a, b, int32x4_t(N, N, N, N)) } /// Shift Left and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.64", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsli_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t { - assert!(0 <= n && n <= 63, "must have 0 ≤ n ≤ 63, but n = {}", n); - vshiftins_v1i64(a, b, int64x1_t(n as i64)) +#[cfg_attr(test, assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t { + static_assert!(N : i32 where 0 <= N && N <= 63); + vshiftins_v1i64(a, b, int64x1_t(N as i64)) } /// Shift Left and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.64", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t { - assert!(0 <= n && n <= 63, "must have 0 ≤ n ≤ 63, but n = {}", n); - vshiftins_v2i64(a, b, int64x2_t(n as i64, n as i64)) +#[cfg_attr(test, assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t { + static_assert!(N : i32 where 0 <= N && N <= 63); + vshiftins_v2i64(a, b, int64x2_t(N as i64, N as i64)) } /// Shift Left and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.8", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t, n: i32) -> uint8x8_t { - assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n); - let n = n as i8; +#[cfg_attr(test, assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { + static_assert_imm3!(N); + let n = N as i8; transmute(vshiftins_v8i8( transmute(a), transmute(b), @@ -3688,11 
+3688,11 @@ pub unsafe fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t, n: i32) -> uint8x8_t { #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.8", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t, n: i32) -> uint8x16_t { - assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n); - let n = n as i8; +#[cfg_attr(test, assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { + static_assert_imm3!(N); + let n = N as i8; transmute(vshiftins_v16i8( transmute(a), transmute(b), @@ -3703,11 +3703,11 @@ pub unsafe fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t, n: i32) -> uint8x16_t { #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.16", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t, n: i32) -> uint16x4_t { - assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n); - let n = n as i16; +#[cfg_attr(test, assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { + static_assert_imm4!(N); + let n = N as i16; transmute(vshiftins_v4i16( transmute(a), transmute(b), @@ -3718,11 +3718,11 @@ pub unsafe fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t, n: i32) -> uint16x4_t { #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.16", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t, n: i32) -> uint16x8_t { - assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n); - let n = n as i16; +#[cfg_attr(test, assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { + static_assert_imm4!(N); + let n = N as i16; 
transmute(vshiftins_v8i16( transmute(a), transmute(b), @@ -3733,63 +3733,63 @@ pub unsafe fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t, n: i32) -> uint16x8_t { #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.32", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t, n: i32) -> uint32x2_t { - assert!(0 <= n && n <= 31, "must have 0 ≤ n ≤ 31, but n = {}", n); - transmute(vshiftins_v2i32(transmute(a), transmute(b), int32x2_t(n, n))) +#[cfg_attr(test, assert_instr("vsli.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { + static_assert!(N: i32 where N >= 0 && N <= 31); + transmute(vshiftins_v2i32(transmute(a), transmute(b), int32x2_t(N, N))) } /// Shift Left and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.32", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t, n: i32) -> uint32x4_t { - assert!(0 <= n && n <= 31, "must have 0 ≤ n ≤ 31, but n = {}", n); +#[cfg_attr(test, assert_instr("vsli.32", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { + static_assert!(N: i32 where N >= 0 && N <= 31); transmute(vshiftins_v4i32( transmute(a), transmute(b), - int32x4_t(n, n, n, n), + int32x4_t(N, N, N, N), )) } /// Shift Left and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.64", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t, n: i32) -> uint64x1_t { - assert!(0 <= n && n <= 63, "must have 0 ≤ n ≤ 63, but n = {}", n); +#[cfg_attr(test, assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { + 
static_assert!(N : i32 where 0 <= N && N <= 63); transmute(vshiftins_v1i64( transmute(a), transmute(b), - int64x1_t(n as i64), + int64x1_t(N as i64), )) } /// Shift Left and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.64", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t, n: i32) -> uint64x2_t { - assert!(0 <= n && n <= 63, "must have 0 ≤ n ≤ 63, but n = {}", n); +#[cfg_attr(test, assert_instr("vsli.64", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { + static_assert!(N : i32 where 0 <= N && N <= 63); transmute(vshiftins_v2i64( transmute(a), transmute(b), - int64x2_t(n as i64, n as i64), + int64x2_t(N as i64, N as i64), )) } /// Shift Left and Insert (immediate) #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.8", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t, n: i32) -> poly8x8_t { - assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n); - let n = n as i8; +#[cfg_attr(test, assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { + static_assert_imm3!(N); + let n = N as i8; transmute(vshiftins_v8i8( transmute(a), transmute(b), @@ -3800,11 +3800,11 @@ pub unsafe fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t, n: i32) -> poly8x8_t { #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.8", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t, n: i32) -> poly8x16_t { - assert!(0 <= n && n <= 7, "must have 0 ≤ n ≤ 7, but n = {}", n); - let n = n as i8; +#[cfg_attr(test, assert_instr("vsli.8", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_p8<const N: i32>(a: poly8x16_t, 
b: poly8x16_t) -> poly8x16_t { + static_assert_imm3!(N); + let n = N as i8; transmute(vshiftins_v16i8( transmute(a), transmute(b), @@ -3815,11 +3815,11 @@ pub unsafe fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t, n: i32) -> poly8x16_t { #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.16", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t, n: i32) -> poly16x4_t { - assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n); - let n = n as i16; +#[cfg_attr(test, assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { + static_assert_imm4!(N); + let n = N as i16; transmute(vshiftins_v4i16( transmute(a), transmute(b), @@ -3830,11 +3830,11 @@ pub unsafe fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t, n: i32) -> poly16x4_t { #[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] -#[cfg_attr(test, assert_instr("vsli.16", n = 1))] -#[rustc_args_required_const(2)] -pub unsafe fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t, n: i32) -> poly16x8_t { - assert!(0 <= n && n <= 15, "must have 0 ≤ n ≤ 15, but n = {}", n); - let n = n as i16; +#[cfg_attr(test, assert_instr("vsli.16", N = 1))] +#[rustc_legacy_const_generics(2)] +pub unsafe fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { + static_assert_imm4!(N); + let n = N as i16; transmute(vshiftins_v8i16( transmute(a), transmute(b), diff --git a/crates/core_arch/src/arm/neon/shift_and_insert_tests.rs b/crates/core_arch/src/arm/neon/shift_and_insert_tests.rs index a556789245120..04e623f24e3d5 100644 --- a/crates/core_arch/src/arm/neon/shift_and_insert_tests.rs +++ b/crates/core_arch/src/arm/neon/shift_and_insert_tests.rs @@ -23,7 +23,7 @@ macro_rules! 
test_vsli { let b = [$($b as $t),*]; let n_bit_mask: $t = (1 << $n) - 1; let e = [$(($a as $t & n_bit_mask) | ($b as $t << $n)),*]; - let r = $fn_id(transmute(a), transmute(b), $n); + let r = $fn_id::<$n>(transmute(a), transmute(b)); let mut d = e; d = transmute(r); assert_eq!(d, e);