@@ -21987,7 +21987,8 @@ pub unsafe fn vshll_n_u32<const N: i32>(a: uint32x2_t) -> uint64x2_t {
 #[rustc_legacy_const_generics(1)]
 pub unsafe fn vshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
     static_assert!(N : i32 where N >= 1 && N <= 8);
-    simd_shr(a, vdup_n_s8(N.try_into().unwrap()))
+    let n: i32 = if N == 8 { 7 } else { N };
+    simd_shr(a, vdup_n_s8(n.try_into().unwrap()))
 }

 /// Shift right
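For the signed variants, the patch clamps a shift count equal to the lane width down to one less (`N == 8` becomes 7 here; the later hunks use 15, 31, and 63). The clamp avoids a shift amount equal to the lane width, which the underlying `simd_shr` lowering does not define, and it is value-preserving: an arithmetic shift by 7 already fills an 8-bit lane with copies of the sign bit, which is exactly what a notional shift by 8 would yield. Below is a minimal scalar sketch of that equivalence, plain Rust rather than the intrinsic itself:

```rust
// Scalar model of the signed clamp: for an i8 lane, an arithmetic shift
// right by 7 produces -1 for negative values and 0 otherwise, the same
// result a full shift by 8 would give mathematically. Rust rejects
// `x >> 8` on an i8 outright as an overflowing shift, which mirrors why
// the intrinsic clamps instead of shifting by the lane width.
fn main() {
    for x in [i8::MIN, -1, 0, 1, i8::MAX] {
        let clamped = x >> 7; // what the patched intrinsic computes per lane
        let expected = if x < 0 { -1 } else { 0 }; // ideal "x >> 8"
        assert_eq!(clamped, expected);
    }
    println!("for every i8, a shift by 7 matches a notional shift by 8");
}
```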
@@ -21999,7 +22000,8 @@ pub unsafe fn vshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
 #[rustc_legacy_const_generics(1)]
 pub unsafe fn vshrq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
     static_assert!(N : i32 where N >= 1 && N <= 8);
-    simd_shr(a, vdupq_n_s8(N.try_into().unwrap()))
+    let n: i32 = if N == 8 { 7 } else { N };
+    simd_shr(a, vdupq_n_s8(n.try_into().unwrap()))
 }

 /// Shift right
@@ -22011,7 +22013,8 @@ pub unsafe fn vshrq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
 #[rustc_legacy_const_generics(1)]
 pub unsafe fn vshr_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
     static_assert!(N : i32 where N >= 1 && N <= 16);
-    simd_shr(a, vdup_n_s16(N.try_into().unwrap()))
+    let n: i32 = if N == 16 { 15 } else { N };
+    simd_shr(a, vdup_n_s16(n.try_into().unwrap()))
 }

 /// Shift right
@@ -22023,7 +22026,8 @@ pub unsafe fn vshr_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
 #[rustc_legacy_const_generics(1)]
 pub unsafe fn vshrq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
     static_assert!(N : i32 where N >= 1 && N <= 16);
-    simd_shr(a, vdupq_n_s16(N.try_into().unwrap()))
+    let n: i32 = if N == 16 { 15 } else { N };
+    simd_shr(a, vdupq_n_s16(n.try_into().unwrap()))
 }

 /// Shift right
@@ -22035,7 +22039,8 @@ pub unsafe fn vshrq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
 #[rustc_legacy_const_generics(1)]
 pub unsafe fn vshr_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
     static_assert!(N : i32 where N >= 1 && N <= 32);
-    simd_shr(a, vdup_n_s32(N.try_into().unwrap()))
+    let n: i32 = if N == 32 { 31 } else { N };
+    simd_shr(a, vdup_n_s32(n.try_into().unwrap()))
 }

 /// Shift right
@@ -22047,7 +22052,8 @@ pub unsafe fn vshr_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
 #[rustc_legacy_const_generics(1)]
 pub unsafe fn vshrq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
     static_assert!(N : i32 where N >= 1 && N <= 32);
-    simd_shr(a, vdupq_n_s32(N.try_into().unwrap()))
+    let n: i32 = if N == 32 { 31 } else { N };
+    simd_shr(a, vdupq_n_s32(n.try_into().unwrap()))
 }

 /// Shift right
@@ -22059,7 +22065,8 @@ pub unsafe fn vshrq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
 #[rustc_legacy_const_generics(1)]
 pub unsafe fn vshr_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
     static_assert!(N : i32 where N >= 1 && N <= 64);
-    simd_shr(a, vdup_n_s64(N.try_into().unwrap()))
+    let n: i32 = if N == 64 { 63 } else { N };
+    simd_shr(a, vdup_n_s64(n.try_into().unwrap()))
 }

 /// Shift right
@@ -22071,7 +22078,8 @@ pub unsafe fn vshr_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
 #[rustc_legacy_const_generics(1)]
 pub unsafe fn vshrq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
     static_assert!(N : i32 where N >= 1 && N <= 64);
-    simd_shr(a, vdupq_n_s64(N.try_into().unwrap()))
+    let n: i32 = if N == 64 { 63 } else { N };
+    simd_shr(a, vdupq_n_s64(n.try_into().unwrap()))
 }

 /// Shift right
@@ -22083,7 +22091,8 @@ pub unsafe fn vshrq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
 #[rustc_legacy_const_generics(1)]
 pub unsafe fn vshr_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
     static_assert!(N : i32 where N >= 1 && N <= 8);
-    simd_shr(a, vdup_n_u8(N.try_into().unwrap()))
+    let n: i32 = if N == 8 { return vdup_n_u8(0); } else { N };
+    simd_shr(a, vdup_n_u8(n.try_into().unwrap()))
 }

 /// Shift right
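The unsigned variants starting here need a different fix: clamping would be wrong, since a logical shift of `0x80u8` by 7 gives 1 while a shift by the full lane width must give 0. Instead, when `N` equals the lane width the new code returns a zeroed vector immediately (the early `return` inside the `if` means the `else` branch supplies `n` only when a real shift is needed). A minimal scalar sketch, again plain Rust rather than the intrinsic:

```rust
// Scalar model of the unsigned case: a logical shift right by the full
// lane width discards every bit, so zero is always the exact answer.
// Widening to u32 lets us express the "u8 shifted by 8" that Rust would
// otherwise reject as an overflowing shift.
fn main() {
    for x in [0u8, 1, 0x80, u8::MAX] {
        assert_eq!((x as u32) >> 8, 0); // all eight payload bits fall off
    }
    // Clamping, as in the signed case, would be incorrect for unsigned:
    assert_ne!(0x80u8 >> 7, 0); // 0x80 >> 7 == 1, yet a shift by 8 must give 0
    println!("u8 >> 8 is always 0, so returning a zeroed vector is exact");
}
```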
@@ -22095,7 +22104,8 @@ pub unsafe fn vshr_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
 #[rustc_legacy_const_generics(1)]
 pub unsafe fn vshrq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
     static_assert!(N : i32 where N >= 1 && N <= 8);
-    simd_shr(a, vdupq_n_u8(N.try_into().unwrap()))
+    let n: i32 = if N == 8 { return vdupq_n_u8(0); } else { N };
+    simd_shr(a, vdupq_n_u8(n.try_into().unwrap()))
 }

 /// Shift right
@@ -22107,7 +22117,8 @@ pub unsafe fn vshrq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
 #[rustc_legacy_const_generics(1)]
 pub unsafe fn vshr_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
     static_assert!(N : i32 where N >= 1 && N <= 16);
-    simd_shr(a, vdup_n_u16(N.try_into().unwrap()))
+    let n: i32 = if N == 16 { return vdup_n_u16(0); } else { N };
+    simd_shr(a, vdup_n_u16(n.try_into().unwrap()))
 }

 /// Shift right
@@ -22119,7 +22130,8 @@ pub unsafe fn vshr_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
 #[rustc_legacy_const_generics(1)]
 pub unsafe fn vshrq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
     static_assert!(N : i32 where N >= 1 && N <= 16);
-    simd_shr(a, vdupq_n_u16(N.try_into().unwrap()))
+    let n: i32 = if N == 16 { return vdupq_n_u16(0); } else { N };
+    simd_shr(a, vdupq_n_u16(n.try_into().unwrap()))
 }

 /// Shift right
@@ -22131,7 +22143,8 @@ pub unsafe fn vshrq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
 #[rustc_legacy_const_generics(1)]
 pub unsafe fn vshr_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
     static_assert!(N : i32 where N >= 1 && N <= 32);
-    simd_shr(a, vdup_n_u32(N.try_into().unwrap()))
+    let n: i32 = if N == 32 { return vdup_n_u32(0); } else { N };
+    simd_shr(a, vdup_n_u32(n.try_into().unwrap()))
 }

 /// Shift right
@@ -22143,7 +22156,8 @@ pub unsafe fn vshr_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
 #[rustc_legacy_const_generics(1)]
 pub unsafe fn vshrq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
     static_assert!(N : i32 where N >= 1 && N <= 32);
-    simd_shr(a, vdupq_n_u32(N.try_into().unwrap()))
+    let n: i32 = if N == 32 { return vdupq_n_u32(0); } else { N };
+    simd_shr(a, vdupq_n_u32(n.try_into().unwrap()))
 }

 /// Shift right
@@ -22155,7 +22169,8 @@ pub unsafe fn vshrq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
 #[rustc_legacy_const_generics(1)]
 pub unsafe fn vshr_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
     static_assert!(N : i32 where N >= 1 && N <= 64);
-    simd_shr(a, vdup_n_u64(N.try_into().unwrap()))
+    let n: i32 = if N == 64 { return vdup_n_u64(0); } else { N };
+    simd_shr(a, vdup_n_u64(n.try_into().unwrap()))
 }

 /// Shift right
@@ -22167,7 +22182,8 @@ pub unsafe fn vshr_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
 #[rustc_legacy_const_generics(1)]
 pub unsafe fn vshrq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
     static_assert!(N : i32 where N >= 1 && N <= 64);
-    simd_shr(a, vdupq_n_u64(N.try_into().unwrap()))
+    let n: i32 = if N == 64 { return vdupq_n_u64(0); } else { N };
+    simd_shr(a, vdupq_n_u64(n.try_into().unwrap()))
 }

 /// Shift right narrow
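Taken together, the two patterns make the boundary shift count (`N` equal to the lane width) well-defined for every `vshr_n_*`/`vshrq_n_*` lane type. A hypothetical caller-side check of that boundary case (assumes an AArch64 target with these intrinsics exposed through `core::arch::aarch64`; the function name `boundary_shift_demo` is illustrative, not part of the patch):

```rust
// Hypothetical exercise of the boundary case fixed by this patch:
// shifting a signed lane by the full lane width (N == 8) should smear
// the sign bit, and an unsigned lane should become zero.
#[cfg(target_arch = "aarch64")]
unsafe fn boundary_shift_demo() {
    use core::arch::aarch64::{
        vdup_n_s8, vdup_n_u8, vget_lane_s8, vget_lane_u8, vshr_n_s8, vshr_n_u8,
    };
    let s = vshr_n_s8::<8>(vdup_n_s8(-64)); // negative input: lanes become -1
    assert_eq!(vget_lane_s8::<0>(s), -1);
    let u = vshr_n_u8::<8>(vdup_n_u8(0x80)); // unsigned input: lanes become 0
    assert_eq!(vget_lane_u8::<0>(u), 0);
}
```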