Commit a8671e8

Merge branch 'master' into stabilize-aarch64-sha3-intrinsics
Also bumps nightly version to 1.79.0
2 parents: 194a1e4 + 24068c7

File tree: 4 files changed, +122 -121 lines

crates/core_arch/src/aarch64/neon/generated.rs
+21 -21

@@ -15,7 +15,7 @@ use stdarch_test::assert_instr;
 #[inline]
 #[target_feature(enable = "neon,sha3")]
 #[cfg_attr(test, assert_instr(eor3))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.78.0")]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
 pub unsafe fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -31,7 +31,7 @@ pub unsafe fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
 #[inline]
 #[target_feature(enable = "neon,sha3")]
 #[cfg_attr(test, assert_instr(eor3))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.78.0")]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
 pub unsafe fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -47,7 +47,7 @@ pub unsafe fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t
 #[inline]
 #[target_feature(enable = "neon,sha3")]
 #[cfg_attr(test, assert_instr(eor3))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.78.0")]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
 pub unsafe fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -63,7 +63,7 @@ pub unsafe fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t
 #[inline]
 #[target_feature(enable = "neon,sha3")]
 #[cfg_attr(test, assert_instr(eor3))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.78.0")]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
 pub unsafe fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -79,7 +79,7 @@ pub unsafe fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t
 #[inline]
 #[target_feature(enable = "neon,sha3")]
 #[cfg_attr(test, assert_instr(eor3))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.78.0")]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
 pub unsafe fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -95,7 +95,7 @@ pub unsafe fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16
 #[inline]
 #[target_feature(enable = "neon,sha3")]
 #[cfg_attr(test, assert_instr(eor3))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.78.0")]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
 pub unsafe fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -111,7 +111,7 @@ pub unsafe fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x
 #[inline]
 #[target_feature(enable = "neon,sha3")]
 #[cfg_attr(test, assert_instr(eor3))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.78.0")]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
 pub unsafe fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -127,7 +127,7 @@ pub unsafe fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x
 #[inline]
 #[target_feature(enable = "neon,sha3")]
 #[cfg_attr(test, assert_instr(eor3))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.78.0")]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
 pub unsafe fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -9977,7 +9977,7 @@ pub unsafe fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon,sha3")]
 #[cfg_attr(test, assert_instr(bcax))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.78.0")]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
 pub unsafe fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -9993,7 +9993,7 @@ pub unsafe fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
 #[inline]
 #[target_feature(enable = "neon,sha3")]
 #[cfg_attr(test, assert_instr(bcax))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.78.0")]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
 pub unsafe fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -10009,7 +10009,7 @@ pub unsafe fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t
 #[inline]
 #[target_feature(enable = "neon,sha3")]
 #[cfg_attr(test, assert_instr(bcax))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.78.0")]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
 pub unsafe fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -10025,7 +10025,7 @@ pub unsafe fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t
 #[inline]
 #[target_feature(enable = "neon,sha3")]
 #[cfg_attr(test, assert_instr(bcax))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.78.0")]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
 pub unsafe fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -10041,7 +10041,7 @@ pub unsafe fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t
 #[inline]
 #[target_feature(enable = "neon,sha3")]
 #[cfg_attr(test, assert_instr(bcax))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.78.0")]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
 pub unsafe fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -10057,7 +10057,7 @@ pub unsafe fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16
 #[inline]
 #[target_feature(enable = "neon,sha3")]
 #[cfg_attr(test, assert_instr(bcax))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.78.0")]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
 pub unsafe fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -10073,7 +10073,7 @@ pub unsafe fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x
 #[inline]
 #[target_feature(enable = "neon,sha3")]
 #[cfg_attr(test, assert_instr(bcax))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.78.0")]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
 pub unsafe fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -10089,7 +10089,7 @@ pub unsafe fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x
 #[inline]
 #[target_feature(enable = "neon,sha3")]
 #[cfg_attr(test, assert_instr(bcax))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.78.0")]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
 pub unsafe fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -15261,7 +15261,7 @@ pub unsafe fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
 #[inline]
 #[target_feature(enable = "neon,sha3")]
 #[cfg_attr(test, assert_instr(rax1))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.78.0")]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
 pub unsafe fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -15277,7 +15277,7 @@ pub unsafe fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon,sha3")]
 #[cfg_attr(test, assert_instr(sha512h))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.78.0")]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
 pub unsafe fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -15293,7 +15293,7 @@ pub unsafe fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint
 #[inline]
 #[target_feature(enable = "neon,sha3")]
 #[cfg_attr(test, assert_instr(sha512h2))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.78.0")]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
 pub unsafe fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -15309,7 +15309,7 @@ pub unsafe fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uin
 #[inline]
 #[target_feature(enable = "neon,sha3")]
 #[cfg_attr(test, assert_instr(sha512su0))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.78.0")]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
 pub unsafe fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -15325,7 +15325,7 @@ pub unsafe fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon,sha3")]
 #[cfg_attr(test, assert_instr(sha512su1))]
-#[stable(feature = "stdarch_neon_sha3", since = "1.78.0")]
+#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
 pub unsafe fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
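The hunks above only move the `since` field of the stability attributes from 1.78.0 to 1.79.0; once released, these NEON SHA3 intrinsics are callable from stable Rust. A minimal usage sketch, assuming an aarch64 target and run-time detection of the "sha3" feature (the helper name xor3 and the test values are illustrative, not part of this commit):

// Sketch only: veor3q_u64 computes a three-way XOR that lowers to a single
// EOR3 instruction when the "sha3" target feature is available.
#[cfg(target_arch = "aarch64")]
use std::arch::aarch64::*;

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon,sha3")]
unsafe fn xor3(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Equivalent to veorq_u64(veorq_u64(a, b), c), but as one instruction.
    veor3q_u64(a, b, c)
}

#[cfg(target_arch = "aarch64")]
fn main() {
    if std::arch::is_aarch64_feature_detected!("sha3") {
        unsafe {
            let r = xor3(vdupq_n_u64(0b1010), vdupq_n_u64(0b0110), vdupq_n_u64(0b0011));
            // 0b1010 ^ 0b0110 ^ 0b0011 == 0b1111 in every lane.
            assert_eq!(vgetq_lane_u64::<0>(r), 0b1111);
        }
    }
}

#[cfg(not(target_arch = "aarch64"))]
fn main() {}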

crates/core_arch/src/x86/avx512f.rs
+8 -10

@@ -28363,8 +28363,7 @@ pub unsafe fn _mm_maskz_set1_epi64(k: __mmask8, a: i64) -> __m128i {
 #[target_feature(enable = "avx512f")]
 #[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
 pub unsafe fn _mm512_set4_epi64(d: i64, c: i64, b: i64, a: i64) -> __m512i {
-    let r = i64x8::new(d, c, b, a, d, c, b, a);
-    transmute(r)
+    _mm512_set_epi64(d, c, b, a, d, c, b, a)
 }
 
 /// Set packed 64-bit integers in dst with the repeated 4 element sequence in reverse order.
@@ -28374,8 +28373,7 @@ pub unsafe fn _mm512_set4_epi64(d: i64, c: i64, b: i64, a: i64) -> __m512i {
 #[target_feature(enable = "avx512f")]
 #[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
 pub unsafe fn _mm512_setr4_epi64(d: i64, c: i64, b: i64, a: i64) -> __m512i {
-    let r = i64x8::new(a, b, c, d, a, b, c, d);
-    transmute(r)
+    _mm512_set_epi64(a, b, c, d, a, b, c, d)
 }
 
 /// Compare packed single-precision (32-bit) floating-point elements in a and b for less-than, and store the results in mask vector k.
@@ -34486,7 +34484,7 @@ pub unsafe fn _mm_maskz_expandloadu_ps(k: __mmask8, mem_addr: *const f32) -> __m
     dst
 }
 
-/// Load contiguous active single-precision (64-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+/// Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
 ///
 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expandloadu_pd)
 #[inline]
@@ -34508,7 +34506,7 @@ pub unsafe fn _mm512_mask_expandloadu_pd(
     dst
 }
 
-/// Load contiguous active single-precision (64-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+/// Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
 ///
 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expandloadu_pd)
 #[inline]
@@ -34526,7 +34524,7 @@ pub unsafe fn _mm512_maskz_expandloadu_pd(k: __mmask8, mem_addr: *const f64) ->
     dst
 }
 
-/// Load contiguous active single-precision (64-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+/// Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
 ///
 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expandloadu_pd)
 #[inline]
@@ -34548,7 +34546,7 @@ pub unsafe fn _mm256_mask_expandloadu_pd(
     dst
 }
 
-/// Load contiguous active single-precision (64-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+/// Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
 ///
 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expandloadu_pd)
 #[inline]
@@ -34566,7 +34564,7 @@ pub unsafe fn _mm256_maskz_expandloadu_pd(k: __mmask8, mem_addr: *const f64) ->
     dst
 }
 
-/// Load contiguous active single-precision (64-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+/// Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
 ///
 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expandloadu_pd)
 #[inline]
@@ -34584,7 +34582,7 @@ pub unsafe fn _mm_mask_expandloadu_pd(src: __m128d, k: __mmask8, mem_addr: *cons
     dst
 }
 
-/// Load contiguous active single-precision (64-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+/// Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
 ///
 /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expandloadu_pd)
 #[inline]
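The first two hunks rewrite _mm512_set4_epi64 and _mm512_setr4_epi64 to delegate to _mm512_set_epi64 with the four arguments repeated, and the remaining hunks fix doc comments that said "single-precision" on the double-precision (_pd) expand-load intrinsics. A minimal sketch of the new delegation, assuming a nightly toolchain (these AVX-512 intrinsics were still gated behind the unstable stdarch_x86_avx512 feature at the time of this commit); the helper name set4_matches_set and the test values are illustrative:

#![feature(stdarch_x86_avx512)] // nightly-only: the intrinsics below are unstable
#[cfg(target_arch = "x86_64")]
use std::arch::x86_64::*;

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn set4_matches_set(d: i64, c: i64, b: i64, a: i64) -> bool {
    // After this change, _mm512_set4_epi64 delegates to _mm512_set_epi64 with the
    // four arguments repeated, so all eight 64-bit lanes must compare equal.
    let via_set4 = _mm512_set4_epi64(d, c, b, a);
    let via_set = _mm512_set_epi64(d, c, b, a, d, c, b, a);
    _mm512_cmpeq_epi64_mask(via_set4, via_set) == 0xff
}

fn main() {
    #[cfg(target_arch = "x86_64")]
    {
        if is_x86_feature_detected!("avx512f") {
            assert!(unsafe { set4_matches_set(1, 2, 3, 4) });
        }
    }
}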

crates/intrinsic-test/src/json_parser.rs
+1 -0

@@ -19,6 +19,7 @@ pub enum ArgPrep {
     Register {
         #[allow(dead_code)]
         #[serde(rename = "register")]
+        #[allow(dead_code)]
         reg: String,
     },
     Immediate {
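For context, the #[serde(rename = "register")] attribute in this enum maps the JSON key "register" onto the reg field when the intrinsics description JSON is deserialized. A self-contained sketch of that mechanism (illustrative only: the struct, the example JSON, and the register string below are not the project's actual schema; requires the serde and serde_json crates):

use serde::Deserialize;

// Stand-in for the Register variant's payload: the field is named `reg`,
// but the JSON key it is read from is "register".
#[derive(Debug, Deserialize)]
struct RegisterPrep {
    #[serde(rename = "register")]
    reg: String,
}

fn main() {
    let prep: RegisterPrep = serde_json::from_str(r#"{ "register": "Vd.16B" }"#).unwrap();
    assert_eq!(prep.reg, "Vd.16B");
}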
