@@ -1277,6 +1277,15 @@ define <vscale x 2 x i64> @trn1_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b
1277
1277
ret <vscale x 2 x i64 > %out
1278
1278
}
1279
1279
1280
; trn1 on <vscale x 2 x half>: nxv2 elements occupy 64-bit containers, so the
; CHECK line expects the .d form of the instruction.
define <vscale x 2 x half> @trn1_f16_v2(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
; CHECK-LABEL: trn1_f16_v2:
; CHECK: trn1 z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x half> @llvm.aarch64.sve.trn1.nxv2f16(<vscale x 2 x half> %a,
                                                                 <vscale x 2 x half> %b)
  ret <vscale x 2 x half> %out
}
1288
+
1280
1289
define <vscale x 4 x half > @trn1_f16_v4 (<vscale x 4 x half > %a , <vscale x 4 x half > %b ) {
1281
1290
; CHECK-LABEL: trn1_f16_v4:
1282
1291
; CHECK: trn1 z0.s, z0.s, z1.s
@@ -1304,6 +1313,15 @@ define <vscale x 8 x half> @trn1_f16(<vscale x 8 x half> %a, <vscale x 8 x half>
1304
1313
ret <vscale x 8 x half > %out
1305
1314
}
1306
1315
1316
; trn1 on <vscale x 2 x float>: nxv2 elements occupy 64-bit containers, so the
; CHECK line expects the .d form of the instruction.
define <vscale x 2 x float> @trn1_f32_v2(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: trn1_f32_v2:
; CHECK: trn1 z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x float> @llvm.aarch64.sve.trn1.nxv2f32(<vscale x 2 x float> %a,
                                                                  <vscale x 2 x float> %b)
  ret <vscale x 2 x float> %out
}
1324
+
1307
1325
define <vscale x 4 x float > @trn1_f32 (<vscale x 4 x float > %a , <vscale x 4 x float > %b ) {
1308
1326
; CHECK-LABEL: trn1_f32:
1309
1327
; CHECK: trn1 z0.s, z0.s, z1.s
@@ -1398,6 +1416,15 @@ define <vscale x 2 x i64> @trn2_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b
1398
1416
ret <vscale x 2 x i64 > %out
1399
1417
}
1400
1418
1419
; trn2 on <vscale x 2 x half>: nxv2 elements occupy 64-bit containers, so the
; CHECK line expects the .d form of the instruction.
define <vscale x 2 x half> @trn2_f16_v2(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
; CHECK-LABEL: trn2_f16_v2:
; CHECK: trn2 z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x half> @llvm.aarch64.sve.trn2.nxv2f16(<vscale x 2 x half> %a,
                                                                 <vscale x 2 x half> %b)
  ret <vscale x 2 x half> %out
}
1427
+
1401
1428
define <vscale x 4 x half > @trn2_f16_v4 (<vscale x 4 x half > %a , <vscale x 4 x half > %b ) {
1402
1429
; CHECK-LABEL: trn2_f16_v4:
1403
1430
; CHECK: trn2 z0.s, z0.s, z1.s
@@ -1425,6 +1452,15 @@ define <vscale x 8 x half> @trn2_f16(<vscale x 8 x half> %a, <vscale x 8 x half>
1425
1452
ret <vscale x 8 x half > %out
1426
1453
}
1427
1454
1455
; trn2 on <vscale x 2 x float>: nxv2 elements occupy 64-bit containers, so the
; CHECK line expects the .d form of the instruction.
define <vscale x 2 x float> @trn2_f32_v2(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: trn2_f32_v2:
; CHECK: trn2 z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x float> @llvm.aarch64.sve.trn2.nxv2f32(<vscale x 2 x float> %a,
                                                                  <vscale x 2 x float> %b)
  ret <vscale x 2 x float> %out
}
1463
+
1428
1464
define <vscale x 4 x float > @trn2_f32 (<vscale x 4 x float > %a , <vscale x 4 x float > %b ) {
1429
1465
; CHECK-LABEL: trn2_f32:
1430
1466
; CHECK: trn2 z0.s, z0.s, z1.s
@@ -1519,6 +1555,15 @@ define <vscale x 2 x i64> @uzp1_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b
1519
1555
ret <vscale x 2 x i64 > %out
1520
1556
}
1521
1557
1558
; uzp1 on <vscale x 2 x half>: nxv2 elements occupy 64-bit containers, so the
; CHECK line expects the .d form of the instruction.
define <vscale x 2 x half> @uzp1_f16_v2(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
; CHECK-LABEL: uzp1_f16_v2:
; CHECK: uzp1 z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x half> @llvm.aarch64.sve.uzp1.nxv2f16(<vscale x 2 x half> %a,
                                                                 <vscale x 2 x half> %b)
  ret <vscale x 2 x half> %out
}
1566
+
1522
1567
define <vscale x 4 x half > @uzp1_f16_v4 (<vscale x 4 x half > %a , <vscale x 4 x half > %b ) {
1523
1568
; CHECK-LABEL: uzp1_f16_v4:
1524
1569
; CHECK: uzp1 z0.s, z0.s, z1.s
@@ -1546,6 +1591,15 @@ define <vscale x 8 x half> @uzp1_f16(<vscale x 8 x half> %a, <vscale x 8 x half>
1546
1591
ret <vscale x 8 x half > %out
1547
1592
}
1548
1593
1594
; uzp1 on <vscale x 2 x float>: nxv2 elements occupy 64-bit containers, so the
; CHECK line expects the .d form of the instruction.
define <vscale x 2 x float> @uzp1_f32_v2(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: uzp1_f32_v2:
; CHECK: uzp1 z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x float> @llvm.aarch64.sve.uzp1.nxv2f32(<vscale x 2 x float> %a,
                                                                  <vscale x 2 x float> %b)
  ret <vscale x 2 x float> %out
}
1602
+
1549
1603
define <vscale x 4 x float > @uzp1_f32 (<vscale x 4 x float > %a , <vscale x 4 x float > %b ) {
1550
1604
; CHECK-LABEL: uzp1_f32:
1551
1605
; CHECK: uzp1 z0.s, z0.s, z1.s
@@ -1640,6 +1694,15 @@ define <vscale x 2 x i64> @uzp2_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b
1640
1694
ret <vscale x 2 x i64 > %out
1641
1695
}
1642
1696
1697
; uzp2 on <vscale x 2 x half>: nxv2 elements occupy 64-bit containers, so the
; CHECK line expects the .d form of the instruction.
define <vscale x 2 x half> @uzp2_f16_v2(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
; CHECK-LABEL: uzp2_f16_v2:
; CHECK: uzp2 z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x half> @llvm.aarch64.sve.uzp2.nxv2f16(<vscale x 2 x half> %a,
                                                                 <vscale x 2 x half> %b)
  ret <vscale x 2 x half> %out
}
1705
+
1643
1706
define <vscale x 4 x half > @uzp2_f16_v4 (<vscale x 4 x half > %a , <vscale x 4 x half > %b ) {
1644
1707
; CHECK-LABEL: uzp2_f16_v4:
1645
1708
; CHECK: uzp2 z0.s, z0.s, z1.s
@@ -1667,6 +1730,15 @@ define <vscale x 8 x half> @uzp2_f16(<vscale x 8 x half> %a, <vscale x 8 x half>
1667
1730
ret <vscale x 8 x half > %out
1668
1731
}
1669
1732
1733
; uzp2 on <vscale x 2 x float>: nxv2 elements occupy 64-bit containers, so the
; CHECK line expects the .d form of the instruction.
define <vscale x 2 x float> @uzp2_f32_v2(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: uzp2_f32_v2:
; CHECK: uzp2 z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x float> @llvm.aarch64.sve.uzp2.nxv2f32(<vscale x 2 x float> %a,
                                                                  <vscale x 2 x float> %b)
  ret <vscale x 2 x float> %out
}
1741
+
1670
1742
define <vscale x 4 x float > @uzp2_f32 (<vscale x 4 x float > %a , <vscale x 4 x float > %b ) {
1671
1743
; CHECK-LABEL: uzp2_f32:
1672
1744
; CHECK: uzp2 z0.s, z0.s, z1.s
@@ -1761,6 +1833,15 @@ define <vscale x 2 x i64> @zip1_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b
1761
1833
ret <vscale x 2 x i64 > %out
1762
1834
}
1763
1835
1836
; zip1 on <vscale x 2 x half>: nxv2 elements occupy 64-bit containers, so the
; CHECK line expects the .d form of the instruction.
define <vscale x 2 x half> @zip1_f16_v2(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
; CHECK-LABEL: zip1_f16_v2:
; CHECK: zip1 z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x half> @llvm.aarch64.sve.zip1.nxv2f16(<vscale x 2 x half> %a,
                                                                 <vscale x 2 x half> %b)
  ret <vscale x 2 x half> %out
}
1844
+
1764
1845
define <vscale x 4 x half > @zip1_f16_v4 (<vscale x 4 x half > %a , <vscale x 4 x half > %b ) {
1765
1846
; CHECK-LABEL: zip1_f16_v4:
1766
1847
; CHECK: zip1 z0.s, z0.s, z1.s
@@ -1788,6 +1869,15 @@ define <vscale x 8 x half> @zip1_f16(<vscale x 8 x half> %a, <vscale x 8 x half>
1788
1869
ret <vscale x 8 x half > %out
1789
1870
}
1790
1871
1872
; zip1 on <vscale x 2 x float>: nxv2 elements occupy 64-bit containers, so the
; CHECK line expects the .d form of the instruction.
define <vscale x 2 x float> @zip1_f32_v2(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: zip1_f32_v2:
; CHECK: zip1 z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x float> @llvm.aarch64.sve.zip1.nxv2f32(<vscale x 2 x float> %a,
                                                                  <vscale x 2 x float> %b)
  ret <vscale x 2 x float> %out
}
1880
+
1791
1881
define <vscale x 4 x float > @zip1_f32 (<vscale x 4 x float > %a , <vscale x 4 x float > %b ) {
1792
1882
; CHECK-LABEL: zip1_f32:
1793
1883
; CHECK: zip1 z0.s, z0.s, z1.s
@@ -1882,6 +1972,15 @@ define <vscale x 2 x i64> @zip2_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b
1882
1972
ret <vscale x 2 x i64 > %out
1883
1973
}
1884
1974
1975
; zip2 on <vscale x 2 x half>: nxv2 elements occupy 64-bit containers, so the
; CHECK line expects the .d form of the instruction.
define <vscale x 2 x half> @zip2_f16_v2(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
; CHECK-LABEL: zip2_f16_v2:
; CHECK: zip2 z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x half> @llvm.aarch64.sve.zip2.nxv2f16(<vscale x 2 x half> %a,
                                                                 <vscale x 2 x half> %b)
  ret <vscale x 2 x half> %out
}
1983
+
1885
1984
define <vscale x 4 x half > @zip2_f16_v4 (<vscale x 4 x half > %a , <vscale x 4 x half > %b ) {
1886
1985
; CHECK-LABEL: zip2_f16_v4:
1887
1986
; CHECK: zip2 z0.s, z0.s, z1.s
@@ -1909,6 +2008,15 @@ define <vscale x 8 x half> @zip2_f16(<vscale x 8 x half> %a, <vscale x 8 x half>
1909
2008
ret <vscale x 8 x half > %out
1910
2009
}
1911
2010
2011
; zip2 on <vscale x 2 x float>: nxv2 elements occupy 64-bit containers, so the
; CHECK line expects the .d form of the instruction.
define <vscale x 2 x float> @zip2_f32_v2(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: zip2_f32_v2:
; CHECK: zip2 z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x float> @llvm.aarch64.sve.zip2.nxv2f32(<vscale x 2 x float> %a,
                                                                  <vscale x 2 x float> %b)
  ret <vscale x 2 x float> %out
}
2019
+
1912
2020
define <vscale x 4 x float > @zip2_f32 (<vscale x 4 x float > %a , <vscale x 4 x float > %b ) {
1913
2021
; CHECK-LABEL: zip2_f32:
1914
2022
; CHECK: zip2 z0.s, z0.s, z1.s
@@ -2061,9 +2169,11 @@ declare <vscale x 16 x i8> @llvm.aarch64.sve.trn1.nxv16i8(<vscale x 16 x i8>, <v
2061
2169
declare <vscale x 8 x i16 > @llvm.aarch64.sve.trn1.nxv8i16 (<vscale x 8 x i16 >, <vscale x 8 x i16 >)
2062
2170
declare <vscale x 4 x i32 > @llvm.aarch64.sve.trn1.nxv4i32 (<vscale x 4 x i32 >, <vscale x 4 x i32 >)
2063
2171
declare <vscale x 2 x i64 > @llvm.aarch64.sve.trn1.nxv2i64 (<vscale x 2 x i64 >, <vscale x 2 x i64 >)
2172
declare <vscale x 2 x half> @llvm.aarch64.sve.trn1.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>)
2064
2173
declare <vscale x 4 x half > @llvm.aarch64.sve.trn1.nxv4f16 (<vscale x 4 x half >, <vscale x 4 x half >)
2065
2174
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.trn1.nxv8bf16 (<vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
2066
2175
declare <vscale x 8 x half > @llvm.aarch64.sve.trn1.nxv8f16 (<vscale x 8 x half >, <vscale x 8 x half >)
2176
declare <vscale x 2 x float> @llvm.aarch64.sve.trn1.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>)
2067
2177
declare <vscale x 4 x float > @llvm.aarch64.sve.trn1.nxv4f32 (<vscale x 4 x float >, <vscale x 4 x float >)
2068
2178
declare <vscale x 2 x double > @llvm.aarch64.sve.trn1.nxv2f64 (<vscale x 2 x double >, <vscale x 2 x double >)
2069
2179
@@ -2075,9 +2185,11 @@ declare <vscale x 16 x i8> @llvm.aarch64.sve.trn2.nxv16i8(<vscale x 16 x i8>, <v
2075
2185
declare <vscale x 8 x i16 > @llvm.aarch64.sve.trn2.nxv8i16 (<vscale x 8 x i16 >, <vscale x 8 x i16 >)
2076
2186
declare <vscale x 4 x i32 > @llvm.aarch64.sve.trn2.nxv4i32 (<vscale x 4 x i32 >, <vscale x 4 x i32 >)
2077
2187
declare <vscale x 2 x i64 > @llvm.aarch64.sve.trn2.nxv2i64 (<vscale x 2 x i64 >, <vscale x 2 x i64 >)
2188
declare <vscale x 2 x half> @llvm.aarch64.sve.trn2.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>)
2078
2189
declare <vscale x 4 x half > @llvm.aarch64.sve.trn2.nxv4f16 (<vscale x 4 x half >, <vscale x 4 x half >)
2079
2190
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.trn2.nxv8bf16 (<vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
2080
2191
declare <vscale x 8 x half > @llvm.aarch64.sve.trn2.nxv8f16 (<vscale x 8 x half >, <vscale x 8 x half >)
2192
declare <vscale x 2 x float> @llvm.aarch64.sve.trn2.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>)
2081
2193
declare <vscale x 4 x float > @llvm.aarch64.sve.trn2.nxv4f32 (<vscale x 4 x float >, <vscale x 4 x float >)
2082
2194
declare <vscale x 2 x double > @llvm.aarch64.sve.trn2.nxv2f64 (<vscale x 2 x double >, <vscale x 2 x double >)
2083
2195
@@ -2089,9 +2201,11 @@ declare <vscale x 16 x i8> @llvm.aarch64.sve.uzp1.nxv16i8(<vscale x 16 x i8>, <v
2089
2201
declare <vscale x 8 x i16 > @llvm.aarch64.sve.uzp1.nxv8i16 (<vscale x 8 x i16 >, <vscale x 8 x i16 >)
2090
2202
declare <vscale x 4 x i32 > @llvm.aarch64.sve.uzp1.nxv4i32 (<vscale x 4 x i32 >, <vscale x 4 x i32 >)
2091
2203
declare <vscale x 2 x i64 > @llvm.aarch64.sve.uzp1.nxv2i64 (<vscale x 2 x i64 >, <vscale x 2 x i64 >)
2204
declare <vscale x 2 x half> @llvm.aarch64.sve.uzp1.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>)
2092
2205
declare <vscale x 4 x half > @llvm.aarch64.sve.uzp1.nxv4f16 (<vscale x 4 x half >, <vscale x 4 x half >)
2093
2206
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.uzp1.nxv8bf16 (<vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
2094
2207
declare <vscale x 8 x half > @llvm.aarch64.sve.uzp1.nxv8f16 (<vscale x 8 x half >, <vscale x 8 x half >)
2208
declare <vscale x 2 x float> @llvm.aarch64.sve.uzp1.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>)
2095
2209
declare <vscale x 4 x float > @llvm.aarch64.sve.uzp1.nxv4f32 (<vscale x 4 x float >, <vscale x 4 x float >)
2096
2210
declare <vscale x 2 x double > @llvm.aarch64.sve.uzp1.nxv2f64 (<vscale x 2 x double >, <vscale x 2 x double >)
2097
2211
@@ -2103,9 +2217,11 @@ declare <vscale x 16 x i8> @llvm.aarch64.sve.uzp2.nxv16i8(<vscale x 16 x i8>, <v
2103
2217
declare <vscale x 8 x i16 > @llvm.aarch64.sve.uzp2.nxv8i16 (<vscale x 8 x i16 >, <vscale x 8 x i16 >)
2104
2218
declare <vscale x 4 x i32 > @llvm.aarch64.sve.uzp2.nxv4i32 (<vscale x 4 x i32 >, <vscale x 4 x i32 >)
2105
2219
declare <vscale x 2 x i64 > @llvm.aarch64.sve.uzp2.nxv2i64 (<vscale x 2 x i64 >, <vscale x 2 x i64 >)
2220
declare <vscale x 2 x half> @llvm.aarch64.sve.uzp2.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>)
2106
2221
declare <vscale x 4 x half > @llvm.aarch64.sve.uzp2.nxv4f16 (<vscale x 4 x half >, <vscale x 4 x half >)
2107
2222
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.uzp2.nxv8bf16 (<vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
2108
2223
declare <vscale x 8 x half > @llvm.aarch64.sve.uzp2.nxv8f16 (<vscale x 8 x half >, <vscale x 8 x half >)
2224
declare <vscale x 2 x float> @llvm.aarch64.sve.uzp2.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>)
2109
2225
declare <vscale x 4 x float > @llvm.aarch64.sve.uzp2.nxv4f32 (<vscale x 4 x float >, <vscale x 4 x float >)
2110
2226
declare <vscale x 2 x double > @llvm.aarch64.sve.uzp2.nxv2f64 (<vscale x 2 x double >, <vscale x 2 x double >)
2111
2227
@@ -2117,9 +2233,11 @@ declare <vscale x 16 x i8> @llvm.aarch64.sve.zip1.nxv16i8(<vscale x 16 x i8>, <v
2117
2233
declare <vscale x 8 x i16 > @llvm.aarch64.sve.zip1.nxv8i16 (<vscale x 8 x i16 >, <vscale x 8 x i16 >)
2118
2234
declare <vscale x 4 x i32 > @llvm.aarch64.sve.zip1.nxv4i32 (<vscale x 4 x i32 >, <vscale x 4 x i32 >)
2119
2235
declare <vscale x 2 x i64 > @llvm.aarch64.sve.zip1.nxv2i64 (<vscale x 2 x i64 >, <vscale x 2 x i64 >)
2236
declare <vscale x 2 x half> @llvm.aarch64.sve.zip1.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>)
2120
2237
declare <vscale x 4 x half > @llvm.aarch64.sve.zip1.nxv4f16 (<vscale x 4 x half >, <vscale x 4 x half >)
2121
2238
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.zip1.nxv8bf16 (<vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
2122
2239
declare <vscale x 8 x half > @llvm.aarch64.sve.zip1.nxv8f16 (<vscale x 8 x half >, <vscale x 8 x half >)
2240
declare <vscale x 2 x float> @llvm.aarch64.sve.zip1.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>)
2123
2241
declare <vscale x 4 x float > @llvm.aarch64.sve.zip1.nxv4f32 (<vscale x 4 x float >, <vscale x 4 x float >)
2124
2242
declare <vscale x 2 x double > @llvm.aarch64.sve.zip1.nxv2f64 (<vscale x 2 x double >, <vscale x 2 x double >)
2125
2243
@@ -2131,9 +2249,11 @@ declare <vscale x 16 x i8> @llvm.aarch64.sve.zip2.nxv16i8(<vscale x 16 x i8>, <v
2131
2249
declare <vscale x 8 x i16 > @llvm.aarch64.sve.zip2.nxv8i16 (<vscale x 8 x i16 >, <vscale x 8 x i16 >)
2132
2250
declare <vscale x 4 x i32 > @llvm.aarch64.sve.zip2.nxv4i32 (<vscale x 4 x i32 >, <vscale x 4 x i32 >)
2133
2251
declare <vscale x 2 x i64 > @llvm.aarch64.sve.zip2.nxv2i64 (<vscale x 2 x i64 >, <vscale x 2 x i64 >)
2252
declare <vscale x 2 x half> @llvm.aarch64.sve.zip2.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>)
2134
2253
declare <vscale x 4 x half > @llvm.aarch64.sve.zip2.nxv4f16 (<vscale x 4 x half >, <vscale x 4 x half >)
2135
2254
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.zip2.nxv8bf16 (<vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
2136
2255
declare <vscale x 8 x half > @llvm.aarch64.sve.zip2.nxv8f16 (<vscale x 8 x half >, <vscale x 8 x half >)
2256
declare <vscale x 2 x float> @llvm.aarch64.sve.zip2.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>)
2137
2257
declare <vscale x 4 x float > @llvm.aarch64.sve.zip2.nxv4f32 (<vscale x 4 x float >, <vscale x 4 x float >)
2138
2258
declare <vscale x 2 x double > @llvm.aarch64.sve.zip2.nxv2f64 (<vscale x 2 x double >, <vscale x 2 x double >)
2139
2259
0 commit comments