@@ -1445,4 +1445,224 @@ for.cond.cleanup: ; preds = %for.body, %entry
1445
1445
ret i32 %r.0.lcssa
1446
1446
}
1447
1447
1448
; Reduction of x[i]*x[i] with mixed extends: i16 operands are sign-extended to
; i32, squared (result is non-negative), then zext nneg to an i64 accumulator.
; Checks the loop vectorizes to an <8 x i16> load with a v8i64 add reduction.
define i64 @mla_xx_sext_zext(ptr nocapture noundef readonly %x, i32 %n) #0 {
; CHECK-LABEL: @mla_xx_sext_zext(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP9]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 8
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N]], 2147483640
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP0]], align 2
; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <8 x i32> [[TMP1]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = zext nneg <8 x i32> [[TMP2]] to <8 x i64>
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP3]])
; CHECK-NEXT: [[TMP5]] = add i64 [[TMP4]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP5]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[S_0_LCSSA:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i64 [[S_0_LCSSA]]
; CHECK: for.body:
; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[S_010:%.*]] = phi i64 [ [[ADD]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[I_011]]
; CHECK-NEXT: [[TMP7:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP7]] to i32
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV]], [[CONV]]
; CHECK-NEXT: [[CONV3:%.*]] = zext nneg i32 [[MUL]] to i64
; CHECK-NEXT: [[ADD]] = add nuw nsw i64 [[S_010]], [[CONV3]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_011]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
;
entry:
  %cmp9 = icmp sgt i32 %n, 0
  br i1 %cmp9, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.body, %entry
  %s.0.lcssa = phi i64 [ 0, %entry ], [ %add, %for.body ]
  ret i64 %s.0.lcssa

for.body:                                         ; preds = %entry, %for.body
  %i.011 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %s.010 = phi i64 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i16, ptr %x, i32 %i.011
  %0 = load i16, ptr %arrayidx, align 2
  %conv = sext i16 %0 to i32
  %mul = mul nsw i32 %conv, %conv
  %conv3 = zext nneg i32 %mul to i64
  %add = add nuw nsw i64 %s.010, %conv3
  %inc = add nuw nsw i32 %i.011, 1
  %exitcond.not = icmp eq i32 %inc, %n
  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}
1516
+
1517
; Two in-loop reductions over the same loaded values: an i64 sum of squares
; (%add) and an i32 plain sum (%add6). Entry asserts n > 0 via llvm.assume, so
; the loop is always entered. Checks both reductions vectorize side by side
; (v8i64 and v8i32 add reductions sharing one <8 x i16> load).
define i64 @mla_and_add_together_16_64(ptr nocapture noundef readonly %x, i32 noundef %n) #0 {
; CHECK-LABEL: @mla_and_add_together_16_64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP16:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP16]])
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp samesign ult i32 [[N]], 8
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N]], 2147483640
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP0]], align 2
; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <8 x i32> [[TMP1]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = zext nneg <8 x i32> [[TMP2]] to <8 x i64>
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP3]])
; CHECK-NEXT: [[TMP5]] = add i64 [[TMP4]], [[VEC_PHI]]
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP1]])
; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP5]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: [[BC_MERGE_RDX2:%.*]] = phi i32 [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i64 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: [[ADD6_LCSSA:%.*]] = phi i32 [ [[ADD6:%.*]], [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: [[CONV7:%.*]] = sext i32 [[ADD6_LCSSA]] to i64
; CHECK-NEXT: [[DIV:%.*]] = sdiv i64 [[CONV7]], [[ADD_LCSSA]]
; CHECK-NEXT: ret i64 [[DIV]]
; CHECK: for.body:
; CHECK-NEXT: [[I_019:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[T_018:%.*]] = phi i64 [ [[ADD]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[S_017:%.*]] = phi i32 [ [[ADD6]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX2]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[I_019]]
; CHECK-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV]], [[CONV]]
; CHECK-NEXT: [[CONV3:%.*]] = zext nneg i32 [[MUL]] to i64
; CHECK-NEXT: [[ADD]] = add nuw nsw i64 [[T_018]], [[CONV3]]
; CHECK-NEXT: [[ADD6]] = add nsw i32 [[S_017]], [[CONV]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_019]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
;
entry:
  %cmp16 = icmp sgt i32 %n, 0
  tail call void @llvm.assume(i1 %cmp16)
  br label %for.body

for.cond.cleanup:
  %conv7 = sext i32 %add6 to i64
  %div = sdiv i64 %conv7, %add
  ret i64 %div

for.body:
  %i.019 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %t.018 = phi i64 [ %add, %for.body ], [ 0, %entry ]
  %s.017 = phi i32 [ %add6, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i16, ptr %x, i32 %i.019
  %0 = load i16, ptr %arrayidx, align 2
  %conv = sext i16 %0 to i32
  %mul = mul nsw i32 %conv, %conv
  %conv3 = zext nneg i32 %mul to i64
  %add = add nuw nsw i64 %t.018, %conv3
  %add6 = add nsw i32 %s.017, %conv
  %inc = add nuw nsw i32 %i.019, 1
  %exitcond.not = icmp eq i32 %inc, %n
  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}
1597
+
1598
; Manually 2x-unrolled dot product of x[] and y[] (i16 * i16 sign-extended,
; widened to i64) accumulating both partial products into one i64 sum. The
; CHECK lines expect this to stay scalar (no vector.body) — the interleaved
; double-reduction pattern is not vectorized here.
define i64 @interleave_doublereduct_i16_i64(ptr %x, ptr %y, i32 %n) {
; CHECK-LABEL: @interleave_doublereduct_i16_i64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP23:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP23]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[T_0_LCSSA:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD12:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: ret i64 [[T_0_LCSSA]]
; CHECK: for.body:
; CHECK-NEXT: [[I_025:%.*]] = phi i32 [ [[ADD13:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: [[T_024:%.*]] = phi i64 [ [[ADD12]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[I_025]]
; CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i32 [[I_025]]
; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[ARRAYIDX1]], align 2
; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[TMP1]] to i32
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV2]], [[CONV]]
; CHECK-NEXT: [[CONV3:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[T_024]], [[CONV3]]
; CHECK-NEXT: [[ADD4:%.*]] = or disjoint i32 [[I_025]], 1
; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[ADD4]]
; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[ARRAYIDX5]], align 2
; CHECK-NEXT: [[CONV6:%.*]] = sext i16 [[TMP2]] to i32
; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, ptr [[Y]], i32 [[ADD4]]
; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr [[ARRAYIDX8]], align 2
; CHECK-NEXT: [[CONV9:%.*]] = sext i16 [[TMP3]] to i32
; CHECK-NEXT: [[MUL10:%.*]] = mul nsw i32 [[CONV9]], [[CONV6]]
; CHECK-NEXT: [[CONV11:%.*]] = sext i32 [[MUL10]] to i64
; CHECK-NEXT: [[ADD12]] = add nsw i64 [[ADD]], [[CONV11]]
; CHECK-NEXT: [[ADD13]] = add nuw nsw i32 [[I_025]], 2
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[ADD13]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP]]
;
entry:
  %cmp23 = icmp sgt i32 %n, 0
  br i1 %cmp23, label %for.body, label %for.cond.cleanup

for.cond.cleanup:
  %t.0.lcssa = phi i64 [ 0, %entry ], [ %add12, %for.body ]
  ret i64 %t.0.lcssa

for.body:
  %i.025 = phi i32 [ %add13, %for.body ], [ 0, %entry ]
  %t.024 = phi i64 [ %add12, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i16, ptr %x, i32 %i.025
  %0 = load i16, ptr %arrayidx, align 2
  %conv = sext i16 %0 to i32
  %arrayidx1 = getelementptr inbounds i16, ptr %y, i32 %i.025
  %1 = load i16, ptr %arrayidx1, align 2
  %conv2 = sext i16 %1 to i32
  %mul = mul nsw i32 %conv2, %conv
  %conv3 = sext i32 %mul to i64
  %add = add nsw i64 %t.024, %conv3
  %add4 = or disjoint i32 %i.025, 1
  %arrayidx5 = getelementptr inbounds i16, ptr %x, i32 %add4
  %2 = load i16, ptr %arrayidx5, align 2
  %conv6 = sext i16 %2 to i32
  %arrayidx8 = getelementptr inbounds i16, ptr %y, i32 %add4
  %3 = load i16, ptr %arrayidx8, align 2
  %conv9 = sext i16 %3 to i32
  %mul10 = mul nsw i32 %conv9, %conv6
  %conv11 = sext i32 %mul10 to i64
  %add12 = add nsw i64 %add, %conv11
  %add13 = add nuw nsw i32 %i.025, 2
  %cmp = icmp slt i32 %add13, %n
  br i1 %cmp, label %for.body, label %for.cond.cleanup
}
1666
+
1667
+
1448
1668
; Functions tagged #0 target Arm MVE (M-profile Vector Extension).
attributes #0 = { "target-features"="+mve" }