+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -S -mtriple=aarch64 -slp-vectorizer | FileCheck %s
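+; reduce_or computes an OR reduction of xor'd byte loads from two 8-byte
+; buffers, a shape the SLP vectorizer could treat as a reduction. The checks
+; below are autogenerated and capture the pass output for this input, which
+; is still scalar IR here.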
+
+%struct.buf = type { [8 x i8] }
+
+define i8 @reduce_or(%struct.buf* %a, %struct.buf* %b) {
+; CHECK-LABEL: @reduce_or(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_BUF:%.*]], %struct.buf* [[A:%.*]], i64 0, i32 0, i64 0
+; CHECK-NEXT: [[TMP0:%.*]] = load i8, i8* [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[B:%.*]], i64 0, i32 0, i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* [[ARRAYIDX3]], align 1
+; CHECK-NEXT: [[XOR12:%.*]] = xor i8 [[TMP1]], [[TMP0]]
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[A]], i64 0, i32 0, i64 1
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX_1]], align 1
+; CHECK-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[B]], i64 0, i32 0, i64 1
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, i8* [[ARRAYIDX3_1]], align 1
+; CHECK-NEXT: [[XOR12_1:%.*]] = xor i8 [[TMP3]], [[TMP2]]
+; CHECK-NEXT: [[OR13_1:%.*]] = or i8 [[XOR12_1]], [[XOR12]]
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[A]], i64 0, i32 0, i64 2
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, i8* [[ARRAYIDX_2]], align 1
+; CHECK-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[B]], i64 0, i32 0, i64 2
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, i8* [[ARRAYIDX3_2]], align 1
+; CHECK-NEXT: [[XOR12_2:%.*]] = xor i8 [[TMP5]], [[TMP4]]
+; CHECK-NEXT: [[OR13_2:%.*]] = or i8 [[XOR12_2]], [[OR13_1]]
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[A]], i64 0, i32 0, i64 3
+; CHECK-NEXT: [[TMP6:%.*]] = load i8, i8* [[ARRAYIDX_3]], align 1
+; CHECK-NEXT: [[ARRAYIDX3_3:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[B]], i64 0, i32 0, i64 3
+; CHECK-NEXT: [[TMP7:%.*]] = load i8, i8* [[ARRAYIDX3_3]], align 1
+; CHECK-NEXT: [[XOR12_3:%.*]] = xor i8 [[TMP7]], [[TMP6]]
+; CHECK-NEXT: [[OR13_3:%.*]] = or i8 [[XOR12_3]], [[OR13_2]]
+; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[A]], i64 0, i32 0, i64 4
+; CHECK-NEXT: [[TMP8:%.*]] = load i8, i8* [[ARRAYIDX_4]], align 1
+; CHECK-NEXT: [[ARRAYIDX3_4:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[B]], i64 0, i32 0, i64 4
+; CHECK-NEXT: [[TMP9:%.*]] = load i8, i8* [[ARRAYIDX3_4]], align 1
+; CHECK-NEXT: [[XOR12_4:%.*]] = xor i8 [[TMP9]], [[TMP8]]
+; CHECK-NEXT: [[OR13_4:%.*]] = or i8 [[XOR12_4]], [[OR13_3]]
+; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[A]], i64 0, i32 0, i64 5
+; CHECK-NEXT: [[TMP10:%.*]] = load i8, i8* [[ARRAYIDX_5]], align 1
+; CHECK-NEXT: [[ARRAYIDX3_5:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[B]], i64 0, i32 0, i64 5
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, i8* [[ARRAYIDX3_5]], align 1
+; CHECK-NEXT: [[XOR12_5:%.*]] = xor i8 [[TMP11]], [[TMP10]]
+; CHECK-NEXT: [[OR13_5:%.*]] = or i8 [[XOR12_5]], [[OR13_4]]
+; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[A]], i64 0, i32 0, i64 6
+; CHECK-NEXT: [[TMP12:%.*]] = load i8, i8* [[ARRAYIDX_6]], align 1
+; CHECK-NEXT: [[ARRAYIDX3_6:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[B]], i64 0, i32 0, i64 6
+; CHECK-NEXT: [[TMP13:%.*]] = load i8, i8* [[ARRAYIDX3_6]], align 1
+; CHECK-NEXT: [[XOR12_6:%.*]] = xor i8 [[TMP13]], [[TMP12]]
+; CHECK-NEXT: [[OR13_6:%.*]] = or i8 [[XOR12_6]], [[OR13_5]]
+; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[A]], i64 0, i32 0, i64 7
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, i8* [[ARRAYIDX_7]], align 1
+; CHECK-NEXT: [[ARRAYIDX3_7:%.*]] = getelementptr inbounds [[STRUCT_BUF]], %struct.buf* [[B]], i64 0, i32 0, i64 7
+; CHECK-NEXT: [[TMP15:%.*]] = load i8, i8* [[ARRAYIDX3_7]], align 1
+; CHECK-NEXT: [[XOR12_7:%.*]] = xor i8 [[TMP15]], [[TMP14]]
+; CHECK-NEXT: [[OR13_7:%.*]] = or i8 [[XOR12_7]], [[OR13_6]]
+; CHECK-NEXT: ret i8 [[OR13_7]]
+;
+entry:
+  %arrayidx = getelementptr inbounds %struct.buf, %struct.buf* %a, i64 0, i32 0, i64 0
+  %0 = load i8, i8* %arrayidx, align 1
+  %arrayidx3 = getelementptr inbounds %struct.buf, %struct.buf* %b, i64 0, i32 0, i64 0
+  %1 = load i8, i8* %arrayidx3, align 1
+  %xor12 = xor i8 %1, %0
+  %arrayidx.1 = getelementptr inbounds %struct.buf, %struct.buf* %a, i64 0, i32 0, i64 1
+  %2 = load i8, i8* %arrayidx.1, align 1
+  %arrayidx3.1 = getelementptr inbounds %struct.buf, %struct.buf* %b, i64 0, i32 0, i64 1
+  %3 = load i8, i8* %arrayidx3.1, align 1
+  %xor12.1 = xor i8 %3, %2
+  %or13.1 = or i8 %xor12.1, %xor12
+  %arrayidx.2 = getelementptr inbounds %struct.buf, %struct.buf* %a, i64 0, i32 0, i64 2
+  %4 = load i8, i8* %arrayidx.2, align 1
+  %arrayidx3.2 = getelementptr inbounds %struct.buf, %struct.buf* %b, i64 0, i32 0, i64 2
+  %5 = load i8, i8* %arrayidx3.2, align 1
+  %xor12.2 = xor i8 %5, %4
+  %or13.2 = or i8 %xor12.2, %or13.1
+  %arrayidx.3 = getelementptr inbounds %struct.buf, %struct.buf* %a, i64 0, i32 0, i64 3
+  %6 = load i8, i8* %arrayidx.3, align 1
+  %arrayidx3.3 = getelementptr inbounds %struct.buf, %struct.buf* %b, i64 0, i32 0, i64 3
+  %7 = load i8, i8* %arrayidx3.3, align 1
+  %xor12.3 = xor i8 %7, %6
+  %or13.3 = or i8 %xor12.3, %or13.2
+  %arrayidx.4 = getelementptr inbounds %struct.buf, %struct.buf* %a, i64 0, i32 0, i64 4
+  %8 = load i8, i8* %arrayidx.4, align 1
+  %arrayidx3.4 = getelementptr inbounds %struct.buf, %struct.buf* %b, i64 0, i32 0, i64 4
+  %9 = load i8, i8* %arrayidx3.4, align 1
+  %xor12.4 = xor i8 %9, %8
+  %or13.4 = or i8 %xor12.4, %or13.3
+  %arrayidx.5 = getelementptr inbounds %struct.buf, %struct.buf* %a, i64 0, i32 0, i64 5
+  %10 = load i8, i8* %arrayidx.5, align 1
+  %arrayidx3.5 = getelementptr inbounds %struct.buf, %struct.buf* %b, i64 0, i32 0, i64 5
+  %11 = load i8, i8* %arrayidx3.5, align 1
+  %xor12.5 = xor i8 %11, %10
+  %or13.5 = or i8 %xor12.5, %or13.4
+  %arrayidx.6 = getelementptr inbounds %struct.buf, %struct.buf* %a, i64 0, i32 0, i64 6
+  %12 = load i8, i8* %arrayidx.6, align 1
+  %arrayidx3.6 = getelementptr inbounds %struct.buf, %struct.buf* %b, i64 0, i32 0, i64 6
+  %13 = load i8, i8* %arrayidx3.6, align 1
+  %xor12.6 = xor i8 %13, %12
+  %or13.6 = or i8 %xor12.6, %or13.5
+  %arrayidx.7 = getelementptr inbounds %struct.buf, %struct.buf* %a, i64 0, i32 0, i64 7
+  %14 = load i8, i8* %arrayidx.7, align 1
+  %arrayidx3.7 = getelementptr inbounds %struct.buf, %struct.buf* %b, i64 0, i32 0, i64 7
+  %15 = load i8, i8* %arrayidx3.7, align 1
+  %xor12.7 = xor i8 %15, %14
+  %or13.7 = or i8 %xor12.7, %or13.6
+  ret i8 %or13.7
+}