@@ -12,6 +12,15 @@ typedef struct _a {
12
12
13
13
// Emits a local of the typedef'd record type (presumably `at` == struct _a
// from the typedef above this view — confirm against the full file) so the
// CIR struct type is generated and the CHECK line below can match it.
void m () { at y; }
14
14
15
// File-scope variables covering every integer width and signedness that the
// __sync_*_and_fetch tests below exercise. (Reconstructed: the pasted span was
// corrupted by diff '+' markers and interleaved gutter line numbers.)
signed char sc;
unsigned char uc;
signed short ss;
unsigned short us;
signed int si;
unsigned int ui;
signed long long sll;
unsigned long long ull;
15
24
// CHECK: ![[A:.*]] = !cir.struct<struct "_a" {!s32i}>
16
25
17
26
int basic_binop_fetch (int *i) {
@@ -649,3 +658,85 @@ void cmp_val_ushort(unsigned short* p, short x, short u) {
649
658
// Exercises __sync_val_compare_and_swap on an unsigned long location: when
// *p compares equal to x, *p is atomically replaced with u. The builtin's
// return value is captured so the val-form (value-returning) lowering is the
// one emitted by codegen.
void cmp_val_ulong(unsigned long *p, long x, long u) {
  long old = __sync_val_compare_and_swap(p, x, u);
  (void)old; // result only needed to force the value-returning builtin form
}
661
+
662
+ // CHECK-LABEL: @test_op_and_fetch
663
+ // LLVM-LABEL: @test_op_and_fetch
664
+ extern " C" void test_op_and_fetch (void )
665
+ {
666
+ // CHECK: [[VAL0:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s8i
667
+ // CHECK: [[RES0:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr<!s8i>, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i
668
+ // CHECK: [[RET0:%.*]] = cir.binop(add, [[RES0]], [[VAL0]]) : !s8i
669
+ // LLVM: [[VAL0:%.*]] = load i8, ptr @uc, align 1
670
+ // LLVM: [[RES0:%.*]] = atomicrmw add ptr @sc, i8 [[VAL0]] seq_cst, align 1
671
+ // LLVM: [[RET0:%.*]] = add i8 [[RES0]], [[VAL0]]
672
+ // LLVM: store i8 [[RET0]], ptr @sc, align 1
673
+ sc = __sync_add_and_fetch (&sc, uc);
674
+
675
+ // CHECK: [[RES1:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr<!u8i>, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i
676
+ // CHECK: [[RET1:%.*]] = cir.binop(add, [[RES1]], [[VAL1]]) : !u8i
677
+ // LLVM: [[VAL1:%.*]] = load i8, ptr @uc, align 1
678
+ // LLVM: [[RES1:%.*]] = atomicrmw add ptr @uc, i8 [[VAL1]] seq_cst, align 1
679
+ // LLVM: [[RET1:%.*]] = add i8 [[RES1]], [[VAL1]]
680
+ // LLVM: store i8 [[RET1]], ptr @uc, align 1
681
+ uc = __sync_add_and_fetch (&uc, uc);
682
+
683
+ // CHECK: [[VAL2:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s16i
684
+ // CHECK: [[RES2:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr<!s16i>, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i
685
+ // CHECK: [[RET2:%.*]] = cir.binop(add, [[RES2]], [[VAL2]]) : !s16i
686
+ // LLVM: [[VAL2:%.*]] = load i8, ptr @uc, align 1
687
+ // LLVM: [[CONV2:%.*]] = zext i8 [[VAL2]] to i16
688
+ // LLVM: [[RES2:%.*]] = atomicrmw add ptr @ss, i16 [[CONV2]] seq_cst, align 2
689
+ // LLVM: [[RET2:%.*]] = add i16 [[RES2]], [[CONV2]]
690
+ // LLVM: store i16 [[RET2]], ptr @ss, align 2
691
+ ss = __sync_add_and_fetch (&ss, uc);
692
+
693
+ // CHECK: [[VAL3:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u16i
694
+ // CHECK: [[RES3:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr<!u16i>, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i
695
+ // CHECK: [[RET3:%.*]] = cir.binop(add, [[RES3]], [[VAL3]]) : !u16i
696
+ // LLVM: [[VAL3:%.*]] = load i8, ptr @uc, align 1
697
+ // LLVM: [[CONV3:%.*]] = zext i8 [[VAL3]] to i16
698
+ // LLVM: [[RES3:%.*]] = atomicrmw add ptr @us, i16 [[CONV3]] seq_cst, align 2
699
+ // LLVM: [[RET3:%.*]] = add i16 [[RES3]], [[CONV3]]
700
+ // LLVM: store i16 [[RET3]], ptr @us
701
+ us = __sync_add_and_fetch (&us, uc);
702
+
703
+ // CHECK: [[VAL4:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s32i
704
+ // CHECK: [[RES4:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr<!s32i>, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i
705
+ // CHECK: [[RET4:%.*]] = cir.binop(add, [[RES4]], [[VAL4]]) : !s32i
706
+ // LLVM: [[VAL4:%.*]] = load i8, ptr @uc, align 1
707
+ // LLVM: [[CONV4:%.*]] = zext i8 [[VAL4]] to i32
708
+ // LLVM: [[RES4:%.*]] = atomicrmw add ptr @si, i32 [[CONV4]] seq_cst, align 4
709
+ // LLVM: [[RET4:%.*]] = add i32 [[RES4]], [[CONV4]]
710
+ // LLVM: store i32 [[RET4]], ptr @si, align 4
711
+ si = __sync_add_and_fetch (&si, uc);
712
+
713
+ // CHECK: [[VAL5:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u32i
714
+ // CHECK: [[RES5:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr<!u32i>, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i
715
+ // CHECK: [[RET5:%.*]] = cir.binop(add, [[RES5]], [[VAL5]]) : !u32i
716
+ // LLVM: [[VAL5:%.*]] = load i8, ptr @uc, align 1
717
+ // LLVM: [[CONV5:%.*]] = zext i8 [[VAL5]] to i32
718
+ // LLVM: [[RES5:%.*]] = atomicrmw add ptr @ui, i32 [[CONV5]] seq_cst, align 4
719
+ // LLVM: [[RET5:%.*]] = add i32 [[RES5]], [[CONV5]]
720
+ // LLVM: store i32 [[RET5]], ptr @ui, align 4
721
+ ui = __sync_add_and_fetch (&ui, uc);
722
+
723
+ // CHECK: [[VAL6:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s64i
724
+ // CHECK: [[RES6:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr<!s64i>, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i
725
+ // CHECK: [[RET6:%.*]] = cir.binop(add, [[RES6]], [[VAL6]]) : !s64i
726
+ // LLVM: [[VAL6:%.*]] = load i8, ptr @uc, align 1
727
+ // LLVM: [[CONV6:%.*]] = zext i8 [[VAL6]] to i64
728
+ // LLVM: [[RES6:%.*]] = atomicrmw add ptr @sll, i64 [[CONV6]] seq_cst, align 8
729
+ // LLVM: [[RET6:%.*]] = add i64 [[RES6]], [[CONV6]]
730
+ // LLVM: store i64 [[RET6]], ptr @sll, align 8
731
+ sll = __sync_add_and_fetch (&sll, uc);
732
+
733
+ // CHECK: [[VAL7:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u64i
734
+ // CHECK: [[RES7:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr<!u64i>, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i
735
+ // CHECK: [[RET7:%.*]] = cir.binop(add, [[RES7]], [[VAL7]]) : !u64i
736
+ // LLVM: [[VAL7:%.*]] = load i8, ptr @uc, align 1
737
+ // LLVM: [[CONV7:%.*]] = zext i8 [[VAL7]] to i64
738
+ // LLVM: [[RES7:%.*]] = atomicrmw add ptr @ull, i64 [[CONV7]] seq_cst, align 8
739
+ // LLVM: [[RET7:%.*]] = add i64 [[RES7]], [[CONV7]]
740
+ // LLVM: store i64 [[RET7]], ptr @ull, align 8
741
+ ull = __sync_add_and_fetch (&ull, uc);
742
+ }
0 commit comments