@@ -2,13 +2,13 @@
 
 
 define void @atomic_inc(ptr %ptr0, ptr addrspace(1) %ptr1, ptr addrspace(3) %ptr3) {
-  ; CHECK: atomicrmw uinc_wrap ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+  ; CHECK: atomicrmw uinc_wrap ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
   %result0 = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr %ptr0, i32 42, i32 0, i32 0, i1 false)
 
-  ; CHECK: atomicrmw uinc_wrap ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
+  ; CHECK: atomicrmw uinc_wrap ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
   %result1 = call i32 @llvm.amdgcn.atomic.inc.i32.p1(ptr addrspace(1) %ptr1, i32 43, i32 0, i32 0, i1 false)
 
-  ; CHECK: atomicrmw uinc_wrap ptr addrspace(3) %ptr3, i32 46 syncscope("agent") seq_cst, align 4
+  ; CHECK: atomicrmw uinc_wrap ptr addrspace(3) %ptr3, i32 46 syncscope("agent") seq_cst, align 4{{$}}
   %result2 = call i32 @llvm.amdgcn.atomic.inc.i32.p3(ptr addrspace(3) %ptr3, i32 46, i32 0, i32 0, i1 false)
 
   ; CHECK: atomicrmw uinc_wrap ptr %ptr0, i64 48 syncscope("agent") seq_cst, align 8
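
These hunks exercise the bitcode auto-upgrade that rewrites the legacy llvm.amdgcn.atomic.inc/dec intrinsics into native atomicrmw instructions; the change under test is that the upgrader now also attaches !amdgpu.no.fine.grained.memory metadata in the flat and global (addrspace 1) cases. A minimal before/after sketch in LLVM IR (the @example wrapper is hypothetical, and !0 is assumed to be the usual empty node):

    define i32 @example(ptr addrspace(1) %p) {
      ; Input bitcode: the legacy intrinsic; operands are (ptr, value, ordering, scope, isVolatile).
      %r = call i32 @llvm.amdgcn.atomic.inc.i32.p1(ptr addrspace(1) %p, i32 43, i32 0, i32 0, i1 false)
      ret i32 %r
    }

    ; After the upgrade, the call is replaced by a wrapping-increment atomicrmw:
    ;   %r = atomicrmw uinc_wrap ptr addrspace(1) %p, i32 43 syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
    ; with !0 assumed to be empty metadata: !0 = !{}
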
@@ -26,13 +26,13 @@ define void @atomic_inc(ptr %ptr0, ptr addrspace(1) %ptr1, ptr addrspace(3) %ptr3) {
 }
 
 define void @atomic_dec(ptr %ptr0, ptr addrspace(1) %ptr1, ptr addrspace(3) %ptr3) {
-  ; CHECK: atomicrmw udec_wrap ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+  ; CHECK: atomicrmw udec_wrap ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
   %result0 = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr %ptr0, i32 42, i32 0, i32 0, i1 false)
 
-  ; CHECK: atomicrmw udec_wrap ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
+  ; CHECK: atomicrmw udec_wrap ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
   %result1 = call i32 @llvm.amdgcn.atomic.dec.i32.p1(ptr addrspace(1) %ptr1, i32 43, i32 0, i32 0, i1 false)
 
-  ; CHECK: atomicrmw udec_wrap ptr addrspace(3) %ptr3, i32 46 syncscope("agent") seq_cst, align 4
+  ; CHECK: atomicrmw udec_wrap ptr addrspace(3) %ptr3, i32 46 syncscope("agent") seq_cst, align 4{{$}}
   %result2 = call i32 @llvm.amdgcn.atomic.dec.i32.p3(ptr addrspace(3) %ptr3, i32 46, i32 0, i32 0, i1 false)
 
   ; CHECK: atomicrmw udec_wrap ptr %ptr0, i64 48 syncscope("agent") seq_cst, align 8
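
Note that the addrspace(3) checks keep their old form and now end in {{$}}, FileCheck's end-of-line regex anchor: the match fails if anything, in particular the new metadata, trails "align 4". So the test also pins down that LDS atomics are left unannotated, presumably because local memory can never be fine-grained host-visible memory. The RUN line sits at file line 1, outside these hunks; a sketch, assuming the usual round-trip form of a Bitcode test:

    ; RUN: llvm-as < %s | llvm-dis | FileCheck %s
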
@@ -51,49 +51,49 @@ define void @atomic_dec(ptr %ptr0, ptr addrspace(1) %ptr1, ptr addrspace(3) %ptr3) {
 
 ; Test some invalid ordering handling
 define void @ordering(ptr %ptr0, ptr addrspace(1) %ptr1, ptr addrspace(3) %ptr3) {
-  ; CHECK: atomicrmw volatile uinc_wrap ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+  ; CHECK: atomicrmw volatile uinc_wrap ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
   %result0 = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr %ptr0, i32 42, i32 -1, i32 0, i1 true)
 
-  ; CHECK: atomicrmw volatile uinc_wrap ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
+  ; CHECK: atomicrmw volatile uinc_wrap ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
   %result1 = call i32 @llvm.amdgcn.atomic.inc.i32.p1(ptr addrspace(1) %ptr1, i32 43, i32 0, i32 0, i1 true)
 
-  ; CHECK: atomicrmw uinc_wrap ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
+  ; CHECK: atomicrmw uinc_wrap ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
   %result2 = call i32 @llvm.amdgcn.atomic.inc.i32.p1(ptr addrspace(1) %ptr1, i32 43, i32 1, i32 0, i1 false)
 
-  ; CHECK: atomicrmw volatile uinc_wrap ptr addrspace(1) %ptr1, i32 43 syncscope("agent") monotonic, align 4
+  ; CHECK: atomicrmw volatile uinc_wrap ptr addrspace(1) %ptr1, i32 43 syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory !0
   %result3 = call i32 @llvm.amdgcn.atomic.inc.i32.p1(ptr addrspace(1) %ptr1, i32 43, i32 2, i32 0, i1 true)
 
-  ; CHECK: atomicrmw uinc_wrap ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
+  ; CHECK: atomicrmw uinc_wrap ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
   %result4 = call i32 @llvm.amdgcn.atomic.inc.i32.p1(ptr addrspace(1) %ptr1, i32 43, i32 3, i32 0, i1 false)
 
-  ; CHECK: atomicrmw volatile udec_wrap ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+  ; CHECK: atomicrmw volatile udec_wrap ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
   %result5 = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr %ptr0, i32 42, i32 0, i32 4, i1 true)
 
-  ; CHECK: atomicrmw udec_wrap ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+  ; CHECK: atomicrmw udec_wrap ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
   %result6 = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr %ptr0, i32 42, i32 0, i32 5, i1 false)
 
-  ; CHECK: atomicrmw volatile udec_wrap ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+  ; CHECK: atomicrmw volatile udec_wrap ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
   %result7 = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr %ptr0, i32 42, i32 0, i32 6, i1 true)
 
-  ; CHECK: atomicrmw udec_wrap ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+  ; CHECK: atomicrmw udec_wrap ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
   %result8 = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr %ptr0, i32 42, i32 0, i32 7, i1 false)
 
-  ; CHECK: atomicrmw volatile udec_wrap ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+  ; CHECK: atomicrmw volatile udec_wrap ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
   %result9 = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr %ptr0, i32 42, i32 0, i32 8, i1 true)
 
-  ; CHECK: atomicrmw volatile udec_wrap ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4
+  ; CHECK: atomicrmw volatile udec_wrap ptr addrspace(1) %ptr1, i32 43 syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
   %result10 = call i32 @llvm.amdgcn.atomic.dec.i32.p1(ptr addrspace(1) %ptr1, i32 43, i32 3, i32 0, i1 true)
   ret void
 }
 
 define void @immarg_violations(ptr %ptr0, i32 %val32, i1 %val1) {
-  ; CHECK: atomicrmw udec_wrap ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4
+  ; CHECK: atomicrmw udec_wrap ptr %ptr0, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
   %result0 = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr %ptr0, i32 42, i32 %val32, i32 0, i1 false)
 
-  ; CHECK: atomicrmw udec_wrap ptr %ptr0, i32 42 syncscope("agent") monotonic, align 4
+  ; CHECK: atomicrmw udec_wrap ptr %ptr0, i32 42 syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory !0
   %result1 = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr %ptr0, i32 42, i32 2, i32 %val32, i1 false)
 
-  ; CHECK: atomicrmw volatile udec_wrap ptr %ptr0, i32 42 syncscope("agent") monotonic, align 4
+  ; CHECK: atomicrmw volatile udec_wrap ptr %ptr0, i32 42 syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory !0
   %result2 = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr %ptr0, i32 42, i32 2, i32 0, i1 %val1)
   ret void
 }
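
Taken together, the @ordering and @immarg_violations checks document the upgrade's fallback behavior: only ordering operand 2 survives as monotonic, every other (invalid or non-constant) ordering collapses to seq_cst, the scope operand always becomes syncscope("agent"), and a non-constant volatile flag is conservatively upgraded to a volatile atomicrmw. A sketch with hypothetical values:

    ; Ordering operand 2 maps to monotonic; out-of-range or non-immediate
    ; ordering operands fall back to the seq_cst default seen above.
    %x = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr %p, i32 9, i32 2, i32 0, i1 false)
    ; CHECK: atomicrmw udec_wrap ptr %p, i32 9 syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory !0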