@@ -4,11 +4,11 @@ use core::mem;
 // Kernel-provided user-mode helper functions:
 // https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt
 unsafe fn __kuser_cmpxchg(oldval: u32, newval: u32, ptr: *mut u32) -> bool {
-    let f: extern "C" fn (u32, u32, *mut u32) -> u32 = mem::transmute(0xffff0fc0u32);
+    let f: extern "C" fn(u32, u32, *mut u32) -> u32 = mem::transmute(0xffff0fc0u32);
     f(oldval, newval, ptr) == 0
 }
 unsafe fn __kuser_memory_barrier() {
-    let f: extern "C" fn () = mem::transmute(0xffff0fa0u32);
+    let f: extern "C" fn() = mem::transmute(0xffff0fa0u32);
     f();
 }
 
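For context: `0xffff0fc0` and `0xffff0fa0` are fixed addresses in the ARM Linux kernel's user-helper vector page (see the kernel_user_helpers.txt document linked above), which is why the function pointers are conjured with `mem::transmute` from integer constants. The `atomic_rmw` function called by the macros in the next hunk is built on `__kuser_cmpxchg`; the sketch below shows the retry-loop shape under that assumption, not the crate's exact implementation (which also has to route 8- and 16-bit operands through aligned 32-bit accesses):

```rust
// Sketch only: a fetch-and-modify loop over __kuser_cmpxchg. The wrapper
// above returns true when the store succeeded, i.e. *ptr still held `old`.
unsafe fn atomic_rmw_u32(ptr: *mut u32, op: impl Fn(u32) -> u32) -> u32 {
    loop {
        let old = *ptr; // snapshot the current value
        if __kuser_cmpxchg(old, op(old), ptr) {
            return old; // CAS succeeded; fetch_and_* returns the previous value
        }
        // Another thread changed *ptr between the load and the cmpxchg; retry.
    }
}
```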
@@ -94,24 +94,28 @@ macro_rules! atomic_rmw {
         pub unsafe extern "C" fn $name(ptr: *mut $ty, val: $ty) -> $ty {
             atomic_rmw(ptr, |x| $op(x as $ty, val) as u32) as $ty
         }
-    }
+    };
 }
 macro_rules! atomic_cmpxchg {
     ($name:ident, $ty:ty) => {
         #[cfg_attr(not(feature = "mangled-names"), no_mangle)]
         pub unsafe extern "C" fn $name(ptr: *mut $ty, oldval: $ty, newval: $ty) -> $ty {
             atomic_cmpxchg(ptr, oldval as u32, newval as u32) as $ty
         }
-    }
+    };
 }
 
 atomic_rmw!(__sync_fetch_and_add_1, u8, |a: u8, b: u8| a.wrapping_add(b));
-atomic_rmw!(__sync_fetch_and_add_2, u16, |a: u16, b: u16| a.wrapping_add(b));
-atomic_rmw!(__sync_fetch_and_add_4, u32, |a: u32, b: u32| a.wrapping_add(b));
+atomic_rmw!(__sync_fetch_and_add_2, u16, |a: u16, b: u16| a
+    .wrapping_add(b));
+atomic_rmw!(__sync_fetch_and_add_4, u32, |a: u32, b: u32| a
+    .wrapping_add(b));
 
 atomic_rmw!(__sync_fetch_and_sub_1, u8, |a: u8, b: u8| a.wrapping_sub(b));
-atomic_rmw!(__sync_fetch_and_sub_2, u16, |a: u16, b: u16| a.wrapping_sub(b));
-atomic_rmw!(__sync_fetch_and_sub_4, u32, |a: u32, b: u32| a.wrapping_sub(b));
+atomic_rmw!(__sync_fetch_and_sub_2, u16, |a: u16, b: u16| a
+    .wrapping_sub(b));
+atomic_rmw!(__sync_fetch_and_sub_4, u32, |a: u32, b: u32| a
+    .wrapping_sub(b));
 
 atomic_rmw!(__sync_fetch_and_and_1, u8, |a: u8, b: u8| a & b);
 atomic_rmw!(__sync_fetch_and_and_2, u16, |a: u16, b: u16| a & b);
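To make the macro mechanics concrete, here is roughly what one of the invocations above expands to, hand-simplified from the macro body shown in this hunk (the real expansion keeps `$op` as a closure call rather than inlining it):

```rust
// Approximate expansion of:
// atomic_rmw!(__sync_fetch_and_add_1, u8, |a: u8, b: u8| a.wrapping_add(b));
#[cfg_attr(not(feature = "mangled-names"), no_mangle)]
pub unsafe extern "C" fn __sync_fetch_and_add_1(ptr: *mut u8, val: u8) -> u8 {
    // Widen to u32 for the kernel cmpxchg helper, apply the operation,
    // then narrow the previous value back to the intrinsic's width.
    atomic_rmw(ptr, |x| (x as u8).wrapping_add(val) as u32) as u8
}
```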
@@ -129,21 +133,69 @@ atomic_rmw!(__sync_fetch_and_nand_1, u8, |a: u8, b: u8| !(a & b));
 atomic_rmw!(__sync_fetch_and_nand_2, u16, |a: u16, b: u16| !(a & b));
 atomic_rmw!(__sync_fetch_and_nand_4, u32, |a: u32, b: u32| !(a & b));
 
-atomic_rmw!(__sync_fetch_and_max_1, i8, |a: i8, b: i8| if a > b { a } else { b });
-atomic_rmw!(__sync_fetch_and_max_2, i16, |a: i16, b: i16| if a > b { a } else { b });
-atomic_rmw!(__sync_fetch_and_max_4, i32, |a: i32, b: i32| if a > b { a } else { b });
-
-atomic_rmw!(__sync_fetch_and_umax_1, u8, |a: u8, b: u8| if a > b { a } else { b });
-atomic_rmw!(__sync_fetch_and_umax_2, u16, |a: u16, b: u16| if a > b { a } else { b });
-atomic_rmw!(__sync_fetch_and_umax_4, u32, |a: u32, b: u32| if a > b { a } else { b });
-
-atomic_rmw!(__sync_fetch_and_min_1, i8, |a: i8, b: i8| if a < b { a } else { b });
-atomic_rmw!(__sync_fetch_and_min_2, i16, |a: i16, b: i16| if a < b { a } else { b });
-atomic_rmw!(__sync_fetch_and_min_4, i32, |a: i32, b: i32| if a < b { a } else { b });
-
-atomic_rmw!(__sync_fetch_and_umin_1, u8, |a: u8, b: u8| if a < b { a } else { b });
-atomic_rmw!(__sync_fetch_and_umin_2, u16, |a: u16, b: u16| if a < b { a } else { b });
-atomic_rmw!(__sync_fetch_and_umin_4, u32, |a: u32, b: u32| if a < b { a } else { b });
+atomic_rmw!(__sync_fetch_and_max_1, i8, |a: i8, b: i8| if a > b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_max_2, i16, |a: i16, b: i16| if a > b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_max_4, i32, |a: i32, b: i32| if a > b {
+    a
+} else {
+    b
+});
+
+atomic_rmw!(__sync_fetch_and_umax_1, u8, |a: u8, b: u8| if a > b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_umax_2, u16, |a: u16, b: u16| if a > b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_umax_4, u32, |a: u32, b: u32| if a > b {
+    a
+} else {
+    b
+});
+
+atomic_rmw!(__sync_fetch_and_min_1, i8, |a: i8, b: i8| if a < b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_min_2, i16, |a: i16, b: i16| if a < b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_min_4, i32, |a: i32, b: i32| if a < b {
+    a
+} else {
+    b
+});
+
+atomic_rmw!(__sync_fetch_and_umin_1, u8, |a: u8, b: u8| if a < b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_umin_2, u16, |a: u16, b: u16| if a < b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_umin_4, u32, |a: u32, b: u32| if a < b {
+    a
+} else {
+    b
+});
 
 atomic_rmw!(__sync_lock_test_and_set_1, u8, |_: u8, b: u8| b);
 atomic_rmw!(__sync_lock_test_and_set_2, u16, |_: u16, b: u16| b);
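As a usage note: by the `__sync_fetch_and_*` convention, each generated symbol returns the value that was in memory before the operation, so `__sync_lock_test_and_set_*` with the `|_, b| b` closure amounts to an atomic exchange. A hypothetical call site, assuming an ARM Linux target where these symbols are linked in:

```rust
// Hypothetical illustration of the fetch-and-max contract.
let mut v: i8 = 3;
// Stores max(3, 7) = 7 and returns the previous value, 3.
let prev = unsafe { __sync_fetch_and_max_1(&mut v, 7) };
assert_eq!((prev, v), (3, 7));
```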